Commit e3d2ad8cb2775e4201446489efd1cf26c5bbce5c
Committed by
James Bottomley
1 parent
24d3f95a5b
Exists in
master
and in
39 other branches
[SCSI] libcxgbi: pdu read fixes
Fixed the locking and releasing skb in the case of error in the pdu read path, and added define iscsi_task_cxgbi_data to access the private data inside the iscsi_task. Signed-off-by: Karen Xie <kxie@chelsio.com> Reviewed-by: Mike Christie <michaelc@cs.wisc.edu> Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Showing 2 changed files with 53 additions and 28 deletions Inline Diff
drivers/scsi/cxgbi/libcxgbi.c
1 | /* | 1 | /* |
2 | * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. | 2 | * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2010 Chelsio Communications, Inc. | 4 | * Copyright (c) 2010 Chelsio Communications, Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
9 | * | 9 | * |
10 | * Written by: Karen Xie (kxie@chelsio.com) | 10 | * Written by: Karen Xie (kxie@chelsio.com) |
11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) | 11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ | 14 | #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ |
15 | 15 | ||
16 | #include <linux/skbuff.h> | 16 | #include <linux/skbuff.h> |
17 | #include <linux/crypto.h> | 17 | #include <linux/crypto.h> |
18 | #include <linux/scatterlist.h> | 18 | #include <linux/scatterlist.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <scsi/scsi.h> | 20 | #include <scsi/scsi.h> |
21 | #include <scsi/scsi_cmnd.h> | 21 | #include <scsi/scsi_cmnd.h> |
22 | #include <scsi/scsi_host.h> | 22 | #include <scsi/scsi_host.h> |
23 | #include <linux/if_vlan.h> | 23 | #include <linux/if_vlan.h> |
24 | #include <linux/inet.h> | 24 | #include <linux/inet.h> |
25 | #include <net/dst.h> | 25 | #include <net/dst.h> |
26 | #include <net/route.h> | 26 | #include <net/route.h> |
27 | #include <linux/inetdevice.h> /* ip_dev_find */ | 27 | #include <linux/inetdevice.h> /* ip_dev_find */ |
28 | #include <net/tcp.h> | 28 | #include <net/tcp.h> |
29 | 29 | ||
30 | static unsigned int dbg_level; | 30 | static unsigned int dbg_level; |
31 | 31 | ||
32 | #include "libcxgbi.h" | 32 | #include "libcxgbi.h" |
33 | 33 | ||
34 | #define DRV_MODULE_NAME "libcxgbi" | 34 | #define DRV_MODULE_NAME "libcxgbi" |
35 | #define DRV_MODULE_DESC "Chelsio iSCSI driver library" | 35 | #define DRV_MODULE_DESC "Chelsio iSCSI driver library" |
36 | #define DRV_MODULE_VERSION "0.9.0" | 36 | #define DRV_MODULE_VERSION "0.9.0" |
37 | #define DRV_MODULE_RELDATE "Jun. 2010" | 37 | #define DRV_MODULE_RELDATE "Jun. 2010" |
38 | 38 | ||
39 | MODULE_AUTHOR("Chelsio Communications, Inc."); | 39 | MODULE_AUTHOR("Chelsio Communications, Inc."); |
40 | MODULE_DESCRIPTION(DRV_MODULE_DESC); | 40 | MODULE_DESCRIPTION(DRV_MODULE_DESC); |
41 | MODULE_VERSION(DRV_MODULE_VERSION); | 41 | MODULE_VERSION(DRV_MODULE_VERSION); |
42 | MODULE_LICENSE("GPL"); | 42 | MODULE_LICENSE("GPL"); |
43 | 43 | ||
44 | module_param(dbg_level, uint, 0644); | 44 | module_param(dbg_level, uint, 0644); |
45 | MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); | 45 | MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); |
46 | 46 | ||
47 | 47 | ||
48 | /* | 48 | /* |
49 | * cxgbi device management | 49 | * cxgbi device management |
50 | * maintains a list of the cxgbi devices | 50 | * maintains a list of the cxgbi devices |
51 | */ | 51 | */ |
52 | static LIST_HEAD(cdev_list); | 52 | static LIST_HEAD(cdev_list); |
53 | static DEFINE_MUTEX(cdev_mutex); | 53 | static DEFINE_MUTEX(cdev_mutex); |
54 | 54 | ||
55 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, | 55 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, |
56 | unsigned int max_conn) | 56 | unsigned int max_conn) |
57 | { | 57 | { |
58 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 58 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
59 | 59 | ||
60 | pmap->port_csk = cxgbi_alloc_big_mem(max_conn * | 60 | pmap->port_csk = cxgbi_alloc_big_mem(max_conn * |
61 | sizeof(struct cxgbi_sock *), | 61 | sizeof(struct cxgbi_sock *), |
62 | GFP_KERNEL); | 62 | GFP_KERNEL); |
63 | if (!pmap->port_csk) { | 63 | if (!pmap->port_csk) { |
64 | pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); | 64 | pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); |
65 | return -ENOMEM; | 65 | return -ENOMEM; |
66 | } | 66 | } |
67 | 67 | ||
68 | pmap->max_connect = max_conn; | 68 | pmap->max_connect = max_conn; |
69 | pmap->sport_base = base; | 69 | pmap->sport_base = base; |
70 | spin_lock_init(&pmap->lock); | 70 | spin_lock_init(&pmap->lock); |
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); | 73 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); |
74 | 74 | ||
75 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) | 75 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) |
76 | { | 76 | { |
77 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 77 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
78 | struct cxgbi_sock *csk; | 78 | struct cxgbi_sock *csk; |
79 | int i; | 79 | int i; |
80 | 80 | ||
81 | for (i = 0; i < pmap->max_connect; i++) { | 81 | for (i = 0; i < pmap->max_connect; i++) { |
82 | if (pmap->port_csk[i]) { | 82 | if (pmap->port_csk[i]) { |
83 | csk = pmap->port_csk[i]; | 83 | csk = pmap->port_csk[i]; |
84 | pmap->port_csk[i] = NULL; | 84 | pmap->port_csk[i] = NULL; |
85 | log_debug(1 << CXGBI_DBG_SOCK, | 85 | log_debug(1 << CXGBI_DBG_SOCK, |
86 | "csk 0x%p, cdev 0x%p, offload down.\n", | 86 | "csk 0x%p, cdev 0x%p, offload down.\n", |
87 | csk, cdev); | 87 | csk, cdev); |
88 | spin_lock_bh(&csk->lock); | 88 | spin_lock_bh(&csk->lock); |
89 | cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); | 89 | cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); |
90 | cxgbi_sock_closed(csk); | 90 | cxgbi_sock_closed(csk); |
91 | spin_unlock_bh(&csk->lock); | 91 | spin_unlock_bh(&csk->lock); |
92 | cxgbi_sock_put(csk); | 92 | cxgbi_sock_put(csk); |
93 | } | 93 | } |
94 | } | 94 | } |
95 | } | 95 | } |
96 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); | 96 | EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); |
97 | 97 | ||
98 | static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) | 98 | static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) |
99 | { | 99 | { |
100 | log_debug(1 << CXGBI_DBG_DEV, | 100 | log_debug(1 << CXGBI_DBG_DEV, |
101 | "cdev 0x%p, p# %u.\n", cdev, cdev->nports); | 101 | "cdev 0x%p, p# %u.\n", cdev, cdev->nports); |
102 | cxgbi_hbas_remove(cdev); | 102 | cxgbi_hbas_remove(cdev); |
103 | cxgbi_device_portmap_cleanup(cdev); | 103 | cxgbi_device_portmap_cleanup(cdev); |
104 | if (cdev->dev_ddp_cleanup) | 104 | if (cdev->dev_ddp_cleanup) |
105 | cdev->dev_ddp_cleanup(cdev); | 105 | cdev->dev_ddp_cleanup(cdev); |
106 | else | 106 | else |
107 | cxgbi_ddp_cleanup(cdev); | 107 | cxgbi_ddp_cleanup(cdev); |
108 | if (cdev->ddp) | 108 | if (cdev->ddp) |
109 | cxgbi_ddp_cleanup(cdev); | 109 | cxgbi_ddp_cleanup(cdev); |
110 | if (cdev->pmap.max_connect) | 110 | if (cdev->pmap.max_connect) |
111 | cxgbi_free_big_mem(cdev->pmap.port_csk); | 111 | cxgbi_free_big_mem(cdev->pmap.port_csk); |
112 | kfree(cdev); | 112 | kfree(cdev); |
113 | } | 113 | } |
114 | 114 | ||
115 | struct cxgbi_device *cxgbi_device_register(unsigned int extra, | 115 | struct cxgbi_device *cxgbi_device_register(unsigned int extra, |
116 | unsigned int nports) | 116 | unsigned int nports) |
117 | { | 117 | { |
118 | struct cxgbi_device *cdev; | 118 | struct cxgbi_device *cdev; |
119 | 119 | ||
120 | cdev = kzalloc(sizeof(*cdev) + extra + nports * | 120 | cdev = kzalloc(sizeof(*cdev) + extra + nports * |
121 | (sizeof(struct cxgbi_hba *) + | 121 | (sizeof(struct cxgbi_hba *) + |
122 | sizeof(struct net_device *)), | 122 | sizeof(struct net_device *)), |
123 | GFP_KERNEL); | 123 | GFP_KERNEL); |
124 | if (!cdev) { | 124 | if (!cdev) { |
125 | pr_warn("nport %d, OOM.\n", nports); | 125 | pr_warn("nport %d, OOM.\n", nports); |
126 | return NULL; | 126 | return NULL; |
127 | } | 127 | } |
128 | cdev->ports = (struct net_device **)(cdev + 1); | 128 | cdev->ports = (struct net_device **)(cdev + 1); |
129 | cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * | 129 | cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * |
130 | sizeof(struct net_device *)); | 130 | sizeof(struct net_device *)); |
131 | if (extra) | 131 | if (extra) |
132 | cdev->dd_data = ((char *)cdev->hbas) + | 132 | cdev->dd_data = ((char *)cdev->hbas) + |
133 | nports * sizeof(struct cxgbi_hba *); | 133 | nports * sizeof(struct cxgbi_hba *); |
134 | spin_lock_init(&cdev->pmap.lock); | 134 | spin_lock_init(&cdev->pmap.lock); |
135 | 135 | ||
136 | mutex_lock(&cdev_mutex); | 136 | mutex_lock(&cdev_mutex); |
137 | list_add_tail(&cdev->list_head, &cdev_list); | 137 | list_add_tail(&cdev->list_head, &cdev_list); |
138 | mutex_unlock(&cdev_mutex); | 138 | mutex_unlock(&cdev_mutex); |
139 | 139 | ||
140 | log_debug(1 << CXGBI_DBG_DEV, | 140 | log_debug(1 << CXGBI_DBG_DEV, |
141 | "cdev 0x%p, p# %u.\n", cdev, nports); | 141 | "cdev 0x%p, p# %u.\n", cdev, nports); |
142 | return cdev; | 142 | return cdev; |
143 | } | 143 | } |
144 | EXPORT_SYMBOL_GPL(cxgbi_device_register); | 144 | EXPORT_SYMBOL_GPL(cxgbi_device_register); |
145 | 145 | ||
146 | void cxgbi_device_unregister(struct cxgbi_device *cdev) | 146 | void cxgbi_device_unregister(struct cxgbi_device *cdev) |
147 | { | 147 | { |
148 | log_debug(1 << CXGBI_DBG_DEV, | 148 | log_debug(1 << CXGBI_DBG_DEV, |
149 | "cdev 0x%p, p# %u,%s.\n", | 149 | "cdev 0x%p, p# %u,%s.\n", |
150 | cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); | 150 | cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); |
151 | mutex_lock(&cdev_mutex); | 151 | mutex_lock(&cdev_mutex); |
152 | list_del(&cdev->list_head); | 152 | list_del(&cdev->list_head); |
153 | mutex_unlock(&cdev_mutex); | 153 | mutex_unlock(&cdev_mutex); |
154 | cxgbi_device_destroy(cdev); | 154 | cxgbi_device_destroy(cdev); |
155 | } | 155 | } |
156 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister); | 156 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister); |
157 | 157 | ||
158 | void cxgbi_device_unregister_all(unsigned int flag) | 158 | void cxgbi_device_unregister_all(unsigned int flag) |
159 | { | 159 | { |
160 | struct cxgbi_device *cdev, *tmp; | 160 | struct cxgbi_device *cdev, *tmp; |
161 | 161 | ||
162 | mutex_lock(&cdev_mutex); | 162 | mutex_lock(&cdev_mutex); |
163 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { | 163 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
164 | if ((cdev->flags & flag) == flag) { | 164 | if ((cdev->flags & flag) == flag) { |
165 | log_debug(1 << CXGBI_DBG_DEV, | 165 | log_debug(1 << CXGBI_DBG_DEV, |
166 | "cdev 0x%p, p# %u,%s.\n", | 166 | "cdev 0x%p, p# %u,%s.\n", |
167 | cdev, cdev->nports, cdev->nports ? | 167 | cdev, cdev->nports, cdev->nports ? |
168 | cdev->ports[0]->name : ""); | 168 | cdev->ports[0]->name : ""); |
169 | list_del(&cdev->list_head); | 169 | list_del(&cdev->list_head); |
170 | cxgbi_device_destroy(cdev); | 170 | cxgbi_device_destroy(cdev); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | mutex_unlock(&cdev_mutex); | 173 | mutex_unlock(&cdev_mutex); |
174 | } | 174 | } |
175 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); | 175 | EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); |
176 | 176 | ||
177 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) | 177 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) |
178 | { | 178 | { |
179 | struct cxgbi_device *cdev, *tmp; | 179 | struct cxgbi_device *cdev, *tmp; |
180 | 180 | ||
181 | mutex_lock(&cdev_mutex); | 181 | mutex_lock(&cdev_mutex); |
182 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { | 182 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
183 | if (cdev->lldev == lldev) { | 183 | if (cdev->lldev == lldev) { |
184 | mutex_unlock(&cdev_mutex); | 184 | mutex_unlock(&cdev_mutex); |
185 | return cdev; | 185 | return cdev; |
186 | } | 186 | } |
187 | } | 187 | } |
188 | mutex_unlock(&cdev_mutex); | 188 | mutex_unlock(&cdev_mutex); |
189 | log_debug(1 << CXGBI_DBG_DEV, | 189 | log_debug(1 << CXGBI_DBG_DEV, |
190 | "lldev 0x%p, NO match found.\n", lldev); | 190 | "lldev 0x%p, NO match found.\n", lldev); |
191 | return NULL; | 191 | return NULL; |
192 | } | 192 | } |
193 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); | 193 | EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); |
194 | 194 | ||
195 | static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, | 195 | static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, |
196 | int *port) | 196 | int *port) |
197 | { | 197 | { |
198 | struct cxgbi_device *cdev, *tmp; | 198 | struct cxgbi_device *cdev, *tmp; |
199 | int i; | 199 | int i; |
200 | 200 | ||
201 | if (ndev->priv_flags & IFF_802_1Q_VLAN) | 201 | if (ndev->priv_flags & IFF_802_1Q_VLAN) |
202 | ndev = vlan_dev_real_dev(ndev); | 202 | ndev = vlan_dev_real_dev(ndev); |
203 | 203 | ||
204 | mutex_lock(&cdev_mutex); | 204 | mutex_lock(&cdev_mutex); |
205 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { | 205 | list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { |
206 | for (i = 0; i < cdev->nports; i++) { | 206 | for (i = 0; i < cdev->nports; i++) { |
207 | if (ndev == cdev->ports[i]) { | 207 | if (ndev == cdev->ports[i]) { |
208 | mutex_unlock(&cdev_mutex); | 208 | mutex_unlock(&cdev_mutex); |
209 | if (port) | 209 | if (port) |
210 | *port = i; | 210 | *port = i; |
211 | return cdev; | 211 | return cdev; |
212 | } | 212 | } |
213 | } | 213 | } |
214 | } | 214 | } |
215 | mutex_unlock(&cdev_mutex); | 215 | mutex_unlock(&cdev_mutex); |
216 | log_debug(1 << CXGBI_DBG_DEV, | 216 | log_debug(1 << CXGBI_DBG_DEV, |
217 | "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); | 217 | "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); |
218 | return NULL; | 218 | return NULL; |
219 | } | 219 | } |
220 | 220 | ||
221 | struct cxgbi_hba *cxgbi_hba_find_by_netdev(struct net_device *dev, | 221 | struct cxgbi_hba *cxgbi_hba_find_by_netdev(struct net_device *dev, |
222 | struct cxgbi_device *cdev) | 222 | struct cxgbi_device *cdev) |
223 | { | 223 | { |
224 | int i; | 224 | int i; |
225 | 225 | ||
226 | if (dev->priv_flags & IFF_802_1Q_VLAN) | 226 | if (dev->priv_flags & IFF_802_1Q_VLAN) |
227 | dev = vlan_dev_real_dev(dev); | 227 | dev = vlan_dev_real_dev(dev); |
228 | 228 | ||
229 | for (i = 0; i < cdev->nports; i++) { | 229 | for (i = 0; i < cdev->nports; i++) { |
230 | if (cdev->hbas[i]->ndev == dev) | 230 | if (cdev->hbas[i]->ndev == dev) |
231 | return cdev->hbas[i]; | 231 | return cdev->hbas[i]; |
232 | } | 232 | } |
233 | log_debug(1 << CXGBI_DBG_DEV, | 233 | log_debug(1 << CXGBI_DBG_DEV, |
234 | "ndev 0x%p, %s, cdev 0x%p, NO match found.\n", | 234 | "ndev 0x%p, %s, cdev 0x%p, NO match found.\n", |
235 | dev, dev->name, cdev); | 235 | dev, dev->name, cdev); |
236 | return NULL; | 236 | return NULL; |
237 | } | 237 | } |
238 | 238 | ||
239 | void cxgbi_hbas_remove(struct cxgbi_device *cdev) | 239 | void cxgbi_hbas_remove(struct cxgbi_device *cdev) |
240 | { | 240 | { |
241 | int i; | 241 | int i; |
242 | struct cxgbi_hba *chba; | 242 | struct cxgbi_hba *chba; |
243 | 243 | ||
244 | log_debug(1 << CXGBI_DBG_DEV, | 244 | log_debug(1 << CXGBI_DBG_DEV, |
245 | "cdev 0x%p, p#%u.\n", cdev, cdev->nports); | 245 | "cdev 0x%p, p#%u.\n", cdev, cdev->nports); |
246 | 246 | ||
247 | for (i = 0; i < cdev->nports; i++) { | 247 | for (i = 0; i < cdev->nports; i++) { |
248 | chba = cdev->hbas[i]; | 248 | chba = cdev->hbas[i]; |
249 | if (chba) { | 249 | if (chba) { |
250 | cdev->hbas[i] = NULL; | 250 | cdev->hbas[i] = NULL; |
251 | iscsi_host_remove(chba->shost); | 251 | iscsi_host_remove(chba->shost); |
252 | pci_dev_put(cdev->pdev); | 252 | pci_dev_put(cdev->pdev); |
253 | iscsi_host_free(chba->shost); | 253 | iscsi_host_free(chba->shost); |
254 | } | 254 | } |
255 | } | 255 | } |
256 | } | 256 | } |
257 | EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); | 257 | EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); |
258 | 258 | ||
259 | int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, | 259 | int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, |
260 | unsigned int max_id, struct scsi_host_template *sht, | 260 | unsigned int max_id, struct scsi_host_template *sht, |
261 | struct scsi_transport_template *stt) | 261 | struct scsi_transport_template *stt) |
262 | { | 262 | { |
263 | struct cxgbi_hba *chba; | 263 | struct cxgbi_hba *chba; |
264 | struct Scsi_Host *shost; | 264 | struct Scsi_Host *shost; |
265 | int i, err; | 265 | int i, err; |
266 | 266 | ||
267 | log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); | 267 | log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); |
268 | 268 | ||
269 | for (i = 0; i < cdev->nports; i++) { | 269 | for (i = 0; i < cdev->nports; i++) { |
270 | shost = iscsi_host_alloc(sht, sizeof(*chba), 1); | 270 | shost = iscsi_host_alloc(sht, sizeof(*chba), 1); |
271 | if (!shost) { | 271 | if (!shost) { |
272 | pr_info("0x%p, p%d, %s, host alloc failed.\n", | 272 | pr_info("0x%p, p%d, %s, host alloc failed.\n", |
273 | cdev, i, cdev->ports[i]->name); | 273 | cdev, i, cdev->ports[i]->name); |
274 | err = -ENOMEM; | 274 | err = -ENOMEM; |
275 | goto err_out; | 275 | goto err_out; |
276 | } | 276 | } |
277 | 277 | ||
278 | shost->transportt = stt; | 278 | shost->transportt = stt; |
279 | shost->max_lun = max_lun; | 279 | shost->max_lun = max_lun; |
280 | shost->max_id = max_id; | 280 | shost->max_id = max_id; |
281 | shost->max_channel = 0; | 281 | shost->max_channel = 0; |
282 | shost->max_cmd_len = 16; | 282 | shost->max_cmd_len = 16; |
283 | 283 | ||
284 | chba = iscsi_host_priv(shost); | 284 | chba = iscsi_host_priv(shost); |
285 | chba->cdev = cdev; | 285 | chba->cdev = cdev; |
286 | chba->ndev = cdev->ports[i]; | 286 | chba->ndev = cdev->ports[i]; |
287 | chba->shost = shost; | 287 | chba->shost = shost; |
288 | 288 | ||
289 | log_debug(1 << CXGBI_DBG_DEV, | 289 | log_debug(1 << CXGBI_DBG_DEV, |
290 | "cdev 0x%p, p#%d %s: chba 0x%p.\n", | 290 | "cdev 0x%p, p#%d %s: chba 0x%p.\n", |
291 | cdev, i, cdev->ports[i]->name, chba); | 291 | cdev, i, cdev->ports[i]->name, chba); |
292 | 292 | ||
293 | pci_dev_get(cdev->pdev); | 293 | pci_dev_get(cdev->pdev); |
294 | err = iscsi_host_add(shost, &cdev->pdev->dev); | 294 | err = iscsi_host_add(shost, &cdev->pdev->dev); |
295 | if (err) { | 295 | if (err) { |
296 | pr_info("cdev 0x%p, p#%d %s, host add failed.\n", | 296 | pr_info("cdev 0x%p, p#%d %s, host add failed.\n", |
297 | cdev, i, cdev->ports[i]->name); | 297 | cdev, i, cdev->ports[i]->name); |
298 | pci_dev_put(cdev->pdev); | 298 | pci_dev_put(cdev->pdev); |
299 | scsi_host_put(shost); | 299 | scsi_host_put(shost); |
300 | goto err_out; | 300 | goto err_out; |
301 | } | 301 | } |
302 | 302 | ||
303 | cdev->hbas[i] = chba; | 303 | cdev->hbas[i] = chba; |
304 | } | 304 | } |
305 | 305 | ||
306 | return 0; | 306 | return 0; |
307 | 307 | ||
308 | err_out: | 308 | err_out: |
309 | cxgbi_hbas_remove(cdev); | 309 | cxgbi_hbas_remove(cdev); |
310 | return err; | 310 | return err; |
311 | } | 311 | } |
312 | EXPORT_SYMBOL_GPL(cxgbi_hbas_add); | 312 | EXPORT_SYMBOL_GPL(cxgbi_hbas_add); |
313 | 313 | ||
314 | /* | 314 | /* |
315 | * iSCSI offload | 315 | * iSCSI offload |
316 | * | 316 | * |
317 | * - source port management | 317 | * - source port management |
318 | * To find a free source port in the port allocation map we use a very simple | 318 | * To find a free source port in the port allocation map we use a very simple |
319 | * rotor scheme to look for the next free port. | 319 | * rotor scheme to look for the next free port. |
320 | * | 320 | * |
321 | * If a source port has been specified make sure that it doesn't collide with | 321 | * If a source port has been specified make sure that it doesn't collide with |
322 | * our normal source port allocation map. If it's outside the range of our | 322 | * our normal source port allocation map. If it's outside the range of our |
323 | * allocation/deallocation scheme just let them use it. | 323 | * allocation/deallocation scheme just let them use it. |
324 | * | 324 | * |
325 | * If the source port is outside our allocation range, the caller is | 325 | * If the source port is outside our allocation range, the caller is |
326 | * responsible for keeping track of their port usage. | 326 | * responsible for keeping track of their port usage. |
327 | */ | 327 | */ |
328 | static int sock_get_port(struct cxgbi_sock *csk) | 328 | static int sock_get_port(struct cxgbi_sock *csk) |
329 | { | 329 | { |
330 | struct cxgbi_device *cdev = csk->cdev; | 330 | struct cxgbi_device *cdev = csk->cdev; |
331 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 331 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
332 | unsigned int start; | 332 | unsigned int start; |
333 | int idx; | 333 | int idx; |
334 | 334 | ||
335 | if (!pmap->max_connect) { | 335 | if (!pmap->max_connect) { |
336 | pr_err("cdev 0x%p, p#%u %s, NO port map.\n", | 336 | pr_err("cdev 0x%p, p#%u %s, NO port map.\n", |
337 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); | 337 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); |
338 | return -EADDRNOTAVAIL; | 338 | return -EADDRNOTAVAIL; |
339 | } | 339 | } |
340 | 340 | ||
341 | if (csk->saddr.sin_port) { | 341 | if (csk->saddr.sin_port) { |
342 | pr_err("source port NON-ZERO %u.\n", | 342 | pr_err("source port NON-ZERO %u.\n", |
343 | ntohs(csk->saddr.sin_port)); | 343 | ntohs(csk->saddr.sin_port)); |
344 | return -EADDRINUSE; | 344 | return -EADDRINUSE; |
345 | } | 345 | } |
346 | 346 | ||
347 | spin_lock_bh(&pmap->lock); | 347 | spin_lock_bh(&pmap->lock); |
348 | if (pmap->used >= pmap->max_connect) { | 348 | if (pmap->used >= pmap->max_connect) { |
349 | spin_unlock_bh(&pmap->lock); | 349 | spin_unlock_bh(&pmap->lock); |
350 | pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", | 350 | pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", |
351 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); | 351 | cdev, csk->port_id, cdev->ports[csk->port_id]->name); |
352 | return -EADDRNOTAVAIL; | 352 | return -EADDRNOTAVAIL; |
353 | } | 353 | } |
354 | 354 | ||
355 | start = idx = pmap->next; | 355 | start = idx = pmap->next; |
356 | do { | 356 | do { |
357 | if (++idx >= pmap->max_connect) | 357 | if (++idx >= pmap->max_connect) |
358 | idx = 0; | 358 | idx = 0; |
359 | if (!pmap->port_csk[idx]) { | 359 | if (!pmap->port_csk[idx]) { |
360 | pmap->used++; | 360 | pmap->used++; |
361 | csk->saddr.sin_port = | 361 | csk->saddr.sin_port = |
362 | htons(pmap->sport_base + idx); | 362 | htons(pmap->sport_base + idx); |
363 | pmap->next = idx; | 363 | pmap->next = idx; |
364 | pmap->port_csk[idx] = csk; | 364 | pmap->port_csk[idx] = csk; |
365 | spin_unlock_bh(&pmap->lock); | 365 | spin_unlock_bh(&pmap->lock); |
366 | cxgbi_sock_get(csk); | 366 | cxgbi_sock_get(csk); |
367 | log_debug(1 << CXGBI_DBG_SOCK, | 367 | log_debug(1 << CXGBI_DBG_SOCK, |
368 | "cdev 0x%p, p#%u %s, p %u, %u.\n", | 368 | "cdev 0x%p, p#%u %s, p %u, %u.\n", |
369 | cdev, csk->port_id, | 369 | cdev, csk->port_id, |
370 | cdev->ports[csk->port_id]->name, | 370 | cdev->ports[csk->port_id]->name, |
371 | pmap->sport_base + idx, pmap->next); | 371 | pmap->sport_base + idx, pmap->next); |
372 | return 0; | 372 | return 0; |
373 | } | 373 | } |
374 | } while (idx != start); | 374 | } while (idx != start); |
375 | spin_unlock_bh(&pmap->lock); | 375 | spin_unlock_bh(&pmap->lock); |
376 | 376 | ||
377 | /* should not happen */ | 377 | /* should not happen */ |
378 | pr_warn("cdev 0x%p, p#%u %s, next %u?\n", | 378 | pr_warn("cdev 0x%p, p#%u %s, next %u?\n", |
379 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, | 379 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, |
380 | pmap->next); | 380 | pmap->next); |
381 | return -EADDRNOTAVAIL; | 381 | return -EADDRNOTAVAIL; |
382 | } | 382 | } |
383 | 383 | ||
384 | static void sock_put_port(struct cxgbi_sock *csk) | 384 | static void sock_put_port(struct cxgbi_sock *csk) |
385 | { | 385 | { |
386 | struct cxgbi_device *cdev = csk->cdev; | 386 | struct cxgbi_device *cdev = csk->cdev; |
387 | struct cxgbi_ports_map *pmap = &cdev->pmap; | 387 | struct cxgbi_ports_map *pmap = &cdev->pmap; |
388 | 388 | ||
389 | if (csk->saddr.sin_port) { | 389 | if (csk->saddr.sin_port) { |
390 | int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; | 390 | int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; |
391 | 391 | ||
392 | csk->saddr.sin_port = 0; | 392 | csk->saddr.sin_port = 0; |
393 | if (idx < 0 || idx >= pmap->max_connect) { | 393 | if (idx < 0 || idx >= pmap->max_connect) { |
394 | pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", | 394 | pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", |
395 | cdev, csk->port_id, | 395 | cdev, csk->port_id, |
396 | cdev->ports[csk->port_id]->name, | 396 | cdev->ports[csk->port_id]->name, |
397 | ntohs(csk->saddr.sin_port)); | 397 | ntohs(csk->saddr.sin_port)); |
398 | return; | 398 | return; |
399 | } | 399 | } |
400 | 400 | ||
401 | spin_lock_bh(&pmap->lock); | 401 | spin_lock_bh(&pmap->lock); |
402 | pmap->port_csk[idx] = NULL; | 402 | pmap->port_csk[idx] = NULL; |
403 | pmap->used--; | 403 | pmap->used--; |
404 | spin_unlock_bh(&pmap->lock); | 404 | spin_unlock_bh(&pmap->lock); |
405 | 405 | ||
406 | log_debug(1 << CXGBI_DBG_SOCK, | 406 | log_debug(1 << CXGBI_DBG_SOCK, |
407 | "cdev 0x%p, p#%u %s, release %u.\n", | 407 | "cdev 0x%p, p#%u %s, release %u.\n", |
408 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, | 408 | cdev, csk->port_id, cdev->ports[csk->port_id]->name, |
409 | pmap->sport_base + idx); | 409 | pmap->sport_base + idx); |
410 | 410 | ||
411 | cxgbi_sock_put(csk); | 411 | cxgbi_sock_put(csk); |
412 | } | 412 | } |
413 | } | 413 | } |
414 | 414 | ||
415 | /* | 415 | /* |
416 | * iscsi tcp connection | 416 | * iscsi tcp connection |
417 | */ | 417 | */ |
418 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) | 418 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) |
419 | { | 419 | { |
420 | if (csk->cpl_close) { | 420 | if (csk->cpl_close) { |
421 | kfree_skb(csk->cpl_close); | 421 | kfree_skb(csk->cpl_close); |
422 | csk->cpl_close = NULL; | 422 | csk->cpl_close = NULL; |
423 | } | 423 | } |
424 | if (csk->cpl_abort_req) { | 424 | if (csk->cpl_abort_req) { |
425 | kfree_skb(csk->cpl_abort_req); | 425 | kfree_skb(csk->cpl_abort_req); |
426 | csk->cpl_abort_req = NULL; | 426 | csk->cpl_abort_req = NULL; |
427 | } | 427 | } |
428 | if (csk->cpl_abort_rpl) { | 428 | if (csk->cpl_abort_rpl) { |
429 | kfree_skb(csk->cpl_abort_rpl); | 429 | kfree_skb(csk->cpl_abort_rpl); |
430 | csk->cpl_abort_rpl = NULL; | 430 | csk->cpl_abort_rpl = NULL; |
431 | } | 431 | } |
432 | } | 432 | } |
433 | EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); | 433 | EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); |
434 | 434 | ||
435 | static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) | 435 | static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) |
436 | { | 436 | { |
437 | struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); | 437 | struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); |
438 | 438 | ||
439 | if (!csk) { | 439 | if (!csk) { |
440 | pr_info("alloc csk %zu failed.\n", sizeof(*csk)); | 440 | pr_info("alloc csk %zu failed.\n", sizeof(*csk)); |
441 | return NULL; | 441 | return NULL; |
442 | } | 442 | } |
443 | 443 | ||
444 | if (cdev->csk_alloc_cpls(csk) < 0) { | 444 | if (cdev->csk_alloc_cpls(csk) < 0) { |
445 | pr_info("csk 0x%p, alloc cpls failed.\n", csk); | 445 | pr_info("csk 0x%p, alloc cpls failed.\n", csk); |
446 | kfree(csk); | 446 | kfree(csk); |
447 | return NULL; | 447 | return NULL; |
448 | } | 448 | } |
449 | 449 | ||
450 | spin_lock_init(&csk->lock); | 450 | spin_lock_init(&csk->lock); |
451 | kref_init(&csk->refcnt); | 451 | kref_init(&csk->refcnt); |
452 | skb_queue_head_init(&csk->receive_queue); | 452 | skb_queue_head_init(&csk->receive_queue); |
453 | skb_queue_head_init(&csk->write_queue); | 453 | skb_queue_head_init(&csk->write_queue); |
454 | setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); | 454 | setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); |
455 | rwlock_init(&csk->callback_lock); | 455 | rwlock_init(&csk->callback_lock); |
456 | csk->cdev = cdev; | 456 | csk->cdev = cdev; |
457 | csk->flags = 0; | 457 | csk->flags = 0; |
458 | cxgbi_sock_set_state(csk, CTP_CLOSED); | 458 | cxgbi_sock_set_state(csk, CTP_CLOSED); |
459 | 459 | ||
460 | log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); | 460 | log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); |
461 | 461 | ||
462 | return csk; | 462 | return csk; |
463 | } | 463 | } |
464 | 464 | ||
465 | static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, | 465 | static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, |
466 | __be16 sport, __be16 dport, u8 tos) | 466 | __be16 sport, __be16 dport, u8 tos) |
467 | { | 467 | { |
468 | struct rtable *rt; | 468 | struct rtable *rt; |
469 | struct flowi fl = { | 469 | struct flowi fl = { |
470 | .oif = 0, | 470 | .oif = 0, |
471 | .nl_u = { | 471 | .nl_u = { |
472 | .ip4_u = { | 472 | .ip4_u = { |
473 | .daddr = daddr, | 473 | .daddr = daddr, |
474 | .saddr = saddr, | 474 | .saddr = saddr, |
475 | .tos = tos } | 475 | .tos = tos } |
476 | }, | 476 | }, |
477 | .proto = IPPROTO_TCP, | 477 | .proto = IPPROTO_TCP, |
478 | .uli_u = { | 478 | .uli_u = { |
479 | .ports = { | 479 | .ports = { |
480 | .sport = sport, | 480 | .sport = sport, |
481 | .dport = dport } | 481 | .dport = dport } |
482 | } | 482 | } |
483 | }; | 483 | }; |
484 | 484 | ||
485 | if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) | 485 | if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) |
486 | return NULL; | 486 | return NULL; |
487 | 487 | ||
488 | return rt; | 488 | return rt; |
489 | } | 489 | } |
490 | 490 | ||
/*
 * cxgbi_check_route - resolve an IPv4 destination to an offload-capable
 * net device and allocate a cxgbi_sock bound to it.
 *
 * Returns the new csk on success, or ERR_PTR(-errno):
 *   -EAFNOSUPPORT  dst_addr is not AF_INET
 *   -ENETUNREACH   no route, multicast/broadcast route, or the egress
 *                  device is not a cxgbi offload device
 *   -ENOMEM        cxgbi_sock_create() failed
 *
 * On success, ownership of the route reference transfers to the csk
 * (csk->dst); on any failure after the lookup the route is released
 * via the rel_rt path.
 */
static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	/* NOTE(review): dst->neighbour is dereferenced with no NULL check;
	 * confirm the neighbour is always bound right after the route
	 * lookup on this kernel version. */
	ndev = dst->neighbour->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		/* loopback route: switch to the real device that owns the
		 * address.  NOTE(review): ip_dev_find() takes a device
		 * reference that is never dropped here, and its result is
		 * not checked for NULL -- verify upstream fix. */
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst->neighbour->dev->name, ndev->name, mtu);
	}

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		/* offload runs on the underlying real device, not the
		 * VLAN pseudo-device */
		ndev = vlan_dev_real_dev(ndev);
		pr_info("rt dev %s, vlan -> %s.\n",
			dst->neighbour->dev->name, ndev->name);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;		/* route reference now owned by the csk */
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	/* prefer an explicitly configured source address over the one
	 * picked by the routing lookup */
	if (cdev->hbas[port]->ipv4addr)
		csk->saddr.sin_addr.s_addr = cdev->hbas[port]->ipv4addr;
	else
		csk->saddr.sin_addr.s_addr = rt->rt_src;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
578 | 578 | ||
579 | void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, | 579 | void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, |
580 | unsigned int opt) | 580 | unsigned int opt) |
581 | { | 581 | { |
582 | csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; | 582 | csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; |
583 | dst_confirm(csk->dst); | 583 | dst_confirm(csk->dst); |
584 | smp_mb(); | 584 | smp_mb(); |
585 | cxgbi_sock_set_state(csk, CTP_ESTABLISHED); | 585 | cxgbi_sock_set_state(csk, CTP_ESTABLISHED); |
586 | } | 586 | } |
587 | EXPORT_SYMBOL_GPL(cxgbi_sock_established); | 587 | EXPORT_SYMBOL_GPL(cxgbi_sock_established); |
588 | 588 | ||
589 | static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) | 589 | static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) |
590 | { | 590 | { |
591 | log_debug(1 << CXGBI_DBG_SOCK, | 591 | log_debug(1 << CXGBI_DBG_SOCK, |
592 | "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", | 592 | "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", |
593 | csk, csk->state, csk->flags, csk->user_data); | 593 | csk, csk->state, csk->flags, csk->user_data); |
594 | 594 | ||
595 | if (csk->state != CTP_ESTABLISHED) { | 595 | if (csk->state != CTP_ESTABLISHED) { |
596 | read_lock(&csk->callback_lock); | 596 | read_lock_bh(&csk->callback_lock); |
597 | if (csk->user_data) | 597 | if (csk->user_data) |
598 | iscsi_conn_failure(csk->user_data, | 598 | iscsi_conn_failure(csk->user_data, |
599 | ISCSI_ERR_CONN_FAILED); | 599 | ISCSI_ERR_CONN_FAILED); |
600 | read_unlock(&csk->callback_lock); | 600 | read_unlock_bh(&csk->callback_lock); |
601 | } | 601 | } |
602 | } | 602 | } |
603 | 603 | ||
/*
 * Final teardown of an offloaded connection: release the local port,
 * the cached route and the hardware offload resources, mark the csk
 * CLOSED, notify the iscsi layer and drop the connection reference.
 * Returns early (doing nothing but setting the close-needed flag) when
 * still in ACTIVE_OPEN or already CLOSED.
 */
void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	/* drop the reference held for the lifetime of the connection */
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
621 | 621 | ||
622 | static void need_active_close(struct cxgbi_sock *csk) | 622 | static void need_active_close(struct cxgbi_sock *csk) |
623 | { | 623 | { |
624 | int data_lost; | 624 | int data_lost; |
625 | int close_req = 0; | 625 | int close_req = 0; |
626 | 626 | ||
627 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", | 627 | log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", |
628 | csk, (csk)->state, (csk)->flags, (csk)->tid); | 628 | csk, (csk)->state, (csk)->flags, (csk)->tid); |
629 | spin_lock_bh(&csk->lock); | 629 | spin_lock_bh(&csk->lock); |
630 | dst_confirm(csk->dst); | 630 | dst_confirm(csk->dst); |
631 | data_lost = skb_queue_len(&csk->receive_queue); | 631 | data_lost = skb_queue_len(&csk->receive_queue); |
632 | __skb_queue_purge(&csk->receive_queue); | 632 | __skb_queue_purge(&csk->receive_queue); |
633 | 633 | ||
634 | if (csk->state == CTP_ACTIVE_OPEN) | 634 | if (csk->state == CTP_ACTIVE_OPEN) |
635 | cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); | 635 | cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); |
636 | else if (csk->state == CTP_ESTABLISHED) { | 636 | else if (csk->state == CTP_ESTABLISHED) { |
637 | close_req = 1; | 637 | close_req = 1; |
638 | cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); | 638 | cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); |
639 | } else if (csk->state == CTP_PASSIVE_CLOSE) { | 639 | } else if (csk->state == CTP_PASSIVE_CLOSE) { |
640 | close_req = 1; | 640 | close_req = 1; |
641 | cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); | 641 | cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); |
642 | } | 642 | } |
643 | 643 | ||
644 | if (close_req) { | 644 | if (close_req) { |
645 | if (data_lost) | 645 | if (data_lost) |
646 | csk->cdev->csk_send_abort_req(csk); | 646 | csk->cdev->csk_send_abort_req(csk); |
647 | else | 647 | else |
648 | csk->cdev->csk_send_close_req(csk); | 648 | csk->cdev->csk_send_close_req(csk); |
649 | } | 649 | } |
650 | 650 | ||
651 | spin_unlock_bh(&csk->lock); | 651 | spin_unlock_bh(&csk->lock); |
652 | } | 652 | } |
653 | 653 | ||
/*
 * Fail a connect attempt: record @errno on the csk and run the full
 * close path.  Callers in this file invoke this with csk->lock held.
 */
void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	/* CONNECTING is not ACTIVE_OPEN, so cxgbi_sock_closed() will not
	 * take its early-return path and actually tears things down */
	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
667 | 667 | ||
/*
 * ARP resolution for an active-open request failed: fail the connect
 * with -EHOSTUNREACH (if still in ACTIVE_OPEN) and free the request
 * skb.  @handle is unused; the csk travels in skb->sk.
 */
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	/* hold a temporary reference across the locked section */
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);
683 | 683 | ||
/*
 * Handle an ABORT_RPL completion from hardware.  The close only
 * completes on the second event: the first arrival merely sets
 * ABORT_RPL_RCVD, the second clears both abort flags and finishes
 * the teardown via cxgbi_sock_closed().
 */
void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			/* peer abort plus our reply at the same time is
			 * unexpected -- log it, but close anyway */
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);
704 | 704 | ||
/*
 * Handle the peer's close (FIN equivalent) and advance the connection
 * state machine accordingly.  Ignored while an abort reply is still
 * pending, since the abort path owns the teardown then.
 */
void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		/* peer closed first: we now wait for our own close */
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		/* both sides have closed: done */
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
737 | 737 | ||
/*
 * Handle the hardware's reply to our close request.  @snd_nxt is the
 * next send sequence number reported by hardware; everything before it
 * is acknowledged.  Ignored while an abort reply is pending.
 */
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	/* our FIN occupies one sequence number; snd_nxt - 1 is acked */
	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
768 | 768 | ||
/*
 * Process a work-request completion: return @credits to the csk,
 * retire completed WR skbs from the pending queue, advance snd_una
 * (when @seq_chk is set) and restart transmission if anything is
 * still queued.
 *
 * Each queued WR skb stores its credit cost in skb->csum (the field
 * is reused for accounting on this queue).
 */
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
			csk, csk->state, csk->flags, csk->tid, credits,
			csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	/* unacked credits can never exceed what is actually outstanding */
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			/* more credits returned than WRs outstanding */
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			/* partial completion: reduce the head WR's cost and
			 * leave it queued for the next ack */
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			/* stale/out-of-order ack: ignore */
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
832 | 832 | ||
833 | static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, | 833 | static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, |
834 | unsigned short mtu) | 834 | unsigned short mtu) |
835 | { | 835 | { |
836 | int i = 0; | 836 | int i = 0; |
837 | 837 | ||
838 | while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) | 838 | while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) |
839 | ++i; | 839 | ++i; |
840 | 840 | ||
841 | return i; | 841 | return i; |
842 | } | 842 | } |
843 | 843 | ||
/*
 * Pick the hardware MTU table index for @csk given the path MTU.
 * Also computes csk->advmss, clamped between cdev->mtus[0]-40 and
 * pmtu-40 (40 = IPv4 + TCP header bytes).
 */
unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric(dst, RTAX_ADVMSS);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
860 | 860 | ||
/*
 * Stamp @skb with the tx sequence number it will occupy, then append
 * it to the pending-write queue.  Callers in this file hold csk->lock
 * around this -- presumably required; confirm for external callers.
 */
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);
867 | 867 | ||
/*
 * Drop every work-request skb still waiting for a hardware completion.
 */
void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = cxgbi_sock_dequeue_wr(csk);

	while (skb) {
		kfree_skb(skb);
		skb = cxgbi_sock_dequeue_wr(csk);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);
876 | 876 | ||
/*
 * Sanity check the WR credit accounting: available credits plus the
 * credits consumed by pending WRs must always equal the maximum.
 * Logs an error on violation; does not attempt to repair.
 */
void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
886 | 886 | ||
/*
 * Queue a chain of PDU skbs for transmission on @csk and kick the
 * hardware tx path.
 *
 * Returns the number of payload bytes accepted, or a negative errno:
 *   -EAGAIN   connection not (yet) established
 *   -EPIPE    connection already failed (csk->err returned if set)
 *   -ENOBUFS  send window full
 *   -EINVAL   skb headroom too small or too many fragments
 *
 * Note the error mapping at out_err: a first-skb -EPIPE is converted
 * to csk->err when one is recorded.
 */
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	/* refuse to queue beyond the negotiated send window */
	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		/* the hardware header is prepended in place, so each skb
		 * must reserve skb_tx_rsvd bytes of headroom */
		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		/* detach from the chain before queueing individually */
		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
			cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
961 | 961 | ||
962 | /* | 962 | /* |
963 | * Direct Data Placement - | 963 | * Direct Data Placement - |
964 | * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted | 964 | * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted |
965 | * final destination host-memory buffers based on the Initiator Task Tag (ITT) | 965 | * final destination host-memory buffers based on the Initiator Task Tag (ITT) |
966 | * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. | 966 | * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. |
967 | * The host memory address is programmed into h/w in the format of pagepod | 967 | * The host memory address is programmed into h/w in the format of pagepod |
968 | * entries. | 968 | * entries. |
969 | * The location of the pagepod entry is encoded into ddp tag which is used as | 969 | * The location of the pagepod entry is encoded into ddp tag which is used as |
970 | * the base for ITT/TTT. | 970 | * the base for ITT/TTT. |
971 | */ | 971 | */ |
972 | 972 | ||
973 | static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; | 973 | static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; |
974 | static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; | 974 | static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; |
975 | static unsigned char page_idx = DDP_PGIDX_MAX; | 975 | static unsigned char page_idx = DDP_PGIDX_MAX; |
976 | 976 | ||
977 | static unsigned char sw_tag_idx_bits; | 977 | static unsigned char sw_tag_idx_bits; |
978 | static unsigned char sw_tag_age_bits; | 978 | static unsigned char sw_tag_age_bits; |
979 | 979 | ||
980 | /* | 980 | /* |
981 | * Direct-Data Placement page size adjustment | 981 | * Direct-Data Placement page size adjustment |
982 | */ | 982 | */ |
/*
 * Rebuild the DDP page-size tables so that the smallest supported DDP
 * page equals the kernel's PAGE_SIZE, with each subsequent entry
 * doubling.  Fails with -EINVAL when PAGE_SIZE is smaller than the
 * hardware's minimum DDP page size.
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}
1004 | 1004 | ||
1005 | static int ddp_find_page_index(unsigned long pgsz) | 1005 | static int ddp_find_page_index(unsigned long pgsz) |
1006 | { | 1006 | { |
1007 | int i; | 1007 | int i; |
1008 | 1008 | ||
1009 | for (i = 0; i < DDP_PGIDX_MAX; i++) { | 1009 | for (i = 0; i < DDP_PGIDX_MAX; i++) { |
1010 | if (pgsz == (1UL << ddp_page_shift[i])) | 1010 | if (pgsz == (1UL << ddp_page_shift[i])) |
1011 | return i; | 1011 | return i; |
1012 | } | 1012 | } |
1013 | pr_info("ddp page size %lu not supported.\n", pgsz); | 1013 | pr_info("ddp page size %lu not supported.\n", pgsz); |
1014 | return DDP_PGIDX_MAX; | 1014 | return DDP_PGIDX_MAX; |
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | static void ddp_setup_host_page_size(void) | 1017 | static void ddp_setup_host_page_size(void) |
1018 | { | 1018 | { |
1019 | if (page_idx == DDP_PGIDX_MAX) { | 1019 | if (page_idx == DDP_PGIDX_MAX) { |
1020 | page_idx = ddp_find_page_index(PAGE_SIZE); | 1020 | page_idx = ddp_find_page_index(PAGE_SIZE); |
1021 | 1021 | ||
1022 | if (page_idx == DDP_PGIDX_MAX) { | 1022 | if (page_idx == DDP_PGIDX_MAX) { |
1023 | pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); | 1023 | pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); |
1024 | if (ddp_adjust_page_table() < 0) { | 1024 | if (ddp_adjust_page_table() < 0) { |
1025 | pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); | 1025 | pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); |
1026 | return; | 1026 | return; |
1027 | } | 1027 | } |
1028 | page_idx = ddp_find_page_index(PAGE_SIZE); | 1028 | page_idx = ddp_find_page_index(PAGE_SIZE); |
1029 | } | 1029 | } |
1030 | pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); | 1030 | pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); |
1031 | } | 1031 | } |
1032 | } | 1032 | } |
1033 | 1033 | ||
1034 | void cxgbi_ddp_page_size_factor(int *pgsz_factor) | 1034 | void cxgbi_ddp_page_size_factor(int *pgsz_factor) |
1035 | { | 1035 | { |
1036 | int i; | 1036 | int i; |
1037 | 1037 | ||
1038 | for (i = 0; i < DDP_PGIDX_MAX; i++) | 1038 | for (i = 0; i < DDP_PGIDX_MAX; i++) |
1039 | pgsz_factor[i] = ddp_page_order[i]; | 1039 | pgsz_factor[i] = ddp_page_order[i]; |
1040 | } | 1040 | } |
1041 | EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); | 1041 | EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); |
1042 | 1042 | ||
1043 | /* | 1043 | /* |
1044 | * DDP setup & teardown | 1044 | * DDP setup & teardown |
1045 | */ | 1045 | */ |
1046 | 1046 | ||
1047 | void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, | 1047 | void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, |
1048 | struct cxgbi_pagepod_hdr *hdr, | 1048 | struct cxgbi_pagepod_hdr *hdr, |
1049 | struct cxgbi_gather_list *gl, unsigned int gidx) | 1049 | struct cxgbi_gather_list *gl, unsigned int gidx) |
1050 | { | 1050 | { |
1051 | int i; | 1051 | int i; |
1052 | 1052 | ||
1053 | memcpy(ppod, hdr, sizeof(*hdr)); | 1053 | memcpy(ppod, hdr, sizeof(*hdr)); |
1054 | for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { | 1054 | for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { |
1055 | ppod->addr[i] = gidx < gl->nelem ? | 1055 | ppod->addr[i] = gidx < gl->nelem ? |
1056 | cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; | 1056 | cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; |
1057 | } | 1057 | } |
1058 | } | 1058 | } |
1059 | EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); | 1059 | EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); |
1060 | 1060 | ||
/* zero a pagepod entry */
void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
1066 | 1066 | ||
/*
 * Find @count consecutive free slots in ddp->gl_map within [start, max)
 * and claim them for @gl.
 * Returns the first slot index of the run on success, -EBUSY when no
 * suitable run exists.  The scan and the claim both happen under
 * ddp->map_lock so concurrent reservations cannot grab overlapping runs.
 */
static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					unsigned int start, unsigned int max,
					unsigned int count,
					struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	/* only start a run at i if all of [i, i + count) fits below max */
	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		/* is [i, i + count) entirely free? */
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			/* yes: claim the run for this gather list */
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		/* no: resume just past the occupied slot that broke the run */
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}
1101 | 1101 | ||
1102 | static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, | 1102 | static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, |
1103 | int start, int count) | 1103 | int start, int count) |
1104 | { | 1104 | { |
1105 | spin_lock(&ddp->map_lock); | 1105 | spin_lock(&ddp->map_lock); |
1106 | memset(&ddp->gl_map[start], 0, | 1106 | memset(&ddp->gl_map[start], 0, |
1107 | count * sizeof(struct cxgbi_gather_list *)); | 1107 | count * sizeof(struct cxgbi_gather_list *)); |
1108 | spin_unlock(&ddp->map_lock); | 1108 | spin_unlock(&ddp->map_lock); |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | static inline void ddp_gl_unmap(struct pci_dev *pdev, | 1111 | static inline void ddp_gl_unmap(struct pci_dev *pdev, |
1112 | struct cxgbi_gather_list *gl) | 1112 | struct cxgbi_gather_list *gl) |
1113 | { | 1113 | { |
1114 | int i; | 1114 | int i; |
1115 | 1115 | ||
1116 | for (i = 0; i < gl->nelem; i++) | 1116 | for (i = 0; i < gl->nelem; i++) |
1117 | dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, | 1117 | dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, |
1118 | PCI_DMA_FROMDEVICE); | 1118 | PCI_DMA_FROMDEVICE); |
1119 | } | 1119 | } |
1120 | 1120 | ||
/*
 * dma-map every page of the gather list for device-to-host transfers.
 * Returns the number of pages mapped (gl->nelem) on success.  On a
 * mapping failure, unmaps the pages already mapped and returns -EINVAL.
 */
static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;
unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		/* temporarily shrink nelem so ddp_gl_unmap() only touches
		 * the successfully mapped prefix [0, i), then restore it */
		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}
1148 | 1148 | ||
/* undo the dma mapping of a gather list, then free it */
static void ddp_release_gl(struct cxgbi_gather_list *gather,
			   struct pci_dev *pci)
{
	ddp_gl_unmap(pci, gather);
	kfree(gather);
}
1155 | 1155 | ||
/*
 * Build and dma-map a cxgbi_gather_list describing @sgl.
 * For ddp the sgl must collapse into a list of full pages: only the
 * first page may start at a non-zero offset and only the last may end
 * short.  Returns NULL when the transfer is below DDP_THRESHOLD, when
 * the sgl layout is not ddp-able, or on allocation/mapping failure.
 * On success, release with ddp_release_gl().
 */
static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
					     struct scatterlist *sgl,
					     unsigned int sgcnt,
					     struct pci_dev *pdev,
					     gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	/* one allocation: the struct, then phys_addr[npages], then
	 * pages[npages] */
	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
			       sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	/* walk the sgl, coalescing entries contiguous on the same page;
	 * each new page fills the next gl->pages[] slot */
	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j++;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	/* trim nelem to the number of distinct pages actually collected */
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
1239 | 1239 | ||
/*
 * Release the ddp resources encoded in @tag: decode the pod index from
 * the tag bits, clear the pagepods in hw, free the gl_map slots and the
 * dma-mapped gather list.  Warns and bails on a stale/bogus tag.
 */
static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			/* slot not (or no longer) backed by a gather list */
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}
1266 | 1266 | ||
/*
 * Reserve pagepod entries for @gl, program them into hw, and build the
 * final ddp tag: the reserved pod index is folded into the bits above
 * PPOD_IDX_SHIFT on top of the sw tag base.  *tagp receives the tag.
 * Returns 0 on success; -EBUSY/-ENOMEM or the csk_ddp_set() error
 * otherwise, with the map slots released again.
 */
static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	/* resume searching after the previous allocation; wrap back to 0
	 * if the tail of the map has no free run */
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
						npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      ddp->nppods, npods,
					      gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
						      npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	/* optional per-driver hook: preallocate skbs for the pod writes */
	if (cdev->csk_ddp_alloc_gl_skb) {
		err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
		if (err < 0)
			goto unmark_entries;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	/* program the pagepods into the adapter */
	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0) {
		if (cdev->csk_ddp_free_gl_skb)
			cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
		goto unmark_entries;
	}

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
1335 | 1335 | ||
1336 | int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, | 1336 | int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, |
1337 | unsigned int sw_tag, unsigned int xferlen, | 1337 | unsigned int sw_tag, unsigned int xferlen, |
1338 | struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) | 1338 | struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) |
1339 | { | 1339 | { |
1340 | struct cxgbi_device *cdev = csk->cdev; | 1340 | struct cxgbi_device *cdev = csk->cdev; |
1341 | struct cxgbi_tag_format *tformat = &cdev->tag_format; | 1341 | struct cxgbi_tag_format *tformat = &cdev->tag_format; |
1342 | struct cxgbi_gather_list *gl; | 1342 | struct cxgbi_gather_list *gl; |
1343 | int err; | 1343 | int err; |
1344 | 1344 | ||
1345 | if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || | 1345 | if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || |
1346 | xferlen < DDP_THRESHOLD) { | 1346 | xferlen < DDP_THRESHOLD) { |
1347 | log_debug(1 << CXGBI_DBG_DDP, | 1347 | log_debug(1 << CXGBI_DBG_DDP, |
1348 | "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); | 1348 | "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); |
1349 | return -EINVAL; | 1349 | return -EINVAL; |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { | 1352 | if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { |
1353 | log_debug(1 << CXGBI_DBG_DDP, | 1353 | log_debug(1 << CXGBI_DBG_DDP, |
1354 | "sw_tag 0x%x NOT usable.\n", sw_tag); | 1354 | "sw_tag 0x%x NOT usable.\n", sw_tag); |
1355 | return -EINVAL; | 1355 | return -EINVAL; |
1356 | } | 1356 | } |
1357 | 1357 | ||
1358 | gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); | 1358 | gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); |
1359 | if (!gl) | 1359 | if (!gl) |
1360 | return -ENOMEM; | 1360 | return -ENOMEM; |
1361 | 1361 | ||
1362 | err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); | 1362 | err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); |
1363 | if (err < 0) | 1363 | if (err < 0) |
1364 | ddp_release_gl(gl, cdev->pdev); | 1364 | ddp_release_gl(gl, cdev->pdev); |
1365 | 1365 | ||
1366 | return err; | 1366 | return err; |
1367 | } | 1367 | } |
1368 | 1368 | ||
/*
 * Last-reference destructor for the per-device ddp state (via kref_put
 * from cxgbi_ddp_cleanup()).  Walks the pod map and frees any gather
 * lists still registered, then the big ddp allocation itself.
 * NOTE(review): the gls are kfree'd here without ddp_gl_unmap() --
 * confirm their dma mappings are torn down elsewhere on this path.
 */
static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						struct cxgbi_ddp_info,
						refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			if (cdev->csk_ddp_free_gl_skb)
				cdev->csk_ddp_free_gl_skb(ddp, i, npods);
			/* a gl occupies npods consecutive map slots */
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}
1395 | 1395 | ||
1396 | int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) | 1396 | int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) |
1397 | { | 1397 | { |
1398 | struct cxgbi_ddp_info *ddp = cdev->ddp; | 1398 | struct cxgbi_ddp_info *ddp = cdev->ddp; |
1399 | 1399 | ||
1400 | log_debug(1 << CXGBI_DBG_DDP, | 1400 | log_debug(1 << CXGBI_DBG_DDP, |
1401 | "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); | 1401 | "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); |
1402 | cdev->ddp = NULL; | 1402 | cdev->ddp = NULL; |
1403 | if (ddp) | 1403 | if (ddp) |
1404 | return kref_put(&ddp->refcnt, ddp_destroy); | 1404 | return kref_put(&ddp->refcnt, ddp_destroy); |
1405 | return 0; | 1405 | return 0; |
1406 | } | 1406 | } |
1407 | EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); | 1407 | EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); |
1408 | 1408 | ||
/*
 * Size and allocate the per-device ddp state.  The pagepod-index map
 * (gl_map) and the per-pod skb array (gl_skb) are carved out of a
 * single big allocation following the cxgbi_ddp_info struct.  Also
 * derives the tag format (how many itt bits are reserved for the pod
 * index) and the max tx/rx pdu payload sizes.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	/* number of pagepods that fit in the [llimit, ulimit] hw region,
	 * clamped so the pod index fits in PPOD_IDX_MAX_SIZE bits */
	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	/* gl_map then gl_skb live in the tail of the allocation */
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
				ppmax * sizeof(struct cxgbi_gather_list *));
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	/* idx_last == nppods makes ddp_tag_reserve() start its search at 0 */
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;

	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	/* pdu payload limit: hw pkt size minus iSCSI non-payload length */
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
1472 | 1472 | ||
1473 | /* | 1473 | /* |
1474 | * APIs interacting with open-iscsi libraries | 1474 | * APIs interacting with open-iscsi libraries |
1475 | */ | 1475 | */ |
1476 | 1476 | ||
/* four zero bytes; presumably the pad source for 4-byte pdu alignment --
 * the use site is not visible in this chunk */
static unsigned char padding[4];
1478 | 1478 | ||
1479 | static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) | 1479 | static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) |
1480 | { | 1480 | { |
1481 | struct scsi_cmnd *sc = task->sc; | 1481 | struct scsi_cmnd *sc = task->sc; |
1482 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | 1482 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; |
1483 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 1483 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
1484 | struct cxgbi_hba *chba = cconn->chba; | 1484 | struct cxgbi_hba *chba = cconn->chba; |
1485 | struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; | 1485 | struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; |
1486 | u32 tag = ntohl((__force u32)hdr_itt); | 1486 | u32 tag = ntohl((__force u32)hdr_itt); |
1487 | 1487 | ||
1488 | log_debug(1 << CXGBI_DBG_DDP, | 1488 | log_debug(1 << CXGBI_DBG_DDP, |
1489 | "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); | 1489 | "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); |
1490 | if (sc && | 1490 | if (sc && |
1491 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && | 1491 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && |
1492 | cxgbi_is_ddp_tag(tformat, tag)) | 1492 | cxgbi_is_ddp_tag(tformat, tag)) |
1493 | ddp_tag_release(chba, tag); | 1493 | ddp_tag_release(chba, tag); |
1494 | } | 1494 | } |
1495 | 1495 | ||
1496 | static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) | 1496 | static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) |
1497 | { | 1497 | { |
1498 | struct scsi_cmnd *sc = task->sc; | 1498 | struct scsi_cmnd *sc = task->sc; |
1499 | struct iscsi_conn *conn = task->conn; | 1499 | struct iscsi_conn *conn = task->conn; |
1500 | struct iscsi_session *sess = conn->session; | 1500 | struct iscsi_session *sess = conn->session; |
1501 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1501 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1502 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 1502 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
1503 | struct cxgbi_hba *chba = cconn->chba; | 1503 | struct cxgbi_hba *chba = cconn->chba; |
1504 | struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; | 1504 | struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; |
1505 | u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; | 1505 | u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; |
1506 | u32 tag = 0; | 1506 | u32 tag = 0; |
1507 | int err = -EINVAL; | 1507 | int err = -EINVAL; |
1508 | 1508 | ||
1509 | if (sc && | 1509 | if (sc && |
1510 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { | 1510 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { |
1511 | err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, | 1511 | err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, |
1512 | scsi_in(sc)->length, | 1512 | scsi_in(sc)->length, |
1513 | scsi_in(sc)->table.sgl, | 1513 | scsi_in(sc)->table.sgl, |
1514 | scsi_in(sc)->table.nents, | 1514 | scsi_in(sc)->table.nents, |
1515 | GFP_ATOMIC); | 1515 | GFP_ATOMIC); |
1516 | if (err < 0) | 1516 | if (err < 0) |
1517 | log_debug(1 << CXGBI_DBG_DDP, | 1517 | log_debug(1 << CXGBI_DBG_DDP, |
1518 | "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", | 1518 | "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", |
1519 | cconn->cep->csk, task, scsi_in(sc)->length, | 1519 | cconn->cep->csk, task, scsi_in(sc)->length, |
1520 | scsi_in(sc)->table.nents); | 1520 | scsi_in(sc)->table.nents); |
1521 | } | 1521 | } |
1522 | 1522 | ||
1523 | if (err < 0) | 1523 | if (err < 0) |
1524 | tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); | 1524 | tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); |
1525 | /* the itt need to sent in big-endian order */ | 1525 | /* the itt need to sent in big-endian order */ |
1526 | *hdr_itt = (__force itt_t)htonl(tag); | 1526 | *hdr_itt = (__force itt_t)htonl(tag); |
1527 | 1527 | ||
1528 | log_debug(1 << CXGBI_DBG_DDP, | 1528 | log_debug(1 << CXGBI_DBG_DDP, |
1529 | "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", | 1529 | "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", |
1530 | chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); | 1530 | chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); |
1531 | return 0; | 1531 | return 0; |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) | 1534 | void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) |
1535 | { | 1535 | { |
1536 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1536 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1537 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 1537 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
1538 | struct cxgbi_device *cdev = cconn->chba->cdev; | 1538 | struct cxgbi_device *cdev = cconn->chba->cdev; |
1539 | u32 tag = ntohl((__force u32) itt); | 1539 | u32 tag = ntohl((__force u32) itt); |
1540 | u32 sw_bits; | 1540 | u32 sw_bits; |
1541 | 1541 | ||
1542 | sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); | 1542 | sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); |
1543 | if (idx) | 1543 | if (idx) |
1544 | *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); | 1544 | *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); |
1545 | if (age) | 1545 | if (age) |
1546 | *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; | 1546 | *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; |
1547 | 1547 | ||
1548 | log_debug(1 << CXGBI_DBG_DDP, | 1548 | log_debug(1 << CXGBI_DBG_DDP, |
1549 | "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", | 1549 | "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", |
1550 | cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, | 1550 | cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, |
1551 | age ? *age : 0xFF); | 1551 | age ? *age : 0xFF); |
1552 | } | 1552 | } |
1553 | EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); | 1553 | EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); |
1554 | 1554 | ||
1555 | void cxgbi_conn_tx_open(struct cxgbi_sock *csk) | 1555 | void cxgbi_conn_tx_open(struct cxgbi_sock *csk) |
1556 | { | 1556 | { |
1557 | struct iscsi_conn *conn = csk->user_data; | 1557 | struct iscsi_conn *conn = csk->user_data; |
1558 | 1558 | ||
1559 | if (conn) { | 1559 | if (conn) { |
1560 | log_debug(1 << CXGBI_DBG_SOCK, | 1560 | log_debug(1 << CXGBI_DBG_SOCK, |
1561 | "csk 0x%p, cid %d.\n", csk, conn->id); | 1561 | "csk 0x%p, cid %d.\n", csk, conn->id); |
1562 | iscsi_conn_queue_work(conn); | 1562 | iscsi_conn_queue_work(conn); |
1563 | } | 1563 | } |
1564 | } | 1564 | } |
1565 | EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); | 1565 | EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); |
1566 | 1566 | ||
1567 | /* | 1567 | /* |
1568 | * pdu receive, interact with libiscsi_tcp | 1568 | * pdu receive, interact with libiscsi_tcp |
1569 | */ | 1569 | */ |
1570 | static inline int read_pdu_skb(struct iscsi_conn *conn, | 1570 | static inline int read_pdu_skb(struct iscsi_conn *conn, |
1571 | struct sk_buff *skb, | 1571 | struct sk_buff *skb, |
1572 | unsigned int offset, | 1572 | unsigned int offset, |
1573 | int offloaded) | 1573 | int offloaded) |
1574 | { | 1574 | { |
1575 | int status = 0; | 1575 | int status = 0; |
1576 | int bytes_read; | 1576 | int bytes_read; |
1577 | 1577 | ||
1578 | bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); | 1578 | bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); |
1579 | switch (status) { | 1579 | switch (status) { |
1580 | case ISCSI_TCP_CONN_ERR: | 1580 | case ISCSI_TCP_CONN_ERR: |
1581 | pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", | 1581 | pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", |
1582 | skb, offset, offloaded); | 1582 | skb, offset, offloaded); |
1583 | return -EIO; | 1583 | return -EIO; |
1584 | case ISCSI_TCP_SUSPENDED: | 1584 | case ISCSI_TCP_SUSPENDED: |
1585 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1585 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1586 | "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", | 1586 | "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", |
1587 | skb, offset, offloaded, bytes_read); | 1587 | skb, offset, offloaded, bytes_read); |
1588 | /* no transfer - just have caller flush queue */ | 1588 | /* no transfer - just have caller flush queue */ |
1589 | return bytes_read; | 1589 | return bytes_read; |
1590 | case ISCSI_TCP_SKB_DONE: | 1590 | case ISCSI_TCP_SKB_DONE: |
1591 | pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", | 1591 | pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", |
1592 | skb, offset, offloaded); | 1592 | skb, offset, offloaded); |
1593 | /* | 1593 | /* |
1594 | * pdus should always fit in the skb and we should get | 1594 | * pdus should always fit in the skb and we should get |
1595 | * segment done notifcation. | 1595 | * segment done notifcation. |
1596 | */ | 1596 | */ |
1597 | iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); | 1597 | iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); |
1598 | return -EFAULT; | 1598 | return -EFAULT; |
1599 | case ISCSI_TCP_SEGMENT_DONE: | 1599 | case ISCSI_TCP_SEGMENT_DONE: |
1600 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1600 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1601 | "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", | 1601 | "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", |
1602 | skb, offset, offloaded, bytes_read); | 1602 | skb, offset, offloaded, bytes_read); |
1603 | return bytes_read; | 1603 | return bytes_read; |
1604 | default: | 1604 | default: |
1605 | pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", | 1605 | pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", |
1606 | skb, offset, offloaded, status); | 1606 | skb, offset, offloaded, status); |
1607 | return -EINVAL; | 1607 | return -EINVAL; |
1608 | } | 1608 | } |
1609 | } | 1609 | } |
1610 | 1610 | ||
1611 | static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) | 1611 | static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) |
1612 | { | 1612 | { |
1613 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1613 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1614 | 1614 | ||
1615 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1615 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1616 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", | 1616 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", |
1617 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); | 1617 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); |
1618 | 1618 | ||
1619 | if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { | 1619 | if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { |
1620 | pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); | 1620 | pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); |
1621 | iscsi_conn_failure(conn, ISCSI_ERR_PROTO); | 1621 | iscsi_conn_failure(conn, ISCSI_ERR_PROTO); |
1622 | return -EIO; | 1622 | return -EIO; |
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | if (conn->hdrdgst_en && | 1625 | if (conn->hdrdgst_en && |
1626 | cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { | 1626 | cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { |
1627 | pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); | 1627 | pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); |
1628 | iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); | 1628 | iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); |
1629 | return -EIO; | 1629 | return -EIO; |
1630 | } | 1630 | } |
1631 | 1631 | ||
1632 | return read_pdu_skb(conn, skb, 0, 0); | 1632 | return read_pdu_skb(conn, skb, 0, 0); |
1633 | } | 1633 | } |
1634 | 1634 | ||
1635 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, | 1635 | static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, |
1636 | struct sk_buff *skb, unsigned int offset) | 1636 | struct sk_buff *skb, unsigned int offset) |
1637 | { | 1637 | { |
1638 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 1638 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
1639 | bool offloaded = 0; | 1639 | bool offloaded = 0; |
1640 | int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; | 1640 | int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; |
1641 | 1641 | ||
1642 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1642 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1643 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", | 1643 | "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", |
1644 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); | 1644 | conn, skb, skb->len, cxgbi_skcb_flags(skb)); |
1645 | 1645 | ||
1646 | if (conn->datadgst_en && | 1646 | if (conn->datadgst_en && |
1647 | cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { | 1647 | cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { |
1648 | pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", | 1648 | pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", |
1649 | conn, lskb, cxgbi_skcb_flags(lskb)); | 1649 | conn, lskb, cxgbi_skcb_flags(lskb)); |
1650 | iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); | 1650 | iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); |
1651 | return -EIO; | 1651 | return -EIO; |
1652 | } | 1652 | } |
1653 | 1653 | ||
1654 | if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) | 1654 | if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) |
1655 | return 0; | 1655 | return 0; |
1656 | 1656 | ||
1657 | /* coalesced, add header digest length */ | 1657 | /* coalesced, add header digest length */ |
1658 | if (lskb == skb && conn->hdrdgst_en) | 1658 | if (lskb == skb && conn->hdrdgst_en) |
1659 | offset += ISCSI_DIGEST_SIZE; | 1659 | offset += ISCSI_DIGEST_SIZE; |
1660 | 1660 | ||
1661 | if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) | 1661 | if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) |
1662 | offloaded = 1; | 1662 | offloaded = 1; |
1663 | 1663 | ||
1664 | if (opcode == ISCSI_OP_SCSI_DATA_IN) | 1664 | if (opcode == ISCSI_OP_SCSI_DATA_IN) |
1665 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1665 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1666 | "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", | 1666 | "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", |
1667 | skb, opcode, ntohl(tcp_conn->in.hdr->itt), | 1667 | skb, opcode, ntohl(tcp_conn->in.hdr->itt), |
1668 | tcp_conn->in.datalen, offloaded ? "is" : "not"); | 1668 | tcp_conn->in.datalen, offloaded ? "is" : "not"); |
1669 | 1669 | ||
1670 | return read_pdu_skb(conn, skb, offset, offloaded); | 1670 | return read_pdu_skb(conn, skb, offset, offloaded); |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) | 1673 | static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) |
1674 | { | 1674 | { |
1675 | struct cxgbi_device *cdev = csk->cdev; | 1675 | struct cxgbi_device *cdev = csk->cdev; |
1676 | int must_send; | 1676 | int must_send; |
1677 | u32 credits; | 1677 | u32 credits; |
1678 | 1678 | ||
1679 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1679 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1680 | "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", | 1680 | "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", |
1681 | csk, csk->state, csk->flags, csk->tid, csk->copied_seq, | 1681 | csk, csk->state, csk->flags, csk->tid, csk->copied_seq, |
1682 | csk->rcv_wup, cdev->rx_credit_thres, | 1682 | csk->rcv_wup, cdev->rx_credit_thres, |
1683 | cdev->rcv_win); | 1683 | cdev->rcv_win); |
1684 | 1684 | ||
1685 | if (csk->state != CTP_ESTABLISHED) | 1685 | if (csk->state != CTP_ESTABLISHED) |
1686 | return; | 1686 | return; |
1687 | 1687 | ||
1688 | credits = csk->copied_seq - csk->rcv_wup; | 1688 | credits = csk->copied_seq - csk->rcv_wup; |
1689 | if (unlikely(!credits)) | 1689 | if (unlikely(!credits)) |
1690 | return; | 1690 | return; |
1691 | if (unlikely(cdev->rx_credit_thres == 0)) | 1691 | if (unlikely(cdev->rx_credit_thres == 0)) |
1692 | return; | 1692 | return; |
1693 | 1693 | ||
1694 | must_send = credits + 16384 >= cdev->rcv_win; | 1694 | must_send = credits + 16384 >= cdev->rcv_win; |
1695 | if (must_send || credits >= cdev->rx_credit_thres) | 1695 | if (must_send || credits >= cdev->rx_credit_thres) |
1696 | csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); | 1696 | csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); |
1697 | } | 1697 | } |
1698 | 1698 | ||
1699 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) | 1699 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) |
1700 | { | 1700 | { |
1701 | struct cxgbi_device *cdev = csk->cdev; | 1701 | struct cxgbi_device *cdev = csk->cdev; |
1702 | struct iscsi_conn *conn = csk->user_data; | 1702 | struct iscsi_conn *conn = csk->user_data; |
1703 | struct sk_buff *skb; | 1703 | struct sk_buff *skb; |
1704 | unsigned int read = 0; | 1704 | unsigned int read = 0; |
1705 | int err = 0; | 1705 | int err = 0; |
1706 | 1706 | ||
1707 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1707 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1708 | "csk 0x%p, conn 0x%p.\n", csk, conn); | 1708 | "csk 0x%p, conn 0x%p.\n", csk, conn); |
1709 | 1709 | ||
1710 | if (unlikely(!conn || conn->suspend_rx)) { | 1710 | if (unlikely(!conn || conn->suspend_rx)) { |
1711 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1711 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1712 | "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", | 1712 | "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", |
1713 | csk, conn, conn ? conn->id : 0xFF, | 1713 | csk, conn, conn ? conn->id : 0xFF, |
1714 | conn ? conn->suspend_rx : 0xFF); | 1714 | conn ? conn->suspend_rx : 0xFF); |
1715 | read_unlock(&csk->callback_lock); | ||
1716 | return; | 1715 | return; |
1717 | } | 1716 | } |
1718 | 1717 | ||
1719 | while (!err) { | 1718 | while (!err) { |
1720 | read_lock(&csk->callback_lock); | ||
1721 | skb = skb_peek(&csk->receive_queue); | 1719 | skb = skb_peek(&csk->receive_queue); |
1722 | if (!skb || | 1720 | if (!skb || |
1723 | !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { | 1721 | !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { |
1724 | if (skb) | 1722 | if (skb) |
1725 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1723 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1726 | "skb 0x%p, NOT ready 0x%lx.\n", | 1724 | "skb 0x%p, NOT ready 0x%lx.\n", |
1727 | skb, cxgbi_skcb_flags(skb)); | 1725 | skb, cxgbi_skcb_flags(skb)); |
1728 | read_unlock(&csk->callback_lock); | ||
1729 | break; | 1726 | break; |
1730 | } | 1727 | } |
1731 | __skb_unlink(skb, &csk->receive_queue); | 1728 | __skb_unlink(skb, &csk->receive_queue); |
1732 | read_unlock(&csk->callback_lock); | ||
1733 | 1729 | ||
1734 | read += cxgbi_skcb_rx_pdulen(skb); | 1730 | read += cxgbi_skcb_rx_pdulen(skb); |
1735 | log_debug(1 << CXGBI_DBG_PDU_RX, | 1731 | log_debug(1 << CXGBI_DBG_PDU_RX, |
1736 | "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", | 1732 | "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", |
1737 | csk, skb, skb->len, cxgbi_skcb_flags(skb), | 1733 | csk, skb, skb->len, cxgbi_skcb_flags(skb), |
1738 | cxgbi_skcb_rx_pdulen(skb)); | 1734 | cxgbi_skcb_rx_pdulen(skb)); |
1739 | 1735 | ||
1740 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { | 1736 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { |
1741 | err = skb_read_pdu_bhs(conn, skb); | 1737 | err = skb_read_pdu_bhs(conn, skb); |
1742 | if (err < 0) | 1738 | if (err < 0) { |
1743 | break; | 1739 | pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " |
1740 | "f 0x%lx, plen %u.\n", | ||
1741 | csk, skb, skb->len, | ||
1742 | cxgbi_skcb_flags(skb), | ||
1743 | cxgbi_skcb_rx_pdulen(skb)); | ||
1744 | goto skb_done; | ||
1745 | } | ||
1744 | err = skb_read_pdu_data(conn, skb, skb, | 1746 | err = skb_read_pdu_data(conn, skb, skb, |
1745 | err + cdev->skb_rx_extra); | 1747 | err + cdev->skb_rx_extra); |
1748 | if (err < 0) | ||
1749 | pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " | ||
1750 | "f 0x%lx, plen %u.\n", | ||
1751 | csk, skb, skb->len, | ||
1752 | cxgbi_skcb_flags(skb), | ||
1753 | cxgbi_skcb_rx_pdulen(skb)); | ||
1746 | } else { | 1754 | } else { |
1747 | err = skb_read_pdu_bhs(conn, skb); | 1755 | err = skb_read_pdu_bhs(conn, skb); |
1748 | if (err < 0) | 1756 | if (err < 0) { |
1749 | break; | 1757 | pr_err("bhs, csk 0x%p, skb 0x%p,%u, " |
1758 | "f 0x%lx, plen %u.\n", | ||
1759 | csk, skb, skb->len, | ||
1760 | cxgbi_skcb_flags(skb), | ||
1761 | cxgbi_skcb_rx_pdulen(skb)); | ||
1762 | goto skb_done; | ||
1763 | } | ||
1764 | |||
1750 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { | 1765 | if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { |
1751 | struct sk_buff *dskb; | 1766 | struct sk_buff *dskb; |
1752 | 1767 | ||
1753 | read_lock(&csk->callback_lock); | ||
1754 | dskb = skb_peek(&csk->receive_queue); | 1768 | dskb = skb_peek(&csk->receive_queue); |
1755 | if (!dskb) { | 1769 | if (!dskb) { |
1756 | read_unlock(&csk->callback_lock); | 1770 | pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," |
1757 | pr_err("csk 0x%p, NO data.\n", csk); | 1771 | " plen %u, NO data.\n", |
1758 | err = -EAGAIN; | 1772 | csk, skb, skb->len, |
1759 | break; | 1773 | cxgbi_skcb_flags(skb), |
1774 | cxgbi_skcb_rx_pdulen(skb)); | ||
1775 | err = -EIO; | ||
1776 | goto skb_done; | ||
1760 | } | 1777 | } |
1761 | __skb_unlink(dskb, &csk->receive_queue); | 1778 | __skb_unlink(dskb, &csk->receive_queue); |
1762 | read_unlock(&csk->callback_lock); | ||
1763 | 1779 | ||
1764 | err = skb_read_pdu_data(conn, skb, dskb, 0); | 1780 | err = skb_read_pdu_data(conn, skb, dskb, 0); |
1781 | if (err < 0) | ||
1782 | pr_err("data, csk 0x%p, skb 0x%p,%u, " | ||
1783 | "f 0x%lx, plen %u, dskb 0x%p," | ||
1784 | "%u.\n", | ||
1785 | csk, skb, skb->len, | ||
1786 | cxgbi_skcb_flags(skb), | ||
1787 | cxgbi_skcb_rx_pdulen(skb), | ||
1788 | dskb, dskb->len); | ||
1765 | __kfree_skb(dskb); | 1789 | __kfree_skb(dskb); |
1766 | } else | 1790 | } else |
1767 | err = skb_read_pdu_data(conn, skb, skb, 0); | 1791 | err = skb_read_pdu_data(conn, skb, skb, 0); |
1768 | } | 1792 | } |
1793 | skb_done: | ||
1794 | __kfree_skb(skb); | ||
1795 | |||
1769 | if (err < 0) | 1796 | if (err < 0) |
1770 | break; | 1797 | break; |
1771 | |||
1772 | __kfree_skb(skb); | ||
1773 | } | 1798 | } |
1774 | 1799 | ||
1775 | log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); | 1800 | log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); |
1776 | if (read) { | 1801 | if (read) { |
1777 | csk->copied_seq += read; | 1802 | csk->copied_seq += read; |
1778 | csk_return_rx_credits(csk, read); | 1803 | csk_return_rx_credits(csk, read); |
1779 | conn->rxdata_octets += read; | 1804 | conn->rxdata_octets += read; |
1780 | } | 1805 | } |
1781 | 1806 | ||
1782 | if (err < 0) { | 1807 | if (err < 0) { |
1783 | pr_info("csk 0x%p, 0x%p, rx failed %d.\n", csk, conn, err); | 1808 | pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", |
1809 | csk, conn, err, read); | ||
1784 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1810 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1785 | } | 1811 | } |
1786 | } | 1812 | } |
1787 | EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); | 1813 | EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); |
1788 | 1814 | ||
1789 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, | 1815 | static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, |
1790 | unsigned int offset, unsigned int *off, | 1816 | unsigned int offset, unsigned int *off, |
1791 | struct scatterlist **sgp) | 1817 | struct scatterlist **sgp) |
1792 | { | 1818 | { |
1793 | int i; | 1819 | int i; |
1794 | struct scatterlist *sg; | 1820 | struct scatterlist *sg; |
1795 | 1821 | ||
1796 | for_each_sg(sgl, sg, sgcnt, i) { | 1822 | for_each_sg(sgl, sg, sgcnt, i) { |
1797 | if (offset < sg->length) { | 1823 | if (offset < sg->length) { |
1798 | *off = offset; | 1824 | *off = offset; |
1799 | *sgp = sg; | 1825 | *sgp = sg; |
1800 | return 0; | 1826 | return 0; |
1801 | } | 1827 | } |
1802 | offset -= sg->length; | 1828 | offset -= sg->length; |
1803 | } | 1829 | } |
1804 | return -EFAULT; | 1830 | return -EFAULT; |
1805 | } | 1831 | } |
1806 | 1832 | ||
1807 | static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, | 1833 | static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, |
1808 | unsigned int dlen, skb_frag_t *frags, | 1834 | unsigned int dlen, skb_frag_t *frags, |
1809 | int frag_max) | 1835 | int frag_max) |
1810 | { | 1836 | { |
1811 | unsigned int datalen = dlen; | 1837 | unsigned int datalen = dlen; |
1812 | unsigned int sglen = sg->length - sgoffset; | 1838 | unsigned int sglen = sg->length - sgoffset; |
1813 | struct page *page = sg_page(sg); | 1839 | struct page *page = sg_page(sg); |
1814 | int i; | 1840 | int i; |
1815 | 1841 | ||
1816 | i = 0; | 1842 | i = 0; |
1817 | do { | 1843 | do { |
1818 | unsigned int copy; | 1844 | unsigned int copy; |
1819 | 1845 | ||
1820 | if (!sglen) { | 1846 | if (!sglen) { |
1821 | sg = sg_next(sg); | 1847 | sg = sg_next(sg); |
1822 | if (!sg) { | 1848 | if (!sg) { |
1823 | pr_warn("sg %d NULL, len %u/%u.\n", | 1849 | pr_warn("sg %d NULL, len %u/%u.\n", |
1824 | i, datalen, dlen); | 1850 | i, datalen, dlen); |
1825 | return -EINVAL; | 1851 | return -EINVAL; |
1826 | } | 1852 | } |
1827 | sgoffset = 0; | 1853 | sgoffset = 0; |
1828 | sglen = sg->length; | 1854 | sglen = sg->length; |
1829 | page = sg_page(sg); | 1855 | page = sg_page(sg); |
1830 | 1856 | ||
1831 | } | 1857 | } |
1832 | copy = min(datalen, sglen); | 1858 | copy = min(datalen, sglen); |
1833 | if (i && page == frags[i - 1].page && | 1859 | if (i && page == frags[i - 1].page && |
1834 | sgoffset + sg->offset == | 1860 | sgoffset + sg->offset == |
1835 | frags[i - 1].page_offset + frags[i - 1].size) { | 1861 | frags[i - 1].page_offset + frags[i - 1].size) { |
1836 | frags[i - 1].size += copy; | 1862 | frags[i - 1].size += copy; |
1837 | } else { | 1863 | } else { |
1838 | if (i >= frag_max) { | 1864 | if (i >= frag_max) { |
1839 | pr_warn("too many pages %u, dlen %u.\n", | 1865 | pr_warn("too many pages %u, dlen %u.\n", |
1840 | frag_max, dlen); | 1866 | frag_max, dlen); |
1841 | return -EINVAL; | 1867 | return -EINVAL; |
1842 | } | 1868 | } |
1843 | 1869 | ||
1844 | frags[i].page = page; | 1870 | frags[i].page = page; |
1845 | frags[i].page_offset = sg->offset + sgoffset; | 1871 | frags[i].page_offset = sg->offset + sgoffset; |
1846 | frags[i].size = copy; | 1872 | frags[i].size = copy; |
1847 | i++; | 1873 | i++; |
1848 | } | 1874 | } |
1849 | datalen -= copy; | 1875 | datalen -= copy; |
1850 | sgoffset += copy; | 1876 | sgoffset += copy; |
1851 | sglen -= copy; | 1877 | sglen -= copy; |
1852 | } while (datalen); | 1878 | } while (datalen); |
1853 | 1879 | ||
1854 | return i; | 1880 | return i; |
1855 | } | 1881 | } |
1856 | 1882 | ||
1857 | int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | 1883 | int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) |
1858 | { | 1884 | { |
1859 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | 1885 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; |
1860 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 1886 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
1861 | struct cxgbi_device *cdev = cconn->chba->cdev; | 1887 | struct cxgbi_device *cdev = cconn->chba->cdev; |
1862 | struct iscsi_conn *conn = task->conn; | 1888 | struct iscsi_conn *conn = task->conn; |
1863 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 1889 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
1864 | struct cxgbi_task_data *tdata = task->dd_data + sizeof(*tcp_task); | 1890 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
1865 | struct scsi_cmnd *sc = task->sc; | 1891 | struct scsi_cmnd *sc = task->sc; |
1866 | int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; | 1892 | int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; |
1867 | 1893 | ||
1868 | tcp_task->dd_data = tdata; | 1894 | tcp_task->dd_data = tdata; |
1869 | task->hdr = NULL; | 1895 | task->hdr = NULL; |
1870 | 1896 | ||
1871 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && | 1897 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && |
1872 | (opcode == ISCSI_OP_SCSI_DATA_OUT || | 1898 | (opcode == ISCSI_OP_SCSI_DATA_OUT || |
1873 | (opcode == ISCSI_OP_SCSI_CMD && | 1899 | (opcode == ISCSI_OP_SCSI_CMD && |
1874 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) | 1900 | (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) |
1875 | /* data could goes into skb head */ | 1901 | /* data could goes into skb head */ |
1876 | headroom += min_t(unsigned int, | 1902 | headroom += min_t(unsigned int, |
1877 | SKB_MAX_HEAD(cdev->skb_tx_rsvd), | 1903 | SKB_MAX_HEAD(cdev->skb_tx_rsvd), |
1878 | conn->max_xmit_dlength); | 1904 | conn->max_xmit_dlength); |
1879 | 1905 | ||
1880 | tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); | 1906 | tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); |
1881 | if (!tdata->skb) { | 1907 | if (!tdata->skb) { |
1882 | pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", | 1908 | pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", |
1883 | cdev->skb_tx_rsvd, headroom, opcode); | 1909 | cdev->skb_tx_rsvd, headroom, opcode); |
1884 | return -ENOMEM; | 1910 | return -ENOMEM; |
1885 | } | 1911 | } |
1886 | 1912 | ||
1887 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); | 1913 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); |
1888 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; | 1914 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; |
1889 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ | 1915 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ |
1890 | 1916 | ||
1891 | /* data_out uses scsi_cmd's itt */ | 1917 | /* data_out uses scsi_cmd's itt */ |
1892 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) | 1918 | if (opcode != ISCSI_OP_SCSI_DATA_OUT) |
1893 | task_reserve_itt(task, &task->hdr->itt); | 1919 | task_reserve_itt(task, &task->hdr->itt); |
1894 | 1920 | ||
1895 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | 1921 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
1896 | "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", | 1922 | "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", |
1897 | task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, | 1923 | task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, |
1898 | conn->max_xmit_dlength, ntohl(task->hdr->itt)); | 1924 | conn->max_xmit_dlength, ntohl(task->hdr->itt)); |
1899 | 1925 | ||
1900 | return 0; | 1926 | return 0; |
1901 | } | 1927 | } |
1902 | EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); | 1928 | EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); |
1903 | 1929 | ||
1904 | static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) | 1930 | static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) |
1905 | { | 1931 | { |
1906 | u8 submode = 0; | 1932 | u8 submode = 0; |
1907 | 1933 | ||
1908 | if (hcrc) | 1934 | if (hcrc) |
1909 | submode |= 1; | 1935 | submode |= 1; |
1910 | if (dcrc) | 1936 | if (dcrc) |
1911 | submode |= 2; | 1937 | submode |= 2; |
1912 | cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; | 1938 | cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; |
1913 | } | 1939 | } |
1914 | 1940 | ||
/*
 * cxgbi_conn_init_pdu - attach the pdu payload to the prepared tx skb
 * @task:	iscsi task whose pdu is being built
 * @offset:	byte offset of this burst within the command data
 * @count:	number of payload bytes to attach
 *
 * The skb itself was allocated earlier and stashed in the task's private
 * data (tdata->skb).  For scsi-command data the scatterlist is resolved
 * into page fragments; if they do not fit into MAX_SKB_FRAGS (leaving one
 * slot for padding when needed) the data is memcpy'd into the skb headroom
 * instead.  Non-scsi (passthrough) data in task->data is attached as a
 * single page fragment.  Pad bytes up to the iSCSI 4-byte boundary come
 * from the module-global 'padding' page.
 *
 * Returns 0 on success, or a negative errno when @offset/@count fall
 * outside the scatterlist.
 */
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	/* the BHS was written into the headroom by the pdu-alloc path */
	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		/* find the sg entry containing tdata->offset */
		err = sgl_seek_offset(
			sdb->table.sgl, sdb->table.nents,
			tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
			(padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			/* pad inline as well, so the frag-based pad
			 * append at the bottom is skipped */
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fit into frag_list; each frag needs its own
			 * page reference since kfree_skb will drop one */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		/* passthrough pdu data lives in task->data */
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			virt_to_page(padding), offset_in_page(padding),
			padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
2017 | 2042 | ||
2018 | int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | 2043 | int cxgbi_conn_xmit_pdu(struct iscsi_task *task) |
2019 | { | 2044 | { |
2020 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; | 2045 | struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; |
2021 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 2046 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2022 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 2047 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
2023 | struct cxgbi_task_data *tdata = tcp_task->dd_data; | ||
2024 | struct sk_buff *skb = tdata->skb; | 2048 | struct sk_buff *skb = tdata->skb; |
2025 | unsigned int datalen; | 2049 | unsigned int datalen; |
2026 | int err; | 2050 | int err; |
2027 | 2051 | ||
2028 | if (!skb) { | 2052 | if (!skb) { |
2029 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | 2053 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2030 | "task 0x%p, skb NULL.\n", task); | 2054 | "task 0x%p, skb NULL.\n", task); |
2031 | return 0; | 2055 | return 0; |
2032 | } | 2056 | } |
2033 | 2057 | ||
2034 | datalen = skb->data_len; | 2058 | datalen = skb->data_len; |
2035 | tdata->skb = NULL; | 2059 | tdata->skb = NULL; |
2036 | err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); | 2060 | err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); |
2037 | if (err > 0) { | 2061 | if (err > 0) { |
2038 | int pdulen = err; | 2062 | int pdulen = err; |
2039 | 2063 | ||
2040 | log_debug(1 << CXGBI_DBG_PDU_TX, | 2064 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2041 | "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", | 2065 | "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", |
2042 | task, task->sc, skb, skb->len, skb->data_len, err); | 2066 | task, task->sc, skb, skb->len, skb->data_len, err); |
2043 | 2067 | ||
2044 | if (task->conn->hdrdgst_en) | 2068 | if (task->conn->hdrdgst_en) |
2045 | pdulen += ISCSI_DIGEST_SIZE; | 2069 | pdulen += ISCSI_DIGEST_SIZE; |
2046 | 2070 | ||
2047 | if (datalen && task->conn->datadgst_en) | 2071 | if (datalen && task->conn->datadgst_en) |
2048 | pdulen += ISCSI_DIGEST_SIZE; | 2072 | pdulen += ISCSI_DIGEST_SIZE; |
2049 | 2073 | ||
2050 | task->conn->txdata_octets += pdulen; | 2074 | task->conn->txdata_octets += pdulen; |
2051 | return 0; | 2075 | return 0; |
2052 | } | 2076 | } |
2053 | 2077 | ||
2054 | if (err == -EAGAIN || err == -ENOBUFS) { | 2078 | if (err == -EAGAIN || err == -ENOBUFS) { |
2055 | log_debug(1 << CXGBI_DBG_PDU_TX, | 2079 | log_debug(1 << CXGBI_DBG_PDU_TX, |
2056 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", | 2080 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", |
2057 | task, skb, skb->len, skb->data_len, err); | 2081 | task, skb, skb->len, skb->data_len, err); |
2058 | /* reset skb to send when we are called again */ | 2082 | /* reset skb to send when we are called again */ |
2059 | tdata->skb = skb; | 2083 | tdata->skb = skb; |
2060 | return err; | 2084 | return err; |
2061 | } | 2085 | } |
2062 | 2086 | ||
2063 | kfree_skb(skb); | 2087 | kfree_skb(skb); |
2064 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | 2088 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2065 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", | 2089 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", |
2066 | task->itt, skb, skb->len, skb->data_len, err); | 2090 | task->itt, skb, skb->len, skb->data_len, err); |
2067 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); | 2091 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); |
2068 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); | 2092 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); |
2069 | return err; | 2093 | return err; |
2070 | } | 2094 | } |
2071 | EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); | 2095 | EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); |
2072 | 2096 | ||
2073 | void cxgbi_cleanup_task(struct iscsi_task *task) | 2097 | void cxgbi_cleanup_task(struct iscsi_task *task) |
2074 | { | 2098 | { |
2075 | struct cxgbi_task_data *tdata = task->dd_data + | 2099 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
2076 | sizeof(struct iscsi_tcp_task); | ||
2077 | 2100 | ||
2078 | log_debug(1 << CXGBI_DBG_ISCSI, | 2101 | log_debug(1 << CXGBI_DBG_ISCSI, |
2079 | "task 0x%p, skb 0x%p, itt 0x%x.\n", | 2102 | "task 0x%p, skb 0x%p, itt 0x%x.\n", |
2080 | task, tdata->skb, task->hdr_itt); | 2103 | task, tdata->skb, task->hdr_itt); |
2081 | 2104 | ||
2082 | /* never reached the xmit task callout */ | 2105 | /* never reached the xmit task callout */ |
2083 | if (tdata->skb) | 2106 | if (tdata->skb) |
2084 | __kfree_skb(tdata->skb); | 2107 | __kfree_skb(tdata->skb); |
2085 | memset(tdata, 0, sizeof(*tdata)); | 2108 | memset(tdata, 0, sizeof(*tdata)); |
2086 | 2109 | ||
2087 | task_release_itt(task, task->hdr_itt); | 2110 | task_release_itt(task, task->hdr_itt); |
2088 | iscsi_tcp_cleanup_task(task); | 2111 | iscsi_tcp_cleanup_task(task); |
2089 | } | 2112 | } |
2090 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); | 2113 | EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); |
2091 | 2114 | ||
2092 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, | 2115 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, |
2093 | struct iscsi_stats *stats) | 2116 | struct iscsi_stats *stats) |
2094 | { | 2117 | { |
2095 | struct iscsi_conn *conn = cls_conn->dd_data; | 2118 | struct iscsi_conn *conn = cls_conn->dd_data; |
2096 | 2119 | ||
2097 | stats->txdata_octets = conn->txdata_octets; | 2120 | stats->txdata_octets = conn->txdata_octets; |
2098 | stats->rxdata_octets = conn->rxdata_octets; | 2121 | stats->rxdata_octets = conn->rxdata_octets; |
2099 | stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; | 2122 | stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; |
2100 | stats->dataout_pdus = conn->dataout_pdus_cnt; | 2123 | stats->dataout_pdus = conn->dataout_pdus_cnt; |
2101 | stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; | 2124 | stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; |
2102 | stats->datain_pdus = conn->datain_pdus_cnt; | 2125 | stats->datain_pdus = conn->datain_pdus_cnt; |
2103 | stats->r2t_pdus = conn->r2t_pdus_cnt; | 2126 | stats->r2t_pdus = conn->r2t_pdus_cnt; |
2104 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; | 2127 | stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; |
2105 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; | 2128 | stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; |
2106 | stats->digest_err = 0; | 2129 | stats->digest_err = 0; |
2107 | stats->timeout_err = 0; | 2130 | stats->timeout_err = 0; |
2108 | stats->custom_length = 1; | 2131 | stats->custom_length = 1; |
2109 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); | 2132 | strcpy(stats->custom[0].desc, "eh_abort_cnt"); |
2110 | stats->custom[0].value = conn->eh_abort_cnt; | 2133 | stats->custom[0].value = conn->eh_abort_cnt; |
2111 | } | 2134 | } |
2112 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); | 2135 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); |
2113 | 2136 | ||
2114 | static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) | 2137 | static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) |
2115 | { | 2138 | { |
2116 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 2139 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2117 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 2140 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2118 | struct cxgbi_device *cdev = cconn->chba->cdev; | 2141 | struct cxgbi_device *cdev = cconn->chba->cdev; |
2119 | unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); | 2142 | unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); |
2120 | unsigned int max_def = 512 * MAX_SKB_FRAGS; | 2143 | unsigned int max_def = 512 * MAX_SKB_FRAGS; |
2121 | unsigned int max = max(max_def, headroom); | 2144 | unsigned int max = max(max_def, headroom); |
2122 | 2145 | ||
2123 | max = min(cconn->chba->cdev->tx_max_size, max); | 2146 | max = min(cconn->chba->cdev->tx_max_size, max); |
2124 | if (conn->max_xmit_dlength) | 2147 | if (conn->max_xmit_dlength) |
2125 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); | 2148 | conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); |
2126 | else | 2149 | else |
2127 | conn->max_xmit_dlength = max; | 2150 | conn->max_xmit_dlength = max; |
2128 | cxgbi_align_pdu_size(conn->max_xmit_dlength); | 2151 | cxgbi_align_pdu_size(conn->max_xmit_dlength); |
2129 | 2152 | ||
2130 | return 0; | 2153 | return 0; |
2131 | } | 2154 | } |
2132 | 2155 | ||
2133 | static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) | 2156 | static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) |
2134 | { | 2157 | { |
2135 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 2158 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2136 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 2159 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2137 | unsigned int max = cconn->chba->cdev->rx_max_size; | 2160 | unsigned int max = cconn->chba->cdev->rx_max_size; |
2138 | 2161 | ||
2139 | cxgbi_align_pdu_size(max); | 2162 | cxgbi_align_pdu_size(max); |
2140 | 2163 | ||
2141 | if (conn->max_recv_dlength) { | 2164 | if (conn->max_recv_dlength) { |
2142 | if (conn->max_recv_dlength > max) { | 2165 | if (conn->max_recv_dlength > max) { |
2143 | pr_err("MaxRecvDataSegmentLength %u > %u.\n", | 2166 | pr_err("MaxRecvDataSegmentLength %u > %u.\n", |
2144 | conn->max_recv_dlength, max); | 2167 | conn->max_recv_dlength, max); |
2145 | return -EINVAL; | 2168 | return -EINVAL; |
2146 | } | 2169 | } |
2147 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); | 2170 | conn->max_recv_dlength = min(conn->max_recv_dlength, max); |
2148 | cxgbi_align_pdu_size(conn->max_recv_dlength); | 2171 | cxgbi_align_pdu_size(conn->max_recv_dlength); |
2149 | } else | 2172 | } else |
2150 | conn->max_recv_dlength = max; | 2173 | conn->max_recv_dlength = max; |
2151 | 2174 | ||
2152 | return 0; | 2175 | return 0; |
2153 | } | 2176 | } |
2154 | 2177 | ||
2155 | int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, | 2178 | int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, |
2156 | enum iscsi_param param, char *buf, int buflen) | 2179 | enum iscsi_param param, char *buf, int buflen) |
2157 | { | 2180 | { |
2158 | struct iscsi_conn *conn = cls_conn->dd_data; | 2181 | struct iscsi_conn *conn = cls_conn->dd_data; |
2159 | struct iscsi_session *session = conn->session; | 2182 | struct iscsi_session *session = conn->session; |
2160 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; | 2183 | struct iscsi_tcp_conn *tcp_conn = conn->dd_data; |
2161 | struct cxgbi_conn *cconn = tcp_conn->dd_data; | 2184 | struct cxgbi_conn *cconn = tcp_conn->dd_data; |
2162 | struct cxgbi_sock *csk = cconn->cep->csk; | 2185 | struct cxgbi_sock *csk = cconn->cep->csk; |
2163 | int value, err = 0; | 2186 | int value, err = 0; |
2164 | 2187 | ||
2165 | log_debug(1 << CXGBI_DBG_ISCSI, | 2188 | log_debug(1 << CXGBI_DBG_ISCSI, |
2166 | "cls_conn 0x%p, param %d, buf(%d) %s.\n", | 2189 | "cls_conn 0x%p, param %d, buf(%d) %s.\n", |
2167 | cls_conn, param, buflen, buf); | 2190 | cls_conn, param, buflen, buf); |
2168 | 2191 | ||
2169 | switch (param) { | 2192 | switch (param) { |
2170 | case ISCSI_PARAM_HDRDGST_EN: | 2193 | case ISCSI_PARAM_HDRDGST_EN: |
2171 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2194 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2172 | if (!err && conn->hdrdgst_en) | 2195 | if (!err && conn->hdrdgst_en) |
2173 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2196 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2174 | conn->hdrdgst_en, | 2197 | conn->hdrdgst_en, |
2175 | conn->datadgst_en, 0); | 2198 | conn->datadgst_en, 0); |
2176 | break; | 2199 | break; |
2177 | case ISCSI_PARAM_DATADGST_EN: | 2200 | case ISCSI_PARAM_DATADGST_EN: |
2178 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2201 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2179 | if (!err && conn->datadgst_en) | 2202 | if (!err && conn->datadgst_en) |
2180 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, | 2203 | err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, |
2181 | conn->hdrdgst_en, | 2204 | conn->hdrdgst_en, |
2182 | conn->datadgst_en, 0); | 2205 | conn->datadgst_en, 0); |
2183 | break; | 2206 | break; |
2184 | case ISCSI_PARAM_MAX_R2T: | 2207 | case ISCSI_PARAM_MAX_R2T: |
2185 | sscanf(buf, "%d", &value); | 2208 | sscanf(buf, "%d", &value); |
2186 | if (value <= 0 || !is_power_of_2(value)) | 2209 | if (value <= 0 || !is_power_of_2(value)) |
2187 | return -EINVAL; | 2210 | return -EINVAL; |
2188 | if (session->max_r2t == value) | 2211 | if (session->max_r2t == value) |
2189 | break; | 2212 | break; |
2190 | iscsi_tcp_r2tpool_free(session); | 2213 | iscsi_tcp_r2tpool_free(session); |
2191 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2214 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2192 | if (!err && iscsi_tcp_r2tpool_alloc(session)) | 2215 | if (!err && iscsi_tcp_r2tpool_alloc(session)) |
2193 | return -ENOMEM; | 2216 | return -ENOMEM; |
2194 | case ISCSI_PARAM_MAX_RECV_DLENGTH: | 2217 | case ISCSI_PARAM_MAX_RECV_DLENGTH: |
2195 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2218 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2196 | if (!err) | 2219 | if (!err) |
2197 | err = cxgbi_conn_max_recv_dlength(conn); | 2220 | err = cxgbi_conn_max_recv_dlength(conn); |
2198 | break; | 2221 | break; |
2199 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: | 2222 | case ISCSI_PARAM_MAX_XMIT_DLENGTH: |
2200 | err = iscsi_set_param(cls_conn, param, buf, buflen); | 2223 | err = iscsi_set_param(cls_conn, param, buf, buflen); |
2201 | if (!err) | 2224 | if (!err) |
2202 | err = cxgbi_conn_max_xmit_dlength(conn); | 2225 | err = cxgbi_conn_max_xmit_dlength(conn); |
2203 | break; | 2226 | break; |
2204 | default: | 2227 | default: |
2205 | return iscsi_set_param(cls_conn, param, buf, buflen); | 2228 | return iscsi_set_param(cls_conn, param, buf, buflen); |
2206 | } | 2229 | } |
2207 | return err; | 2230 | return err; |
2208 | } | 2231 | } |
2209 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); | 2232 | EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); |
2210 | 2233 | ||
2211 | int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, | 2234 | int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, |
2212 | enum iscsi_param param, char *buf) | 2235 | enum iscsi_param param, char *buf) |
2213 | { | 2236 | { |
2214 | struct iscsi_conn *iconn = cls_conn->dd_data; | 2237 | struct iscsi_conn *iconn = cls_conn->dd_data; |
2215 | int len; | 2238 | int len; |
2216 | 2239 | ||
2217 | log_debug(1 << CXGBI_DBG_ISCSI, | 2240 | log_debug(1 << CXGBI_DBG_ISCSI, |
2218 | "cls_conn 0x%p, param %d.\n", cls_conn, param); | 2241 | "cls_conn 0x%p, param %d.\n", cls_conn, param); |
2219 | 2242 | ||
2220 | switch (param) { | 2243 | switch (param) { |
2221 | case ISCSI_PARAM_CONN_PORT: | 2244 | case ISCSI_PARAM_CONN_PORT: |
2222 | spin_lock_bh(&iconn->session->lock); | 2245 | spin_lock_bh(&iconn->session->lock); |
2223 | len = sprintf(buf, "%hu\n", iconn->portal_port); | 2246 | len = sprintf(buf, "%hu\n", iconn->portal_port); |
2224 | spin_unlock_bh(&iconn->session->lock); | 2247 | spin_unlock_bh(&iconn->session->lock); |
2225 | break; | 2248 | break; |
2226 | case ISCSI_PARAM_CONN_ADDRESS: | 2249 | case ISCSI_PARAM_CONN_ADDRESS: |
2227 | spin_lock_bh(&iconn->session->lock); | 2250 | spin_lock_bh(&iconn->session->lock); |
2228 | len = sprintf(buf, "%s\n", iconn->portal_address); | 2251 | len = sprintf(buf, "%s\n", iconn->portal_address); |
2229 | spin_unlock_bh(&iconn->session->lock); | 2252 | spin_unlock_bh(&iconn->session->lock); |
2230 | break; | 2253 | break; |
2231 | default: | 2254 | default: |
2232 | return iscsi_conn_get_param(cls_conn, param, buf); | 2255 | return iscsi_conn_get_param(cls_conn, param, buf); |
2233 | } | 2256 | } |
2234 | return len; | 2257 | return len; |
2235 | } | 2258 | } |
2236 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); | 2259 | EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); |
2237 | 2260 | ||
2238 | struct iscsi_cls_conn * | 2261 | struct iscsi_cls_conn * |
2239 | cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) | 2262 | cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) |
2240 | { | 2263 | { |
2241 | struct iscsi_cls_conn *cls_conn; | 2264 | struct iscsi_cls_conn *cls_conn; |
2242 | struct iscsi_conn *conn; | 2265 | struct iscsi_conn *conn; |
2243 | struct iscsi_tcp_conn *tcp_conn; | 2266 | struct iscsi_tcp_conn *tcp_conn; |
2244 | struct cxgbi_conn *cconn; | 2267 | struct cxgbi_conn *cconn; |
2245 | 2268 | ||
2246 | cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); | 2269 | cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); |
2247 | if (!cls_conn) | 2270 | if (!cls_conn) |
2248 | return NULL; | 2271 | return NULL; |
2249 | 2272 | ||
2250 | conn = cls_conn->dd_data; | 2273 | conn = cls_conn->dd_data; |
2251 | tcp_conn = conn->dd_data; | 2274 | tcp_conn = conn->dd_data; |
2252 | cconn = tcp_conn->dd_data; | 2275 | cconn = tcp_conn->dd_data; |
2253 | cconn->iconn = conn; | 2276 | cconn->iconn = conn; |
2254 | 2277 | ||
2255 | log_debug(1 << CXGBI_DBG_ISCSI, | 2278 | log_debug(1 << CXGBI_DBG_ISCSI, |
2256 | "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", | 2279 | "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", |
2257 | cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); | 2280 | cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); |
2258 | 2281 | ||
2259 | return cls_conn; | 2282 | return cls_conn; |
2260 | } | 2283 | } |
2261 | EXPORT_SYMBOL_GPL(cxgbi_create_conn); | 2284 | EXPORT_SYMBOL_GPL(cxgbi_create_conn); |
2262 | 2285 | ||
/*
 * cxgbi_bind_conn - bind an iscsi connection to an offloaded endpoint
 * @cls_session:	transport-class session
 * @cls_conn:		transport-class connection
 * @transport_eph:	endpoint handle from the connect path
 * @is_leading:		leading-connection flag, passed to libiscsi
 *
 * Looks up the cxgbi endpoint/socket behind @transport_eph, programs the
 * ddp page-size index into the hw, performs the generic iscsi bind, and
 * then cross-links csk <-> conn under the socket's callback lock (taken
 * _bh since the rx callbacks run in softirq context).  Finally records
 * the peer portal address/port and primes the libiscsi_tcp recv engine.
 *
 * Returns 0 on success or a negative errno.
 */
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
			struct iscsi_cls_conn *cls_conn,
			u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	/* _bh variant: the offload rx path reads csk->user_data in softirq */
	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	/* publish the peer address under the session lock (read by
	 * cxgbi_get_conn_param) */
	spin_lock_bh(&conn->session->lock);
	sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
	conn->portal_port = ntohs(csk->daddr.sin_port);
	spin_unlock_bh(&conn->session->lock);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);
	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
2317 | 2340 | ||
2318 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, | 2341 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, |
2319 | u16 cmds_max, u16 qdepth, | 2342 | u16 cmds_max, u16 qdepth, |
2320 | u32 initial_cmdsn) | 2343 | u32 initial_cmdsn) |
2321 | { | 2344 | { |
2322 | struct cxgbi_endpoint *cep; | 2345 | struct cxgbi_endpoint *cep; |
2323 | struct cxgbi_hba *chba; | 2346 | struct cxgbi_hba *chba; |
2324 | struct Scsi_Host *shost; | 2347 | struct Scsi_Host *shost; |
2325 | struct iscsi_cls_session *cls_session; | 2348 | struct iscsi_cls_session *cls_session; |
2326 | struct iscsi_session *session; | 2349 | struct iscsi_session *session; |
2327 | 2350 | ||
2328 | if (!ep) { | 2351 | if (!ep) { |
2329 | pr_err("missing endpoint.\n"); | 2352 | pr_err("missing endpoint.\n"); |
2330 | return NULL; | 2353 | return NULL; |
2331 | } | 2354 | } |
2332 | 2355 | ||
2333 | cep = ep->dd_data; | 2356 | cep = ep->dd_data; |
2334 | chba = cep->chba; | 2357 | chba = cep->chba; |
2335 | shost = chba->shost; | 2358 | shost = chba->shost; |
2336 | 2359 | ||
2337 | BUG_ON(chba != iscsi_host_priv(shost)); | 2360 | BUG_ON(chba != iscsi_host_priv(shost)); |
2338 | 2361 | ||
2339 | cls_session = iscsi_session_setup(chba->cdev->itp, shost, | 2362 | cls_session = iscsi_session_setup(chba->cdev->itp, shost, |
2340 | cmds_max, 0, | 2363 | cmds_max, 0, |
2341 | sizeof(struct iscsi_tcp_task) + | 2364 | sizeof(struct iscsi_tcp_task) + |
2342 | sizeof(struct cxgbi_task_data), | 2365 | sizeof(struct cxgbi_task_data), |
2343 | initial_cmdsn, ISCSI_MAX_TARGET); | 2366 | initial_cmdsn, ISCSI_MAX_TARGET); |
2344 | if (!cls_session) | 2367 | if (!cls_session) |
2345 | return NULL; | 2368 | return NULL; |
2346 | 2369 | ||
2347 | session = cls_session->dd_data; | 2370 | session = cls_session->dd_data; |
2348 | if (iscsi_tcp_r2tpool_alloc(session)) | 2371 | if (iscsi_tcp_r2tpool_alloc(session)) |
2349 | goto remove_session; | 2372 | goto remove_session; |
2350 | 2373 | ||
2351 | log_debug(1 << CXGBI_DBG_ISCSI, | 2374 | log_debug(1 << CXGBI_DBG_ISCSI, |
2352 | "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); | 2375 | "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); |
2353 | return cls_session; | 2376 | return cls_session; |
2354 | 2377 | ||
2355 | remove_session: | 2378 | remove_session: |
2356 | iscsi_session_teardown(cls_session); | 2379 | iscsi_session_teardown(cls_session); |
2357 | return NULL; | 2380 | return NULL; |
2358 | } | 2381 | } |
2359 | EXPORT_SYMBOL_GPL(cxgbi_create_session); | 2382 | EXPORT_SYMBOL_GPL(cxgbi_create_session); |
2360 | 2383 | ||
2361 | void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) | 2384 | void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) |
2362 | { | 2385 | { |
2363 | log_debug(1 << CXGBI_DBG_ISCSI, | 2386 | log_debug(1 << CXGBI_DBG_ISCSI, |
2364 | "cls sess 0x%p.\n", cls_session); | 2387 | "cls sess 0x%p.\n", cls_session); |
2365 | 2388 | ||
2366 | iscsi_tcp_r2tpool_free(cls_session->dd_data); | 2389 | iscsi_tcp_r2tpool_free(cls_session->dd_data); |
2367 | iscsi_session_teardown(cls_session); | 2390 | iscsi_session_teardown(cls_session); |
2368 | } | 2391 | } |
2369 | EXPORT_SYMBOL_GPL(cxgbi_destroy_session); | 2392 | EXPORT_SYMBOL_GPL(cxgbi_destroy_session); |
2370 | 2393 | ||
2371 | int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, | 2394 | int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, |
2372 | char *buf, int buflen) | 2395 | char *buf, int buflen) |
2373 | { | 2396 | { |
2374 | struct cxgbi_hba *chba = iscsi_host_priv(shost); | 2397 | struct cxgbi_hba *chba = iscsi_host_priv(shost); |
2375 | 2398 | ||
2376 | if (!chba->ndev) { | 2399 | if (!chba->ndev) { |
2377 | shost_printk(KERN_ERR, shost, "Could not get host param. " | 2400 | shost_printk(KERN_ERR, shost, "Could not get host param. " |
2378 | "netdev for host not set.\n"); | 2401 | "netdev for host not set.\n"); |
2379 | return -ENODEV; | 2402 | return -ENODEV; |
2380 | } | 2403 | } |
2381 | 2404 | ||
2382 | log_debug(1 << CXGBI_DBG_ISCSI, | 2405 | log_debug(1 << CXGBI_DBG_ISCSI, |
2383 | "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", | 2406 | "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", |
2384 | shost, chba, chba->ndev->name, param, buflen, buf); | 2407 | shost, chba, chba->ndev->name, param, buflen, buf); |
2385 | 2408 | ||
2386 | switch (param) { | 2409 | switch (param) { |
2387 | case ISCSI_HOST_PARAM_IPADDRESS: | 2410 | case ISCSI_HOST_PARAM_IPADDRESS: |
2388 | { | 2411 | { |
2389 | __be32 addr = in_aton(buf); | 2412 | __be32 addr = in_aton(buf); |
2390 | log_debug(1 << CXGBI_DBG_ISCSI, | 2413 | log_debug(1 << CXGBI_DBG_ISCSI, |
2391 | "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); | 2414 | "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); |
2392 | cxgbi_set_iscsi_ipv4(chba, addr); | 2415 | cxgbi_set_iscsi_ipv4(chba, addr); |
2393 | return 0; | 2416 | return 0; |
2394 | } | 2417 | } |
2395 | case ISCSI_HOST_PARAM_HWADDRESS: | 2418 | case ISCSI_HOST_PARAM_HWADDRESS: |
2396 | case ISCSI_HOST_PARAM_NETDEV_NAME: | 2419 | case ISCSI_HOST_PARAM_NETDEV_NAME: |
2397 | return 0; | 2420 | return 0; |
2398 | default: | 2421 | default: |
2399 | return iscsi_host_set_param(shost, param, buf, buflen); | 2422 | return iscsi_host_set_param(shost, param, buf, buflen); |
2400 | } | 2423 | } |
2401 | } | 2424 | } |
2402 | EXPORT_SYMBOL_GPL(cxgbi_set_host_param); | 2425 | EXPORT_SYMBOL_GPL(cxgbi_set_host_param); |
2403 | 2426 | ||
2404 | int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, | 2427 | int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, |
2405 | char *buf) | 2428 | char *buf) |
2406 | { | 2429 | { |
2407 | struct cxgbi_hba *chba = iscsi_host_priv(shost); | 2430 | struct cxgbi_hba *chba = iscsi_host_priv(shost); |
2408 | int len = 0; | 2431 | int len = 0; |
2409 | 2432 | ||
2410 | if (!chba->ndev) { | 2433 | if (!chba->ndev) { |
2411 | shost_printk(KERN_ERR, shost, "Could not get host param. " | 2434 | shost_printk(KERN_ERR, shost, "Could not get host param. " |
2412 | "netdev for host not set.\n"); | 2435 | "netdev for host not set.\n"); |
2413 | return -ENODEV; | 2436 | return -ENODEV; |
2414 | } | 2437 | } |
2415 | 2438 | ||
2416 | log_debug(1 << CXGBI_DBG_ISCSI, | 2439 | log_debug(1 << CXGBI_DBG_ISCSI, |
2417 | "shost 0x%p, hba 0x%p,%s, param %d.\n", | 2440 | "shost 0x%p, hba 0x%p,%s, param %d.\n", |
2418 | shost, chba, chba->ndev->name, param); | 2441 | shost, chba, chba->ndev->name, param); |
2419 | 2442 | ||
2420 | switch (param) { | 2443 | switch (param) { |
2421 | case ISCSI_HOST_PARAM_HWADDRESS: | 2444 | case ISCSI_HOST_PARAM_HWADDRESS: |
2422 | len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); | 2445 | len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); |
2423 | break; | 2446 | break; |
2424 | case ISCSI_HOST_PARAM_NETDEV_NAME: | 2447 | case ISCSI_HOST_PARAM_NETDEV_NAME: |
2425 | len = sprintf(buf, "%s\n", chba->ndev->name); | 2448 | len = sprintf(buf, "%s\n", chba->ndev->name); |
2426 | break; | 2449 | break; |
2427 | case ISCSI_HOST_PARAM_IPADDRESS: | 2450 | case ISCSI_HOST_PARAM_IPADDRESS: |
2428 | { | 2451 | { |
2429 | __be32 addr; | 2452 | __be32 addr; |
2430 | 2453 | ||
2431 | addr = cxgbi_get_iscsi_ipv4(chba); | 2454 | addr = cxgbi_get_iscsi_ipv4(chba); |
2432 | len = sprintf(buf, "%pI4", &addr); | 2455 | len = sprintf(buf, "%pI4", &addr); |
2433 | log_debug(1 << CXGBI_DBG_ISCSI, | 2456 | log_debug(1 << CXGBI_DBG_ISCSI, |
2434 | "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); | 2457 | "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); |
2435 | break; | 2458 | break; |
2436 | } | 2459 | } |
2437 | default: | 2460 | default: |
2438 | return iscsi_host_get_param(shost, param, buf); | 2461 | return iscsi_host_get_param(shost, param, buf); |
2439 | } | 2462 | } |
2440 | 2463 | ||
2441 | return len; | 2464 | return len; |
2442 | } | 2465 | } |
2443 | EXPORT_SYMBOL_GPL(cxgbi_get_host_param); | 2466 | EXPORT_SYMBOL_GPL(cxgbi_get_host_param); |
2444 | 2467 | ||
/*
 * cxgbi_ep_connect - create and start an offloaded connection to @dst_addr.
 * @shost: requested scsi host; may be NULL, in which case the hba is taken
 *	from the egress port of the route to @dst_addr.
 * @dst_addr: the target's address.
 * @non_blocking: only logged here; the connect completes asynchronously and
 *	is polled via cxgbi_ep_poll().
 *
 * Returns a new iscsi_endpoint embedding a cxgbi_endpoint, or an ERR_PTR().
 */
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	/* routing decides the egress port and thus the candidate hba */
	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);	/* extra ref; put on the error path below */

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		/* caller pinned a host that does not own the egress port */
		pr_info("Could not connect through requested host %u"
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);	/* fire the active open */
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);
2521 | 2544 | ||
2522 | int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) | 2545 | int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) |
2523 | { | 2546 | { |
2524 | struct cxgbi_endpoint *cep = ep->dd_data; | 2547 | struct cxgbi_endpoint *cep = ep->dd_data; |
2525 | struct cxgbi_sock *csk = cep->csk; | 2548 | struct cxgbi_sock *csk = cep->csk; |
2526 | 2549 | ||
2527 | if (!cxgbi_sock_is_established(csk)) | 2550 | if (!cxgbi_sock_is_established(csk)) |
2528 | return 0; | 2551 | return 0; |
2529 | return 1; | 2552 | return 1; |
2530 | } | 2553 | } |
2531 | EXPORT_SYMBOL_GPL(cxgbi_ep_poll); | 2554 | EXPORT_SYMBOL_GPL(cxgbi_ep_poll); |
2532 | 2555 | ||
/*
 * cxgbi_ep_disconnect - tear down an endpoint created by cxgbi_ep_connect.
 *
 * If an iscsi connection is bound, tx is suspended and the csk <-> conn
 * back-pointers are cleared under callback_lock so concurrent callbacks see
 * a consistent view; then the endpoint is freed, the offload socket is
 * closed and the connect-time reference is dropped.
 */
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	/* established connections need the full active-close handshake */
	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
2560 | 2583 | ||
2561 | int cxgbi_iscsi_init(struct iscsi_transport *itp, | 2584 | int cxgbi_iscsi_init(struct iscsi_transport *itp, |
2562 | struct scsi_transport_template **stt) | 2585 | struct scsi_transport_template **stt) |
2563 | { | 2586 | { |
2564 | *stt = iscsi_register_transport(itp); | 2587 | *stt = iscsi_register_transport(itp); |
2565 | if (*stt == NULL) { | 2588 | if (*stt == NULL) { |
2566 | pr_err("unable to register %s transport 0x%p.\n", | 2589 | pr_err("unable to register %s transport 0x%p.\n", |
2567 | itp->name, itp); | 2590 | itp->name, itp); |
2568 | return -ENODEV; | 2591 | return -ENODEV; |
2569 | } | 2592 | } |
2570 | log_debug(1 << CXGBI_DBG_ISCSI, | 2593 | log_debug(1 << CXGBI_DBG_ISCSI, |
2571 | "%s, registered iscsi transport 0x%p.\n", | 2594 | "%s, registered iscsi transport 0x%p.\n", |
2572 | itp->name, stt); | 2595 | itp->name, stt); |
2573 | return 0; | 2596 | return 0; |
2574 | } | 2597 | } |
2575 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); | 2598 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); |
2576 | 2599 | ||
2577 | void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, | 2600 | void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, |
2578 | struct scsi_transport_template **stt) | 2601 | struct scsi_transport_template **stt) |
2579 | { | 2602 | { |
2580 | if (*stt) { | 2603 | if (*stt) { |
2581 | log_debug(1 << CXGBI_DBG_ISCSI, | 2604 | log_debug(1 << CXGBI_DBG_ISCSI, |
2582 | "de-register transport 0x%p, %s, stt 0x%p.\n", | 2605 | "de-register transport 0x%p, %s, stt 0x%p.\n", |
2583 | itp, itp->name, *stt); | 2606 | itp, itp->name, *stt); |
2584 | *stt = NULL; | 2607 | *stt = NULL; |
2585 | iscsi_unregister_transport(itp); | 2608 | iscsi_unregister_transport(itp); |
2586 | } | 2609 | } |
2587 | } | 2610 | } |
2588 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); | 2611 | EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); |
2589 | 2612 | ||
2590 | static int __init libcxgbi_init_module(void) | 2613 | static int __init libcxgbi_init_module(void) |
2591 | { | 2614 | { |
2592 | sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; | 2615 | sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; |
2593 | sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; | 2616 | sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; |
2594 | 2617 | ||
2595 | pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", | 2618 | pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", |
2596 | ISCSI_ITT_MASK, sw_tag_idx_bits, | 2619 | ISCSI_ITT_MASK, sw_tag_idx_bits, |
2597 | ISCSI_AGE_MASK, sw_tag_age_bits); | 2620 | ISCSI_AGE_MASK, sw_tag_age_bits); |
2598 | 2621 | ||
2599 | ddp_setup_host_page_size(); | 2622 | ddp_setup_host_page_size(); |
2600 | return 0; | 2623 | return 0; |
drivers/scsi/cxgbi/libcxgbi.h
1 | /* | 1 | /* |
2 | * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. | 2 | * libcxgbi.h: Chelsio common library for T3/T4 iSCSI driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2010 Chelsio Communications, Inc. | 4 | * Copyright (c) 2010 Chelsio Communications, Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation. | 8 | * the Free Software Foundation. |
9 | * | 9 | * |
10 | * Written by: Karen Xie (kxie@chelsio.com) | 10 | * Written by: Karen Xie (kxie@chelsio.com) |
11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) | 11 | * Written by: Rakesh Ranjan (rranjan@chelsio.com) |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #ifndef __LIBCXGBI_H__ | 14 | #ifndef __LIBCXGBI_H__ |
15 | #define __LIBCXGBI_H__ | 15 | #define __LIBCXGBI_H__ |
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/debugfs.h> | 20 | #include <linux/debugfs.h> |
21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
22 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
23 | #include <linux/if_vlan.h> | 23 | #include <linux/if_vlan.h> |
24 | #include <linux/scatterlist.h> | 24 | #include <linux/scatterlist.h> |
25 | #include <linux/skbuff.h> | 25 | #include <linux/skbuff.h> |
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <scsi/scsi_device.h> | 27 | #include <scsi/scsi_device.h> |
28 | #include <scsi/libiscsi_tcp.h> | 28 | #include <scsi/libiscsi_tcp.h> |
29 | 29 | ||
/*
 * debug-message categories: each enumerator is used as a bit position
 * (1 << CXGBI_DBG_*) tested against the module's dbg_level mask in
 * log_debug().
 */
enum cxgbi_dbg_flag {
	CXGBI_DBG_ISCSI,
	CXGBI_DBG_DDP,
	CXGBI_DBG_TOE,
	CXGBI_DBG_SOCK,

	CXGBI_DBG_PDU_TX,
	CXGBI_DBG_PDU_RX,
	CXGBI_DBG_DEV,
};
40 | 40 | ||
/*
 * Conditional pr_info(): emitted only when the corresponding category bit
 * (see enum cxgbi_dbg_flag) is set in the module's dbg_level.
 */
#define log_debug(level, fmt, ...) \
	do { \
		if (dbg_level & (level)) \
			pr_info(fmt, ##__VA_ARGS__); \
	} while (0)
46 | 46 | ||
47 | /* max. connections per adapter */ | 47 | /* max. connections per adapter */ |
48 | #define CXGBI_MAX_CONN 16384 | 48 | #define CXGBI_MAX_CONN 16384 |
49 | 49 | ||
50 | /* always allocate rooms for AHS */ | 50 | /* always allocate rooms for AHS */ |
51 | #define SKB_TX_ISCSI_PDU_HEADER_MAX \ | 51 | #define SKB_TX_ISCSI_PDU_HEADER_MAX \ |
52 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) | 52 | (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE) |
53 | 53 | ||
54 | #define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/ | 54 | #define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8)*/ |
55 | 55 | ||
56 | /* | 56 | /* |
57 | * align pdu size to multiple of 512 for better performance | 57 | * align pdu size to multiple of 512 for better performance |
58 | */ | 58 | */ |
59 | #define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) | 59 | #define cxgbi_align_pdu_size(n) do { n = (n) & (~511); } while (0) |
60 | 60 | ||
61 | #define ULP2_MODE_ISCSI 2 | 61 | #define ULP2_MODE_ISCSI 2 |
62 | 62 | ||
63 | #define ULP2_MAX_PKT_SIZE 16224 | 63 | #define ULP2_MAX_PKT_SIZE 16224 |
64 | #define ULP2_MAX_PDU_PAYLOAD \ | 64 | #define ULP2_MAX_PDU_PAYLOAD \ |
65 | (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) | 65 | (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN) |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * For iscsi connections HW may inserts digest bytes into the pdu. Those digest | 68 | * For iscsi connections HW may inserts digest bytes into the pdu. Those digest |
69 | * bytes are not sent by the host but are part of the TCP payload and therefore | 69 | * bytes are not sent by the host but are part of the TCP payload and therefore |
70 | * consume TCP sequence space. | 70 | * consume TCP sequence space. |
71 | */ | 71 | */ |
/* per-pdu digest overhead, indexed by the 2-bit (hcrc|dcrc) submode */
static const unsigned int ulp2_extra_len[] = { 0, 4, 4, 8 };
static inline unsigned int cxgbi_ulp_extra_len(int submode)
{
	unsigned int digest_mode = submode & 3;

	return ulp2_extra_len[digest_mode];
}
77 | 77 | ||
78 | /* | 78 | /* |
79 | * struct pagepod_hdr, pagepod - pagepod format | 79 | * struct pagepod_hdr, pagepod - pagepod format |
80 | */ | 80 | */ |
81 | 81 | ||
82 | #define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ | 82 | #define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */ |
83 | #define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ | 83 | #define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */ |
84 | #define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ | 84 | #define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */ |
85 | #define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ | 85 | #define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */ |
86 | 86 | ||
/* hw pagepod header; field packing matches the PPOD_* shift/mask macros
 * below (assumed from the macro names -- confirm against the ddp code). */
struct cxgbi_pagepod_hdr {
	u32 vld_tid;		/* PPOD_VALID flag + PPOD_TID field */
	u32 pgsz_tag_clr;	/* page size, PPOD_TAG and PPOD_COLOR fields */
	u32 max_offset;
	u32 page_offset;
	u64 rsvd;		/* reserved */
};
94 | 94 | ||
#define PPOD_PAGES_MAX 4
/* one pagepod: header plus up to PPOD_PAGES_MAX page addresses (the +1
 * entry presumably covers a buffer spanning into the next pod -- confirm) */
struct cxgbi_pagepod {
	struct cxgbi_pagepod_hdr hdr;
	u64 addr[PPOD_PAGES_MAX + 1];
};
100 | 100 | ||
/*
 * ddp tag layout descriptor: 'rsvd_bits' starting at 'rsvd_shift' are
 * reserved (hw) bits, 'sw_bits' remain for software use (assumed from the
 * field names -- confirm against the tag encode/decode helpers).
 */
struct cxgbi_tag_format {
	unsigned char sw_bits;
	unsigned char rsvd_bits;
	unsigned char rsvd_shift;
	unsigned char filler[1];	/* explicit pad */
	u32 rsvd_mask;
};
108 | 108 | ||
109 | struct cxgbi_gather_list { | 109 | struct cxgbi_gather_list { |
110 | unsigned int tag; | 110 | unsigned int tag; |
111 | unsigned int length; | 111 | unsigned int length; |
112 | unsigned int offset; | 112 | unsigned int offset; |
113 | unsigned int nelem; | 113 | unsigned int nelem; |
114 | struct page **pages; | 114 | struct page **pages; |
115 | dma_addr_t phys_addr[0]; | 115 | dma_addr_t phys_addr[0]; |
116 | }; | 116 | }; |
117 | 117 | ||
/*
 * per-adapter direct-data-placement (ddp) state: the pagepod memory region
 * and the tag -> gather-list mapping.  Shared and reference-counted via
 * 'refcnt'.
 */
struct cxgbi_ddp_info {
	struct kref refcnt;
	struct cxgbi_device *cdev;	/* owning adapter */
	struct pci_dev *pdev;
	unsigned int max_txsz;		/* max tx pdu size */
	unsigned int max_rxsz;		/* max rx pdu size */
	unsigned int llimit;		/* lower bound of the pagepod region */
	unsigned int ulimit;		/* upper bound of the pagepod region */
	unsigned int nppods;		/* total number of pagepods */
	unsigned int idx_last;		/* last allocated pod index (assumed) */
	unsigned char idx_bits;
	unsigned char filler[3];	/* explicit pad */
	unsigned int idx_mask;
	unsigned int rsvd_tag_mask;
	spinlock_t map_lock;		/* presumably guards gl_map/gl_skb */
	struct cxgbi_gather_list **gl_map;
	struct sk_buff **gl_skb;
};
136 | 136 | ||
137 | #define DDP_PGIDX_MAX 4 | 137 | #define DDP_PGIDX_MAX 4 |
138 | #define DDP_THRESHOLD 2048 | 138 | #define DDP_THRESHOLD 2048 |
139 | 139 | ||
140 | #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ | 140 | #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ |
141 | 141 | ||
142 | #define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ | 142 | #define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ |
143 | #define PPOD_SIZE_SHIFT 6 | 143 | #define PPOD_SIZE_SHIFT 6 |
144 | 144 | ||
145 | #define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ | 145 | #define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ |
146 | #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ | 146 | #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ |
147 | #define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ | 147 | #define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ |
148 | 148 | ||
149 | #define PPOD_COLOR_SHIFT 0 | 149 | #define PPOD_COLOR_SHIFT 0 |
150 | #define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) | 150 | #define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) |
151 | 151 | ||
152 | #define PPOD_IDX_SHIFT 6 | 152 | #define PPOD_IDX_SHIFT 6 |
153 | #define PPOD_IDX_MAX_SIZE 24 | 153 | #define PPOD_IDX_MAX_SIZE 24 |
154 | 154 | ||
155 | #define PPOD_TID_SHIFT 0 | 155 | #define PPOD_TID_SHIFT 0 |
156 | #define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) | 156 | #define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) |
157 | 157 | ||
158 | #define PPOD_TAG_SHIFT 6 | 158 | #define PPOD_TAG_SHIFT 6 |
159 | #define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) | 159 | #define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) |
160 | 160 | ||
161 | #define PPOD_VALID_SHIFT 24 | 161 | #define PPOD_VALID_SHIFT 24 |
162 | #define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) | 162 | #define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) |
163 | #define PPOD_VALID_FLAG PPOD_VALID(1U) | 163 | #define PPOD_VALID_FLAG PPOD_VALID(1U) |
164 | 164 | ||
165 | #define W_TCB_ULP_TYPE 0 | 165 | #define W_TCB_ULP_TYPE 0 |
166 | #define TCB_ULP_TYPE_SHIFT 0 | 166 | #define TCB_ULP_TYPE_SHIFT 0 |
167 | #define TCB_ULP_TYPE_MASK 0xfULL | 167 | #define TCB_ULP_TYPE_MASK 0xfULL |
168 | #define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT) | 168 | #define TCB_ULP_TYPE(x) ((x) << TCB_ULP_TYPE_SHIFT) |
169 | 169 | ||
170 | #define W_TCB_ULP_RAW 0 | 170 | #define W_TCB_ULP_RAW 0 |
171 | #define TCB_ULP_RAW_SHIFT 4 | 171 | #define TCB_ULP_RAW_SHIFT 4 |
172 | #define TCB_ULP_RAW_MASK 0xffULL | 172 | #define TCB_ULP_RAW_MASK 0xffULL |
173 | #define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT) | 173 | #define TCB_ULP_RAW(x) ((x) << TCB_ULP_RAW_SHIFT) |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * sge_opaque_hdr - | 176 | * sge_opaque_hdr - |
177 | * Opaque version of structure the SGE stores at skb->head of TX_DATA packets | 177 | * Opaque version of structure the SGE stores at skb->head of TX_DATA packets |
178 | * and for which we must reserve space. | 178 | * and for which we must reserve space. |
179 | */ | 179 | */ |
struct sge_opaque_hdr {
	void *dev;				/* opaque to this driver */
	dma_addr_t addr[MAX_SKB_FRAGS + 1];	/* room for head + all frags */
};
184 | 184 | ||
/*
 * struct cxgbi_sock - one offloaded (TOE) iscsi TCP connection.
 * Reference-counted via 'refcnt' (cxgbi_sock_get/put); flag bits are from
 * enum cxgbi_sock_flags, 'state' from enum cxgbi_sock_states.
 */
struct cxgbi_sock {
	struct cxgbi_device *cdev;	/* owning adapter */

	int tid;		/* hw tid, valid when CTPF_HAS_TID is set */
	int atid;		/* active-open tid, valid with CTPF_HAS_ATID */
	unsigned long flags;	/* enum cxgbi_sock_flags bit mask */
	unsigned int mtu;
	unsigned short rss_qid;
	unsigned short txq_idx;
	unsigned short advmss;
	unsigned int tx_chan;
	unsigned int rx_chan;
	unsigned int mss_idx;
	unsigned int smac_idx;
	unsigned char port_id;	/* index into cdev->hbas[] */
	int wr_max_cred;	/* work-request credits: max, */
	int wr_cred;		/* currently available, */
	int wr_una_cred;	/* and outstanding/unacked */
	unsigned char hcrc_len;	/* header digest length on the wire */
	unsigned char dcrc_len;	/* data digest length on the wire */

	void *l2t;		/* l2 table entry */
	struct sk_buff *wr_pending_head;	/* unacked wrs, chained via */
	struct sk_buff *wr_pending_tail;	/* skcb tx.wr_next (assumed) */
	struct sk_buff *cpl_close;	/* pre-allocated control cpls */
	struct sk_buff *cpl_abort_req;
	struct sk_buff *cpl_abort_rpl;
	struct sk_buff *skb_ulp_lhdr;	/* rx pdu header skb -- confirm */
	spinlock_t lock;
	struct kref refcnt;
	unsigned int state;	/* enum cxgbi_sock_states */
	struct sockaddr_in saddr;	/* local address */
	struct sockaddr_in daddr;	/* peer address */
	struct dst_entry *dst;	/* route to the peer */
	struct sk_buff_head receive_queue;
	struct sk_buff_head write_queue;
	struct timer_list retry_timer;
	int err;
	rwlock_t callback_lock;	/* guards user_data (cxgbi_ep_disconnect) */
	void *user_data;	/* opaque back-pointer for the upper layer */

	/* tcp sequence bookkeeping */
	u32 rcv_nxt;
	u32 copied_seq;
	u32 rcv_wup;
	u32 snd_nxt;
	u32 snd_una;
	u32 write_seq;
};
233 | 233 | ||
234 | /* | 234 | /* |
235 | * connection states | 235 | * connection states |
236 | */ | 236 | */ |
enum cxgbi_sock_states{
	CTP_CLOSED,
	CTP_CONNECTING,		/* set just before the active open is issued */
	CTP_ACTIVE_OPEN,
	CTP_ESTABLISHED,	/* >= this => active close needed on teardown */
	CTP_ACTIVE_CLOSE,
	CTP_PASSIVE_CLOSE,
	CTP_CLOSE_WAIT_1,
	CTP_CLOSE_WAIT_2,
	CTP_ABORTING,
};
248 | 248 | ||
249 | /* | 249 | /* |
250 | * Connection flags -- many to track some close related events. | 250 | * Connection flags -- many to track some close related events. |
251 | */ | 251 | */ |
enum cxgbi_sock_flags {
	CTPF_ABORT_RPL_RCVD,	/* received one ABORT_RPL_RSS message */
	CTPF_ABORT_REQ_RCVD,	/* received one ABORT_REQ_RSS message */
	CTPF_ABORT_RPL_PENDING,	/* expecting an abort reply */
	CTPF_TX_DATA_SENT,	/* already sent a TX_DATA WR */
	CTPF_ACTIVE_CLOSE_NEEDED,	/* need to be closed */
	CTPF_HAS_ATID,		/* reserved atid */
	CTPF_HAS_TID,		/* reserved hw tid */
	CTPF_OFFLOAD_DOWN,	/* offload function off */
};
262 | 262 | ||
/* rx-path per-skb state (see the cxgbi_skcb_rx_* accessors below) */
struct cxgbi_skb_rx_cb {
	__u32 ddigest;	/* received data digest */
	__u32 pdulen;	/* received pdu length (assumed incl. overhead) */
};
267 | 267 | ||
/* tx-path per-skb state */
struct cxgbi_skb_tx_cb {
	void *l2t;			/* l2 table entry for this skb */
	struct sk_buff *wr_next;	/* next skb in the pending-wr chain */
};
272 | 272 | ||
enum cxgbi_skcb_flags {
	SKCBF_TX_NEED_HDR,	/* packet needs a header */
	SKCBF_RX_COALESCED,	/* received whole pdu */
	SKCBF_RX_HDR,		/* received pdu header */
	SKCBF_RX_DATA,		/* received pdu payload */
	SKCBF_RX_STATUS,	/* received ddp status */
	SKCBF_RX_DATA_DDPD,	/* pdu payload ddp'd */
	SKCBF_RX_HCRC_ERR,	/* header digest error */
	SKCBF_RX_DCRC_ERR,	/* data digest error */
	SKCBF_RX_PAD_ERR,	/* padding byte error */
};
284 | 284 | ||
/*
 * driver-private per-skb state, overlaid on skb->cb[] (see CXGBI_SKB_CB).
 * The rx/tx members share storage: an skb travels only one of the paths.
 */
struct cxgbi_skb_cb {
	unsigned char ulp_mode;	/* ulp mode/submode for this skb */
	unsigned long flags;	/* enum cxgbi_skcb_flags bit mask */
	unsigned int seq;	/* tcp sequence number of this skb */
	union {
		struct cxgbi_skb_rx_cb rx;
		struct cxgbi_skb_tx_cb tx;
	};
};
294 | 294 | ||
/* accessors for the cxgbi_skb_cb kept in skb->cb[] */
#define CXGBI_SKB_CB(skb)	((struct cxgbi_skb_cb *)&((skb)->cb[0]))
#define cxgbi_skcb_flags(skb)		(CXGBI_SKB_CB(skb)->flags)
#define cxgbi_skcb_ulp_mode(skb)	(CXGBI_SKB_CB(skb)->ulp_mode)
#define cxgbi_skcb_tcp_seq(skb)		(CXGBI_SKB_CB(skb)->seq)
#define cxgbi_skcb_rx_ddigest(skb)	(CXGBI_SKB_CB(skb)->rx.ddigest)
#define cxgbi_skcb_rx_pdulen(skb)	(CXGBI_SKB_CB(skb)->rx.pdulen)
#define cxgbi_skcb_tx_wr_next(skb)	(CXGBI_SKB_CB(skb)->tx.wr_next)
302 | 302 | ||
303 | static inline void cxgbi_skcb_set_flag(struct sk_buff *skb, | 303 | static inline void cxgbi_skcb_set_flag(struct sk_buff *skb, |
304 | enum cxgbi_skcb_flags flag) | 304 | enum cxgbi_skcb_flags flag) |
305 | { | 305 | { |
306 | __set_bit(flag, &(cxgbi_skcb_flags(skb))); | 306 | __set_bit(flag, &(cxgbi_skcb_flags(skb))); |
307 | } | 307 | } |
308 | 308 | ||
309 | static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb, | 309 | static inline void cxgbi_skcb_clear_flag(struct sk_buff *skb, |
310 | enum cxgbi_skcb_flags flag) | 310 | enum cxgbi_skcb_flags flag) |
311 | { | 311 | { |
312 | __clear_bit(flag, &(cxgbi_skcb_flags(skb))); | 312 | __clear_bit(flag, &(cxgbi_skcb_flags(skb))); |
313 | } | 313 | } |
314 | 314 | ||
315 | static inline int cxgbi_skcb_test_flag(struct sk_buff *skb, | 315 | static inline int cxgbi_skcb_test_flag(struct sk_buff *skb, |
316 | enum cxgbi_skcb_flags flag) | 316 | enum cxgbi_skcb_flags flag) |
317 | { | 317 | { |
318 | return test_bit(flag, &(cxgbi_skcb_flags(skb))); | 318 | return test_bit(flag, &(cxgbi_skcb_flags(skb))); |
319 | } | 319 | } |
320 | 320 | ||
/* Set @flag on the offload socket and log the transition.
 * __set_bit is the non-atomic variant — callers are expected to
 * serialize access to csk->flags. */
static inline void cxgbi_sock_set_flag(struct cxgbi_sock *csk,
				enum cxgbi_sock_flags flag)
{
	__set_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}
329 | 329 | ||
/* Clear @flag on the offload socket and log the transition.
 * __clear_bit is the non-atomic variant — callers are expected to
 * serialize access to csk->flags. */
static inline void cxgbi_sock_clear_flag(struct cxgbi_sock *csk,
				enum cxgbi_sock_flags flag)
{
	__clear_bit(flag, &csk->flags);
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, bit %d.\n",
		csk, csk->state, csk->flags, flag);
}
338 | 338 | ||
339 | static inline int cxgbi_sock_flag(struct cxgbi_sock *csk, | 339 | static inline int cxgbi_sock_flag(struct cxgbi_sock *csk, |
340 | enum cxgbi_sock_flags flag) | 340 | enum cxgbi_sock_flags flag) |
341 | { | 341 | { |
342 | if (csk == NULL) | 342 | if (csk == NULL) |
343 | return 0; | 343 | return 0; |
344 | return test_bit(flag, &csk->flags); | 344 | return test_bit(flag, &csk->flags); |
345 | } | 345 | } |
346 | 346 | ||
/* Record a connection state transition (CTP_* value), logging the
 * old state/flags alongside the new state. */
static inline void cxgbi_sock_set_state(struct cxgbi_sock *csk, int state)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, state -> %u.\n",
		csk, csk->state, csk->flags, state);
	csk->state = state;
}
354 | 354 | ||
/* kref release callback: frees the cxgbi_sock once its last reference
 * is dropped (see __cxgbi_sock_put / kref_put). */
static inline void cxgbi_sock_free(struct kref *kref)
{
	struct cxgbi_sock *csk = container_of(kref,
					struct cxgbi_sock,
					refcnt);
	/* container_of() on an embedded member cannot yield NULL here;
	 * the check is purely defensive. */
	if (csk) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"free csk 0x%p, state %u, flags 0x%lx\n",
			csk, csk->state, csk->flags);
		kfree(csk);
	}
}
367 | 367 | ||
/* Drop a reference on @csk, freeing it via cxgbi_sock_free() when the
 * count hits zero.  @fn is the caller's name (supplied by the wrapper
 * macro) for the debug log; the logged refcount is a racy snapshot. */
static inline void __cxgbi_sock_put(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, put csk 0x%p, ref %u-1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_put(&csk->refcnt, cxgbi_sock_free);
}
#define cxgbi_sock_put(csk)	__cxgbi_sock_put(__func__, csk)
376 | 376 | ||
/* Take an additional reference on @csk.  @fn is the caller's name
 * (supplied by the wrapper macro) for the debug log; the logged
 * refcount is a racy snapshot. */
static inline void __cxgbi_sock_get(const char *fn, struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"%s, get csk 0x%p, ref %u+1.\n",
		fn, csk, atomic_read(&csk->refcnt.refcount));
	kref_get(&csk->refcnt);
}
#define cxgbi_sock_get(csk)	__cxgbi_sock_get(__func__, csk)
385 | 385 | ||
/* True once the connection has entered any closing state
 * (CTP_ACTIVE_CLOSE and beyond — relies on CTP_* state ordering). */
static inline int cxgbi_sock_is_closing(struct cxgbi_sock *csk)
{
	return csk->state >= CTP_ACTIVE_CLOSE;
}
390 | 390 | ||
/* True while the offload connection is fully established. */
static inline int cxgbi_sock_is_established(struct cxgbi_sock *csk)
{
	return csk->state == CTP_ESTABLISHED;
}
395 | 395 | ||
396 | static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk) | 396 | static inline void cxgbi_sock_purge_write_queue(struct cxgbi_sock *csk) |
397 | { | 397 | { |
398 | struct sk_buff *skb; | 398 | struct sk_buff *skb; |
399 | 399 | ||
400 | while ((skb = __skb_dequeue(&csk->write_queue))) | 400 | while ((skb = __skb_dequeue(&csk->write_queue))) |
401 | __kfree_skb(skb); | 401 | __kfree_skb(skb); |
402 | } | 402 | } |
403 | 403 | ||
/*
 * Compute the TCP window-scale factor for a receive window of @win
 * bytes: the smallest shift (capped at 14, the RFC 1323 maximum) such
 * that 65535 << shift covers the window.
 */
static inline unsigned int cxgbi_sock_compute_wscale(unsigned int win)
{
	unsigned int wscale;

	for (wscale = 0; wscale < 14; wscale++)
		if ((65535 << wscale) >= win)
			break;
	return wscale;
}
412 | 412 | ||
/*
 * Allocate an skb for a CPL work request: @wrlen bytes of WR header
 * (immediately reserved via __skb_put) plus room for @dlen bytes of
 * data.  The whole wrlen+dlen region is zeroed.  Returns NULL (after
 * logging) on allocation failure.
 */
static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);

	if (skb) {
		__skb_put(skb, wrlen);
		memset(skb->head, 0, wrlen + dlen);
	} else
		pr_info("alloc cpl wr skb %u+%u, OOM.\n", wrlen, dlen);
	return skb;
}
424 | 424 | ||
425 | 425 | ||
426 | /* | 426 | /* |
427 | * The number of WRs needed for an skb depends on the number of fragments | 427 | * The number of WRs needed for an skb depends on the number of fragments |
428 | * in the skb and whether it has any payload in its main body. This maps the | 428 | * in the skb and whether it has any payload in its main body. This maps the |
429 | * length of the gather list represented by an skb into the # of necessary WRs. | 429 | * length of the gather list represented by an skb into the # of necessary WRs. |
430 | * The extra two fragments are for iscsi bhs and payload padding. | 430 | * The extra two fragments are for iscsi bhs and payload padding. |
431 | */ | 431 | */ |
432 | #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) | 432 | #define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2) |
433 | 433 | ||
434 | static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk) | 434 | static inline void cxgbi_sock_reset_wr_list(struct cxgbi_sock *csk) |
435 | { | 435 | { |
436 | csk->wr_pending_head = csk->wr_pending_tail = NULL; | 436 | csk->wr_pending_head = csk->wr_pending_tail = NULL; |
437 | } | 437 | } |
438 | 438 | ||
/* Append @skb to the tail of the socket's pending-WR list, linked
 * through the skb cb's tx.wr_next pointer. */
static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk,
				struct sk_buff *skb)
{
	cxgbi_skcb_tx_wr_next(skb) = NULL;
	/*
	 * We want to take an extra reference since both us and the driver
	 * need to free the packet before it's really freed. We know there's
	 * just one user currently so we use atomic_set rather than skb_get
	 * to avoid the atomic op.
	 */
	atomic_set(&skb->users, 2);

	if (!csk->wr_pending_head)
		csk->wr_pending_head = skb;
	else
		cxgbi_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
	csk->wr_pending_tail = skb;
}
457 | 457 | ||
458 | static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk) | 458 | static inline int cxgbi_sock_count_pending_wrs(const struct cxgbi_sock *csk) |
459 | { | 459 | { |
460 | int n = 0; | 460 | int n = 0; |
461 | const struct sk_buff *skb = csk->wr_pending_head; | 461 | const struct sk_buff *skb = csk->wr_pending_head; |
462 | 462 | ||
463 | while (skb) { | 463 | while (skb) { |
464 | n += skb->csum; | 464 | n += skb->csum; |
465 | skb = cxgbi_skcb_tx_wr_next(skb); | 465 | skb = cxgbi_skcb_tx_wr_next(skb); |
466 | } | 466 | } |
467 | return n; | 467 | return n; |
468 | } | 468 | } |
469 | 469 | ||
/* Return the head of the pending-WR list without removing it. */
static inline struct sk_buff *cxgbi_sock_peek_wr(const struct cxgbi_sock *csk)
{
	return csk->wr_pending_head;
}
474 | 474 | ||
475 | static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk) | 475 | static inline struct sk_buff *cxgbi_sock_dequeue_wr(struct cxgbi_sock *csk) |
476 | { | 476 | { |
477 | struct sk_buff *skb = csk->wr_pending_head; | 477 | struct sk_buff *skb = csk->wr_pending_head; |
478 | 478 | ||
479 | if (likely(skb)) { | 479 | if (likely(skb)) { |
480 | csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb); | 480 | csk->wr_pending_head = cxgbi_skcb_tx_wr_next(skb); |
481 | cxgbi_skcb_tx_wr_next(skb) = NULL; | 481 | cxgbi_skcb_tx_wr_next(skb) = NULL; |
482 | } | 482 | } |
483 | return skb; | 483 | return skb; |
484 | } | 484 | } |
485 | 485 | ||
486 | void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *); | 486 | void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *); |
487 | void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *); | 487 | void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *); |
488 | void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *); | 488 | void cxgbi_sock_skb_entail(struct cxgbi_sock *, struct sk_buff *); |
489 | void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int); | 489 | void cxgbi_sock_fail_act_open(struct cxgbi_sock *, int); |
490 | void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *); | 490 | void cxgbi_sock_act_open_req_arp_failure(void *, struct sk_buff *); |
491 | void cxgbi_sock_closed(struct cxgbi_sock *); | 491 | void cxgbi_sock_closed(struct cxgbi_sock *); |
492 | void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int); | 492 | void cxgbi_sock_established(struct cxgbi_sock *, unsigned int, unsigned int); |
493 | void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *); | 493 | void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *); |
494 | void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *); | 494 | void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *); |
495 | void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32); | 495 | void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *, u32); |
496 | void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int, | 496 | void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *, unsigned int, unsigned int, |
497 | int); | 497 | int); |
498 | unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int); | 498 | unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *, unsigned int); |
499 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); | 499 | void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *); |
500 | 500 | ||
/* Binds one scsi host to one network port of a cxgbi device. */
struct cxgbi_hba {
	struct net_device *ndev;
	struct Scsi_Host *shost;
	struct cxgbi_device *cdev;	/* owning adapter */
	__be32 ipv4addr;		/* iscsi source address, network order */
	unsigned char port_id;
};
508 | 508 | ||
/* Allocator state for adapter-local TCP source ports: a window of
 * max_connect ports starting at sport_base, with a csk back-pointer
 * per allocated port.  'next' is presumably the next search position
 * — confirm against the allocator in libcxgbi.c. */
struct cxgbi_ports_map {
	unsigned int max_connect;
	unsigned int used;
	unsigned short sport_base;
	spinlock_t lock;		/* guards this map */
	unsigned int next;
	struct cxgbi_sock **port_csk;
};
517 | 517 | ||
/* cxgbi_device.flags bits. */
#define CXGBI_FLAG_DEV_T3		0x1
#define CXGBI_FLAG_DEV_T4		0x2
#define CXGBI_FLAG_ADAPTER_RESET	0x4
#define CXGBI_FLAG_IPV4_SET		0x10
/*
 * Per-adapter state shared by the T3/T4 cxgbi drivers, plus the table
 * of hardware-specific callbacks that each lower-level module fills
 * in at registration time.
 */
struct cxgbi_device {
	struct list_head list_head;
	unsigned int flags;		/* CXGBI_FLAG_* */
	struct net_device **ports;
	void *lldev;			/* lower-level driver handle */
	struct cxgbi_hba **hbas;
	const unsigned short *mtus;
	unsigned char nmtus;
	unsigned char nports;
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct iscsi_transport *itp;

	unsigned int pfvf;
	unsigned int snd_win;
	unsigned int rcv_win;
	unsigned int rx_credit_thres;
	unsigned int skb_tx_rsvd;
	unsigned int skb_rx_extra;	/* for msg coalesced mode */
	unsigned int tx_max_size;
	unsigned int rx_max_size;
	struct cxgbi_ports_map pmap;
	struct cxgbi_tag_format tag_format;
	struct cxgbi_ddp_info *ddp;

	/* hardware-specific hooks (DDP pagepod management): */
	void (*dev_ddp_cleanup)(struct cxgbi_device *);
	void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
	int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
	int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
				unsigned int, unsigned int,
				struct cxgbi_gather_list *);
	void (*csk_ddp_clear)(struct cxgbi_hba *,
				unsigned int, unsigned int, unsigned int);
	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
				unsigned int, int, int, int);
	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
				unsigned int, int, bool);

	/* hardware-specific hooks (connection and data path): */
	void (*csk_release_offload_resources)(struct cxgbi_sock *);
	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
	u32 (*csk_send_rx_credits)(struct cxgbi_sock *, u32);
	int (*csk_push_tx_frames)(struct cxgbi_sock *, int);
	void (*csk_send_abort_req)(struct cxgbi_sock *);
	void (*csk_send_close_req)(struct cxgbi_sock *);
	int (*csk_alloc_cpls)(struct cxgbi_sock *);
	int (*csk_init_act_open)(struct cxgbi_sock *);

	void *dd_data;			/* lower-level driver private data */
};
#define cxgbi_cdev_priv(cdev)	((cdev)->dd_data)
572 | 572 | ||
/* Per-connection glue between the iscsi connection and the cxgbi
 * endpoint/hba it runs on. */
struct cxgbi_conn {
	struct cxgbi_endpoint *cep;
	struct iscsi_conn *iconn;
	struct cxgbi_hba *chba;
	u32 task_idx_bits;		/* bits used to encode the task index in an itt */
};
579 | 579 | ||
/* iscsi endpoint private data: ties a connection to its offload
 * socket and hba. */
struct cxgbi_endpoint {
	struct cxgbi_conn *cconn;
	struct cxgbi_hba *chba;
	struct cxgbi_sock *csk;
};
585 | 585 | ||
/* Max page fragments a single PDU payload can span (512-byte units). */
#define MAX_PDU_FRAGS	((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
/* Per-task cxgbi private data; lives behind the iscsi_tcp_task in
 * task->dd_data (see iscsi_task_cxgbi_data below). */
struct cxgbi_task_data {
	unsigned short nr_frags;
	skb_frag_t frags[MAX_PDU_FRAGS];
	struct sk_buff *skb;
	unsigned int offset;
	unsigned int count;
	unsigned int sgoffset;
};
/* The cxgbi private area follows struct iscsi_tcp_task in dd_data. */
#define iscsi_task_cxgbi_data(task) \
	((task)->dd_data + sizeof(struct iscsi_tcp_task))
595 | 597 | ||
596 | static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) | 598 | static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag) |
597 | { | 599 | { |
598 | return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); | 600 | return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1))); |
599 | } | 601 | } |
600 | 602 | ||
601 | static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, | 603 | static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat, |
602 | u32 sw_tag) | 604 | u32 sw_tag) |
603 | { | 605 | { |
604 | sw_tag >>= (32 - tformat->rsvd_bits); | 606 | sw_tag >>= (32 - tformat->rsvd_bits); |
605 | return !sw_tag; | 607 | return !sw_tag; |
606 | } | 608 | } |
607 | 609 | ||
608 | static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, | 610 | static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat, |
609 | u32 sw_tag) | 611 | u32 sw_tag) |
610 | { | 612 | { |
611 | unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; | 613 | unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1; |
612 | u32 mask = (1 << shift) - 1; | 614 | u32 mask = (1 << shift) - 1; |
613 | 615 | ||
614 | if (sw_tag && (sw_tag & ~mask)) { | 616 | if (sw_tag && (sw_tag & ~mask)) { |
615 | u32 v1 = sw_tag & ((1 << shift) - 1); | 617 | u32 v1 = sw_tag & ((1 << shift) - 1); |
616 | u32 v2 = (sw_tag >> (shift - 1)) << shift; | 618 | u32 v2 = (sw_tag >> (shift - 1)) << shift; |
617 | 619 | ||
618 | return v2 | v1 | 1 << shift; | 620 | return v2 | v1 | 1 << shift; |
619 | } | 621 | } |
620 | 622 | ||
621 | return sw_tag | 1 << shift; | 623 | return sw_tag | 1 << shift; |
622 | } | 624 | } |
623 | 625 | ||
624 | static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, | 626 | static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat, |
625 | u32 sw_tag) | 627 | u32 sw_tag) |
626 | { | 628 | { |
627 | u32 mask = (1 << tformat->rsvd_shift) - 1; | 629 | u32 mask = (1 << tformat->rsvd_shift) - 1; |
628 | 630 | ||
629 | if (sw_tag && (sw_tag & ~mask)) { | 631 | if (sw_tag && (sw_tag & ~mask)) { |
630 | u32 v1 = sw_tag & mask; | 632 | u32 v1 = sw_tag & mask; |
631 | u32 v2 = sw_tag >> tformat->rsvd_shift; | 633 | u32 v2 = sw_tag >> tformat->rsvd_shift; |
632 | 634 | ||
633 | v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; | 635 | v2 <<= tformat->rsvd_bits + tformat->rsvd_shift; |
634 | 636 | ||
635 | return v2 | v1; | 637 | return v2 | v1; |
636 | } | 638 | } |
637 | 639 | ||
638 | return sw_tag; | 640 | return sw_tag; |
639 | } | 641 | } |
640 | 642 | ||
641 | static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, | 643 | static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat, |
642 | u32 tag) | 644 | u32 tag) |
643 | { | 645 | { |
644 | if (cxgbi_is_ddp_tag(tformat, tag)) | 646 | if (cxgbi_is_ddp_tag(tformat, tag)) |
645 | return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; | 647 | return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask; |
646 | 648 | ||
647 | return 0; | 649 | return 0; |
648 | } | 650 | } |
649 | 651 | ||
/*
 * Recover the original software bits of @tag, undoing either
 * cxgbi_ddp_tag_base() (DDP tags) or cxgbi_set_non_ddp_tag()
 * (sw-only tags) depending on which layout the tag uses.
 */
static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
					u32 tag)
{
	unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
	u32 v1, v2;

	if (cxgbi_is_ddp_tag(tformat, tag)) {
		/* low bits in place; high bits sit above the rsvd field */
		v1 = tag & ((1 << tformat->rsvd_shift) - 1);
		v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
	} else {
		/* strip the marker bit, then collapse the split halves */
		u32 mask = (1 << shift) - 1;
		tag &= ~(1 << shift);
		v1 = tag & mask;
		v2 = (tag >> 1) & ~mask;
	}
	return v1 | v2;
}
667 | 669 | ||
668 | static inline void *cxgbi_alloc_big_mem(unsigned int size, | 670 | static inline void *cxgbi_alloc_big_mem(unsigned int size, |
669 | gfp_t gfp) | 671 | gfp_t gfp) |
670 | { | 672 | { |
671 | void *p = kmalloc(size, gfp); | 673 | void *p = kmalloc(size, gfp); |
672 | if (!p) | 674 | if (!p) |
673 | p = vmalloc(size); | 675 | p = vmalloc(size); |
674 | if (p) | 676 | if (p) |
675 | memset(p, 0, size); | 677 | memset(p, 0, size); |
676 | return p; | 678 | return p; |
677 | } | 679 | } |
678 | 680 | ||
/* Free memory from cxgbi_alloc_big_mem(), routing to whichever
 * allocator actually provided it. */
static inline void cxgbi_free_big_mem(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}
686 | 688 | ||
/* Set the iscsi source IPv4 address on @chba, but only if the adapter
 * supports a private iscsi address (CXGBI_FLAG_IPV4_SET); otherwise
 * log and keep using the netdev's own address. */
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
		chba->ipv4addr = ipaddr;
	else
		pr_info("set iscsi ipv4 NOT supported, using %s ipv4.\n",
			chba->ndev->name);
}
695 | 697 | ||
/* Return the hba's iscsi source IPv4 address (network byte order). */
static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba)
{
	return chba->ipv4addr;
}
700 | 702 | ||
701 | struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); | 703 | struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); |
702 | void cxgbi_device_unregister(struct cxgbi_device *); | 704 | void cxgbi_device_unregister(struct cxgbi_device *); |
703 | void cxgbi_device_unregister_all(unsigned int flag); | 705 | void cxgbi_device_unregister_all(unsigned int flag); |
704 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); | 706 | struct cxgbi_device *cxgbi_device_find_by_lldev(void *); |
705 | int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, | 707 | int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, |
706 | struct scsi_host_template *, | 708 | struct scsi_host_template *, |
707 | struct scsi_transport_template *); | 709 | struct scsi_transport_template *); |
708 | void cxgbi_hbas_remove(struct cxgbi_device *); | 710 | void cxgbi_hbas_remove(struct cxgbi_device *); |
709 | 711 | ||
710 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, | 712 | int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, |
711 | unsigned int max_conn); | 713 | unsigned int max_conn); |
712 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev); | 714 | void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev); |
713 | 715 | ||
714 | void cxgbi_conn_tx_open(struct cxgbi_sock *); | 716 | void cxgbi_conn_tx_open(struct cxgbi_sock *); |
715 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *); | 717 | void cxgbi_conn_pdu_ready(struct cxgbi_sock *); |
716 | int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8); | 718 | int cxgbi_conn_alloc_pdu(struct iscsi_task *, u8); |
717 | int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int); | 719 | int cxgbi_conn_init_pdu(struct iscsi_task *, unsigned int , unsigned int); |
718 | int cxgbi_conn_xmit_pdu(struct iscsi_task *); | 720 | int cxgbi_conn_xmit_pdu(struct iscsi_task *); |
719 | 721 | ||
720 | void cxgbi_cleanup_task(struct iscsi_task *task); | 722 | void cxgbi_cleanup_task(struct iscsi_task *task); |
721 | 723 | ||
722 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); | 724 | void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); |
723 | int cxgbi_set_conn_param(struct iscsi_cls_conn *, | 725 | int cxgbi_set_conn_param(struct iscsi_cls_conn *, |
724 | enum iscsi_param, char *, int); | 726 | enum iscsi_param, char *, int); |
725 | int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); | 727 | int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); |
726 | struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32); | 728 | struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32); |
727 | int cxgbi_bind_conn(struct iscsi_cls_session *, | 729 | int cxgbi_bind_conn(struct iscsi_cls_session *, |
728 | struct iscsi_cls_conn *, u64, int); | 730 | struct iscsi_cls_conn *, u64, int); |
729 | void cxgbi_destroy_session(struct iscsi_cls_session *); | 731 | void cxgbi_destroy_session(struct iscsi_cls_session *); |
730 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *, | 732 | struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *, |
731 | u16, u16, u32); | 733 | u16, u16, u32); |
732 | int cxgbi_set_host_param(struct Scsi_Host *, | 734 | int cxgbi_set_host_param(struct Scsi_Host *, |
733 | enum iscsi_host_param, char *, int); | 735 | enum iscsi_host_param, char *, int); |
734 | int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *); | 736 | int cxgbi_get_host_param(struct Scsi_Host *, enum iscsi_host_param, char *); |
735 | struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *, | 737 | struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *, |
736 | struct sockaddr *, int); | 738 | struct sockaddr *, int); |
737 | int cxgbi_ep_poll(struct iscsi_endpoint *, int); | 739 | int cxgbi_ep_poll(struct iscsi_endpoint *, int); |
738 | void cxgbi_ep_disconnect(struct iscsi_endpoint *); | 740 | void cxgbi_ep_disconnect(struct iscsi_endpoint *); |
739 | 741 | ||
740 | int cxgbi_iscsi_init(struct iscsi_transport *, | 742 | int cxgbi_iscsi_init(struct iscsi_transport *, |
741 | struct scsi_transport_template **); | 743 | struct scsi_transport_template **); |
742 | void cxgbi_iscsi_cleanup(struct iscsi_transport *, | 744 | void cxgbi_iscsi_cleanup(struct iscsi_transport *, |
743 | struct scsi_transport_template **); | 745 | struct scsi_transport_template **); |
744 | void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *); | 746 | void cxgbi_parse_pdu_itt(struct iscsi_conn *, itt_t, int *, int *); |
745 | int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, | 747 | int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int, |
746 | unsigned int, unsigned int); | 748 | unsigned int, unsigned int); |
747 | int cxgbi_ddp_cleanup(struct cxgbi_device *); | 749 | int cxgbi_ddp_cleanup(struct cxgbi_device *); |
748 | void cxgbi_ddp_page_size_factor(int *); | 750 | void cxgbi_ddp_page_size_factor(int *); |
749 | void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); | 751 | void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *); |
750 | void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, | 752 | void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *, |
751 | struct cxgbi_gather_list *, unsigned int); | 753 | struct cxgbi_gather_list *, unsigned int); |
752 | #endif /*__LIBCXGBI_H__*/ | 754 | #endif /*__LIBCXGBI_H__*/ |
753 | 755 |