Commit 92ddc447ce7382e36b72a240697c00bf4beb8d75

Authored by Dotan Barak
Committed by Roland Dreier
1 parent bfb3ea1251

IB: Move the macro IB_UMEM_MAX_PAGE_CHUNK to umem.c

After moving the definition of struct ib_umem_chunk from ib_verbs.h to
ib_umem.h, there is no longer any reason for the macro IB_UMEM_MAX_PAGE_CHUNK
to stay in ib_verbs.h.  Move the macro to umem.c, the only place where
it is used.

Signed-off-by: Dotan Barak <dotanb@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
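
For reference, IB_UMEM_MAX_PAGE_CHUNK computes how many page_list entries fit
in a single page alongside the struct ib_umem_chunk header, so each chunk
allocation stays within one page. The pointer difference between
&page_list[1] and &page_list[0] (taken through a null pointer cast) is simply
the size of one array element. Below is a minimal user-space sketch of the
same sizing arithmetic; struct chunk_hdr, struct entry, MY_PAGE_SIZE and
MAX_ENTRIES are illustrative stand-ins, not kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for struct ib_umem_chunk: a small header followed by a
 * flexible array of per-page entries. */
struct entry {
        void            *page;
        unsigned int     offset;
        unsigned int     length;
};

struct chunk_hdr {
        struct chunk_hdr *next;
        int               nents;
        int               nmap;
        struct entry      page_list[];
};

#define MY_PAGE_SIZE 4096UL

/* Same arithmetic as IB_UMEM_MAX_PAGE_CHUNK: the space left in one
 * page after the header, divided by the size of one element.  The
 * kernel macro derives the element size from the pointer difference
 * &page_list[1] - &page_list[0], which equals sizeof(struct entry). */
#define MAX_ENTRIES \
        ((MY_PAGE_SIZE - offsetof(struct chunk_hdr, page_list)) / \
         sizeof(struct entry))

int main(void)
{
        printf("%zu entries fit in one %lu-byte chunk\n",
               (size_t) MAX_ENTRIES, MY_PAGE_SIZE);
        return 0;
}

Capping the chunk at one page means the kmalloc in ib_umem_get never needs a
higher-order allocation, which appears to be the motivation for sizing chunks
this way.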

Showing 2 changed files with 5 additions and 5 deletions

drivers/infiniband/core/umem.c
@@ -40,6 +40,11 @@
 
 #include "uverbs.h"
 
+#define IB_UMEM_MAX_PAGE_CHUNK                                         \
+       ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
+        ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
+         (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
        struct ib_umem_chunk *chunk, *tmp;
include/rdma/ib_verbs.h
@@ -733,11 +733,6 @@ struct ib_udata {
        size_t          outlen;
 };
 
-#define IB_UMEM_MAX_PAGE_CHUNK                                         \
-       ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
-        ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
-         (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
-
 struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
913 u8 port_num, u16 index, u16 *pkey); 908 u8 port_num, u16 index, u16 *pkey);
914 int (*modify_device)(struct ib_device *device, 909 int (*modify_device)(struct ib_device *device,
915 int device_modify_mask, 910 int device_modify_mask,
916 struct ib_device_modify *device_modify); 911 struct ib_device_modify *device_modify);
917 int (*modify_port)(struct ib_device *device, 912 int (*modify_port)(struct ib_device *device,
918 u8 port_num, int port_modify_mask, 913 u8 port_num, int port_modify_mask,
919 struct ib_port_modify *port_modify); 914 struct ib_port_modify *port_modify);
920 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 915 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
921 struct ib_udata *udata); 916 struct ib_udata *udata);
922 int (*dealloc_ucontext)(struct ib_ucontext *context); 917 int (*dealloc_ucontext)(struct ib_ucontext *context);
923 int (*mmap)(struct ib_ucontext *context, 918 int (*mmap)(struct ib_ucontext *context,
924 struct vm_area_struct *vma); 919 struct vm_area_struct *vma);
925 struct ib_pd * (*alloc_pd)(struct ib_device *device, 920 struct ib_pd * (*alloc_pd)(struct ib_device *device,
926 struct ib_ucontext *context, 921 struct ib_ucontext *context,
927 struct ib_udata *udata); 922 struct ib_udata *udata);
928 int (*dealloc_pd)(struct ib_pd *pd); 923 int (*dealloc_pd)(struct ib_pd *pd);
929 struct ib_ah * (*create_ah)(struct ib_pd *pd, 924 struct ib_ah * (*create_ah)(struct ib_pd *pd,
930 struct ib_ah_attr *ah_attr); 925 struct ib_ah_attr *ah_attr);
931 int (*modify_ah)(struct ib_ah *ah, 926 int (*modify_ah)(struct ib_ah *ah,
932 struct ib_ah_attr *ah_attr); 927 struct ib_ah_attr *ah_attr);
933 int (*query_ah)(struct ib_ah *ah, 928 int (*query_ah)(struct ib_ah *ah,
934 struct ib_ah_attr *ah_attr); 929 struct ib_ah_attr *ah_attr);
935 int (*destroy_ah)(struct ib_ah *ah); 930 int (*destroy_ah)(struct ib_ah *ah);
936 struct ib_srq * (*create_srq)(struct ib_pd *pd, 931 struct ib_srq * (*create_srq)(struct ib_pd *pd,
937 struct ib_srq_init_attr *srq_init_attr, 932 struct ib_srq_init_attr *srq_init_attr,
938 struct ib_udata *udata); 933 struct ib_udata *udata);
939 int (*modify_srq)(struct ib_srq *srq, 934 int (*modify_srq)(struct ib_srq *srq,
940 struct ib_srq_attr *srq_attr, 935 struct ib_srq_attr *srq_attr,
941 enum ib_srq_attr_mask srq_attr_mask, 936 enum ib_srq_attr_mask srq_attr_mask,
942 struct ib_udata *udata); 937 struct ib_udata *udata);
943 int (*query_srq)(struct ib_srq *srq, 938 int (*query_srq)(struct ib_srq *srq,
944 struct ib_srq_attr *srq_attr); 939 struct ib_srq_attr *srq_attr);
945 int (*destroy_srq)(struct ib_srq *srq); 940 int (*destroy_srq)(struct ib_srq *srq);
946 int (*post_srq_recv)(struct ib_srq *srq, 941 int (*post_srq_recv)(struct ib_srq *srq,
947 struct ib_recv_wr *recv_wr, 942 struct ib_recv_wr *recv_wr,
948 struct ib_recv_wr **bad_recv_wr); 943 struct ib_recv_wr **bad_recv_wr);
949 struct ib_qp * (*create_qp)(struct ib_pd *pd, 944 struct ib_qp * (*create_qp)(struct ib_pd *pd,
950 struct ib_qp_init_attr *qp_init_attr, 945 struct ib_qp_init_attr *qp_init_attr,
951 struct ib_udata *udata); 946 struct ib_udata *udata);
952 int (*modify_qp)(struct ib_qp *qp, 947 int (*modify_qp)(struct ib_qp *qp,
953 struct ib_qp_attr *qp_attr, 948 struct ib_qp_attr *qp_attr,
954 int qp_attr_mask, 949 int qp_attr_mask,
955 struct ib_udata *udata); 950 struct ib_udata *udata);
956 int (*query_qp)(struct ib_qp *qp, 951 int (*query_qp)(struct ib_qp *qp,
957 struct ib_qp_attr *qp_attr, 952 struct ib_qp_attr *qp_attr,
958 int qp_attr_mask, 953 int qp_attr_mask,
959 struct ib_qp_init_attr *qp_init_attr); 954 struct ib_qp_init_attr *qp_init_attr);
960 int (*destroy_qp)(struct ib_qp *qp); 955 int (*destroy_qp)(struct ib_qp *qp);
961 int (*post_send)(struct ib_qp *qp, 956 int (*post_send)(struct ib_qp *qp,
962 struct ib_send_wr *send_wr, 957 struct ib_send_wr *send_wr,
963 struct ib_send_wr **bad_send_wr); 958 struct ib_send_wr **bad_send_wr);
964 int (*post_recv)(struct ib_qp *qp, 959 int (*post_recv)(struct ib_qp *qp,
965 struct ib_recv_wr *recv_wr, 960 struct ib_recv_wr *recv_wr,
966 struct ib_recv_wr **bad_recv_wr); 961 struct ib_recv_wr **bad_recv_wr);
967 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, 962 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
968 int comp_vector, 963 int comp_vector,
969 struct ib_ucontext *context, 964 struct ib_ucontext *context,
970 struct ib_udata *udata); 965 struct ib_udata *udata);
971 int (*destroy_cq)(struct ib_cq *cq); 966 int (*destroy_cq)(struct ib_cq *cq);
972 int (*resize_cq)(struct ib_cq *cq, int cqe, 967 int (*resize_cq)(struct ib_cq *cq, int cqe,
973 struct ib_udata *udata); 968 struct ib_udata *udata);
974 int (*poll_cq)(struct ib_cq *cq, int num_entries, 969 int (*poll_cq)(struct ib_cq *cq, int num_entries,
975 struct ib_wc *wc); 970 struct ib_wc *wc);
976 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 971 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
977 int (*req_notify_cq)(struct ib_cq *cq, 972 int (*req_notify_cq)(struct ib_cq *cq,
978 enum ib_cq_notify_flags flags); 973 enum ib_cq_notify_flags flags);
979 int (*req_ncomp_notif)(struct ib_cq *cq, 974 int (*req_ncomp_notif)(struct ib_cq *cq,
980 int wc_cnt); 975 int wc_cnt);
981 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 976 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
982 int mr_access_flags); 977 int mr_access_flags);
983 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, 978 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
984 struct ib_phys_buf *phys_buf_array, 979 struct ib_phys_buf *phys_buf_array,
985 int num_phys_buf, 980 int num_phys_buf,
986 int mr_access_flags, 981 int mr_access_flags,
987 u64 *iova_start); 982 u64 *iova_start);
988 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 983 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
989 u64 start, u64 length, 984 u64 start, u64 length,
990 u64 virt_addr, 985 u64 virt_addr,
991 int mr_access_flags, 986 int mr_access_flags,
992 struct ib_udata *udata); 987 struct ib_udata *udata);
993 int (*query_mr)(struct ib_mr *mr, 988 int (*query_mr)(struct ib_mr *mr,
994 struct ib_mr_attr *mr_attr); 989 struct ib_mr_attr *mr_attr);
995 int (*dereg_mr)(struct ib_mr *mr); 990 int (*dereg_mr)(struct ib_mr *mr);
996 int (*rereg_phys_mr)(struct ib_mr *mr, 991 int (*rereg_phys_mr)(struct ib_mr *mr,
997 int mr_rereg_mask, 992 int mr_rereg_mask,
998 struct ib_pd *pd, 993 struct ib_pd *pd,
999 struct ib_phys_buf *phys_buf_array, 994 struct ib_phys_buf *phys_buf_array,
1000 int num_phys_buf, 995 int num_phys_buf,
1001 int mr_access_flags, 996 int mr_access_flags,
1002 u64 *iova_start); 997 u64 *iova_start);
1003 struct ib_mw * (*alloc_mw)(struct ib_pd *pd); 998 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
1004 int (*bind_mw)(struct ib_qp *qp, 999 int (*bind_mw)(struct ib_qp *qp,
1005 struct ib_mw *mw, 1000 struct ib_mw *mw,
1006 struct ib_mw_bind *mw_bind); 1001 struct ib_mw_bind *mw_bind);
1007 int (*dealloc_mw)(struct ib_mw *mw); 1002 int (*dealloc_mw)(struct ib_mw *mw);
1008 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1003 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
1009 int mr_access_flags, 1004 int mr_access_flags,
1010 struct ib_fmr_attr *fmr_attr); 1005 struct ib_fmr_attr *fmr_attr);
1011 int (*map_phys_fmr)(struct ib_fmr *fmr, 1006 int (*map_phys_fmr)(struct ib_fmr *fmr,
1012 u64 *page_list, int list_len, 1007 u64 *page_list, int list_len,
1013 u64 iova); 1008 u64 iova);
1014 int (*unmap_fmr)(struct list_head *fmr_list); 1009 int (*unmap_fmr)(struct list_head *fmr_list);
1015 int (*dealloc_fmr)(struct ib_fmr *fmr); 1010 int (*dealloc_fmr)(struct ib_fmr *fmr);
1016 int (*attach_mcast)(struct ib_qp *qp, 1011 int (*attach_mcast)(struct ib_qp *qp,
1017 union ib_gid *gid, 1012 union ib_gid *gid,
1018 u16 lid); 1013 u16 lid);
1019 int (*detach_mcast)(struct ib_qp *qp, 1014 int (*detach_mcast)(struct ib_qp *qp,
1020 union ib_gid *gid, 1015 union ib_gid *gid,
1021 u16 lid); 1016 u16 lid);
1022 int (*process_mad)(struct ib_device *device, 1017 int (*process_mad)(struct ib_device *device,
1023 int process_mad_flags, 1018 int process_mad_flags,
1024 u8 port_num, 1019 u8 port_num,
1025 struct ib_wc *in_wc, 1020 struct ib_wc *in_wc,
1026 struct ib_grh *in_grh, 1021 struct ib_grh *in_grh,
1027 struct ib_mad *in_mad, 1022 struct ib_mad *in_mad,
1028 struct ib_mad *out_mad); 1023 struct ib_mad *out_mad);
1029 1024
1030 struct ib_dma_mapping_ops *dma_ops; 1025 struct ib_dma_mapping_ops *dma_ops;
1031 1026
1032 struct module *owner; 1027 struct module *owner;
1033 struct class_device class_dev; 1028 struct class_device class_dev;
1034 struct kobject ports_parent; 1029 struct kobject ports_parent;
1035 struct list_head port_list; 1030 struct list_head port_list;
1036 1031
1037 enum { 1032 enum {
1038 IB_DEV_UNINITIALIZED, 1033 IB_DEV_UNINITIALIZED,
1039 IB_DEV_REGISTERED, 1034 IB_DEV_REGISTERED,
1040 IB_DEV_UNREGISTERED 1035 IB_DEV_UNREGISTERED
1041 } reg_state; 1036 } reg_state;
1042 1037
1043 u64 uverbs_cmd_mask; 1038 u64 uverbs_cmd_mask;
1044 int uverbs_abi_ver; 1039 int uverbs_abi_ver;
1045 1040
1046 char node_desc[64]; 1041 char node_desc[64];
1047 __be64 node_guid; 1042 __be64 node_guid;
1048 u8 node_type; 1043 u8 node_type;
1049 u8 phys_port_cnt; 1044 u8 phys_port_cnt;
1050 }; 1045 };
1051 1046
1052 struct ib_client { 1047 struct ib_client {
1053 char *name; 1048 char *name;
1054 void (*add) (struct ib_device *); 1049 void (*add) (struct ib_device *);
1055 void (*remove)(struct ib_device *); 1050 void (*remove)(struct ib_device *);
1056 1051
1057 struct list_head list; 1052 struct list_head list;
1058 }; 1053 };
1059 1054
1060 struct ib_device *ib_alloc_device(size_t size); 1055 struct ib_device *ib_alloc_device(size_t size);
1061 void ib_dealloc_device(struct ib_device *device); 1056 void ib_dealloc_device(struct ib_device *device);
1062 1057
1063 int ib_register_device (struct ib_device *device); 1058 int ib_register_device (struct ib_device *device);
1064 void ib_unregister_device(struct ib_device *device); 1059 void ib_unregister_device(struct ib_device *device);
1065 1060
1066 int ib_register_client (struct ib_client *client); 1061 int ib_register_client (struct ib_client *client);
1067 void ib_unregister_client(struct ib_client *client); 1062 void ib_unregister_client(struct ib_client *client);
1068 1063
1069 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 1064 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
1070 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 1065 void ib_set_client_data(struct ib_device *device, struct ib_client *client,
1071 void *data); 1066 void *data);
1072 1067
1073 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 1068 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
1074 { 1069 {
1075 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 1070 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
1076 } 1071 }
1077 1072
1078 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1073 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
1079 { 1074 {
1080 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1075 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
1081 } 1076 }
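
As a brief illustration (not part of this commit), a driver's verbs handler might use ib_copy_to_udata() to hand a small response structure back to userspace; the response layout below is hypothetical, since each driver defines its own user ABI:

#include <linux/types.h>
#include <rdma/ib_verbs.h>

/* Hypothetical response layout; real drivers define their own ABI. */
struct example_create_resp {
        __u32 handle;
        __u32 reserved;
};

static int example_fill_resp(struct ib_udata *udata, u32 handle)
{
        struct example_create_resp resp = {
                .handle = handle,
        };

        /* Returns -EFAULT if the user's output buffer cannot be written. */
        return ib_copy_to_udata(udata, &resp, sizeof resp);
}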
1082 1077
1083 /** 1078 /**
1084 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1079 * ib_modify_qp_is_ok - Check that the supplied attribute mask
1085 * contains all required attributes and no attributes not allowed for 1080 * contains all required attributes and no attributes not allowed for
1086 * the given QP state transition. 1081 * the given QP state transition.
1087 * @cur_state: Current QP state 1082 * @cur_state: Current QP state
1088 * @next_state: Next QP state 1083 * @next_state: Next QP state
1089 * @type: QP type 1084 * @type: QP type
1090 * @mask: Mask of supplied QP attributes 1085 * @mask: Mask of supplied QP attributes
1091 * 1086 *
1092 * This function is a helper function that a low-level driver's 1087 * This function is a helper function that a low-level driver's
1093 * modify_qp method can use to validate the consumer's input. It 1088 * modify_qp method can use to validate the consumer's input. It
1094 * checks that cur_state and next_state are valid QP states, that a 1089 * checks that cur_state and next_state are valid QP states, that a
1095 * transition from cur_state to next_state is allowed by the IB spec, 1090 * transition from cur_state to next_state is allowed by the IB spec,
1096 * and that the attribute mask supplied is allowed for the transition. 1091 * and that the attribute mask supplied is allowed for the transition.
1097 */ 1092 */
1098 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1093 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1099 enum ib_qp_type type, enum ib_qp_attr_mask mask); 1094 enum ib_qp_type type, enum ib_qp_attr_mask mask);
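
For illustration, a low-level driver's modify_qp method might use this helper roughly as follows; the current-state bookkeeping (cur_state) is assumed to be the driver's own, and the function name is hypothetical:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                             int attr_mask, enum ib_qp_state cur_state)
{
        enum ib_qp_state new_state;

        /* If IB_QP_STATE is not set, the QP stays in its current state. */
        new_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

        /* ib_modify_qp_is_ok() returns nonzero when the transition and
         * attribute mask are valid per the IB spec. */
        if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type, attr_mask))
                return -EINVAL;

        /* ... hardware-specific transition work would go here ... */
        return 0;
}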
1100 1095
1101 int ib_register_event_handler (struct ib_event_handler *event_handler); 1096 int ib_register_event_handler (struct ib_event_handler *event_handler);
1102 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1097 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
1103 void ib_dispatch_event(struct ib_event *event); 1098 void ib_dispatch_event(struct ib_event *event);
1104 1099
1105 int ib_query_device(struct ib_device *device, 1100 int ib_query_device(struct ib_device *device,
1106 struct ib_device_attr *device_attr); 1101 struct ib_device_attr *device_attr);
1107 1102
1108 int ib_query_port(struct ib_device *device, 1103 int ib_query_port(struct ib_device *device,
1109 u8 port_num, struct ib_port_attr *port_attr); 1104 u8 port_num, struct ib_port_attr *port_attr);
1110 1105
1111 int ib_query_gid(struct ib_device *device, 1106 int ib_query_gid(struct ib_device *device,
1112 u8 port_num, int index, union ib_gid *gid); 1107 u8 port_num, int index, union ib_gid *gid);
1113 1108
1114 int ib_query_pkey(struct ib_device *device, 1109 int ib_query_pkey(struct ib_device *device,
1115 u8 port_num, u16 index, u16 *pkey); 1110 u8 port_num, u16 index, u16 *pkey);
1116 1111
1117 int ib_modify_device(struct ib_device *device, 1112 int ib_modify_device(struct ib_device *device,
1118 int device_modify_mask, 1113 int device_modify_mask,
1119 struct ib_device_modify *device_modify); 1114 struct ib_device_modify *device_modify);
1120 1115
1121 int ib_modify_port(struct ib_device *device, 1116 int ib_modify_port(struct ib_device *device,
1122 u8 port_num, int port_modify_mask, 1117 u8 port_num, int port_modify_mask,
1123 struct ib_port_modify *port_modify); 1118 struct ib_port_modify *port_modify);
1124 1119
1125 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 1120 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
1126 u8 *port_num, u16 *index); 1121 u8 *port_num, u16 *index);
1127 1122
1128 int ib_find_pkey(struct ib_device *device, 1123 int ib_find_pkey(struct ib_device *device,
1129 u8 port_num, u16 pkey, u16 *index); 1124 u8 port_num, u16 pkey, u16 *index);
1130 1125
1131 /** 1126 /**
1132 * ib_alloc_pd - Allocates an unused protection domain. 1127 * ib_alloc_pd - Allocates an unused protection domain.
1133 * @device: The device on which to allocate the protection domain. 1128 * @device: The device on which to allocate the protection domain.
1134 * 1129 *
1135 * A protection domain object provides an association between QPs, shared 1130 * A protection domain object provides an association between QPs, shared
1136 * receive queues, address handles, memory regions, and memory windows. 1131 * receive queues, address handles, memory regions, and memory windows.
1137 */ 1132 */
1138 struct ib_pd *ib_alloc_pd(struct ib_device *device); 1133 struct ib_pd *ib_alloc_pd(struct ib_device *device);
1139 1134
1140 /** 1135 /**
1141 * ib_dealloc_pd - Deallocates a protection domain. 1136 * ib_dealloc_pd - Deallocates a protection domain.
1142 * @pd: The protection domain to deallocate. 1137 * @pd: The protection domain to deallocate.
1143 */ 1138 */
1144 int ib_dealloc_pd(struct ib_pd *pd); 1139 int ib_dealloc_pd(struct ib_pd *pd);
1145 1140
1146 /** 1141 /**
1147 * ib_create_ah - Creates an address handle for the given address vector. 1142 * ib_create_ah - Creates an address handle for the given address vector.
1148 * @pd: The protection domain associated with the address handle. 1143 * @pd: The protection domain associated with the address handle.
1149 * @ah_attr: The attributes of the address vector. 1144 * @ah_attr: The attributes of the address vector.
1150 * 1145 *
1151 * The address handle is used to reference a local or global destination 1146 * The address handle is used to reference a local or global destination
1152 * in all UD QP post sends. 1147 * in all UD QP post sends.
1153 */ 1148 */
1154 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 1149 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1155 1150
1156 /** 1151 /**
1157 * ib_init_ah_from_wc - Initializes address handle attributes from a 1152 * ib_init_ah_from_wc - Initializes address handle attributes from a
1158 * work completion. 1153 * work completion.
1159 * @device: Device on which the received message arrived. 1154 * @device: Device on which the received message arrived.
1160 * @port_num: Port on which the received message arrived. 1155 * @port_num: Port on which the received message arrived.
1161 * @wc: Work completion associated with the received message. 1156 * @wc: Work completion associated with the received message.
1162 * @grh: References the received global route header. This parameter is 1157 * @grh: References the received global route header. This parameter is
1163 * ignored unless the work completion indicates that the GRH is valid. 1158 * ignored unless the work completion indicates that the GRH is valid.
1164 * @ah_attr: Returned attributes that can be used when creating an address 1159 * @ah_attr: Returned attributes that can be used when creating an address
1165 * handle for replying to the message. 1160 * handle for replying to the message.
1166 */ 1161 */
1167 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, 1162 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
1168 struct ib_grh *grh, struct ib_ah_attr *ah_attr); 1163 struct ib_grh *grh, struct ib_ah_attr *ah_attr);
1169 1164
1170 /** 1165 /**
1171 * ib_create_ah_from_wc - Creates an address handle associated with the 1166 * ib_create_ah_from_wc - Creates an address handle associated with the
1172 * sender of the specified work completion. 1167 * sender of the specified work completion.
1173 * @pd: The protection domain associated with the address handle. 1168 * @pd: The protection domain associated with the address handle.
1174 * @wc: Work completion information associated with a received message. 1169 * @wc: Work completion information associated with a received message.
1175 * @grh: References the received global route header. This parameter is 1170 * @grh: References the received global route header. This parameter is
1176 * ignored unless the work completion indicates that the GRH is valid. 1171 * ignored unless the work completion indicates that the GRH is valid.
1177 * @port_num: The outbound port number to associate with the address. 1172 * @port_num: The outbound port number to associate with the address.
1178 * 1173 *
1179 * The address handle is used to reference a local or global destination 1174 * The address handle is used to reference a local or global destination
1180 * in all UD QP post sends. 1175 * in all UD QP post sends.
1181 */ 1176 */
1182 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, 1177 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1183 struct ib_grh *grh, u8 port_num); 1178 struct ib_grh *grh, u8 port_num);
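
A hedged sketch of the reply pattern these two helpers support: for UD QPs, the GRH (when present) occupies the first 40 bytes of the posted receive buffer, so the buffer head can be passed as the grh argument. The names here are illustrative:

#include <rdma/ib_verbs.h>

static struct ib_ah *example_reply_ah(struct ib_pd *pd, struct ib_wc *wc,
                                      void *recv_buf, u8 port_num)
{
        /* Only meaningful when IB_WC_GRH is set in wc->wc_flags; per the
         * documentation above, the grh argument is ignored otherwise. */
        struct ib_grh *grh = recv_buf;

        return ib_create_ah_from_wc(pd, wc, grh, port_num);
}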
1184 1179
1185 /** 1180 /**
1186 * ib_modify_ah - Modifies the address vector associated with an address 1181 * ib_modify_ah - Modifies the address vector associated with an address
1187 * handle. 1182 * handle.
1188 * @ah: The address handle to modify. 1183 * @ah: The address handle to modify.
1189 * @ah_attr: The new address vector attributes to associate with the 1184 * @ah_attr: The new address vector attributes to associate with the
1190 * address handle. 1185 * address handle.
1191 */ 1186 */
1192 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1187 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1193 1188
1194 /** 1189 /**
1195 * ib_query_ah - Queries the address vector associated with an address 1190 * ib_query_ah - Queries the address vector associated with an address
1196 * handle. 1191 * handle.
1197 * @ah: The address handle to query. 1192 * @ah: The address handle to query.
1198 * @ah_attr: The address vector attributes associated with the address 1193 * @ah_attr: The address vector attributes associated with the address
1199 * handle. 1194 * handle.
1200 */ 1195 */
1201 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1196 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1202 1197
1203 /** 1198 /**
1204 * ib_destroy_ah - Destroys an address handle. 1199 * ib_destroy_ah - Destroys an address handle.
1205 * @ah: The address handle to destroy. 1200 * @ah: The address handle to destroy.
1206 */ 1201 */
1207 int ib_destroy_ah(struct ib_ah *ah); 1202 int ib_destroy_ah(struct ib_ah *ah);
1208 1203
1209 /** 1204 /**
1210 * ib_create_srq - Creates a SRQ associated with the specified protection 1205 * ib_create_srq - Creates a SRQ associated with the specified protection
1211 * domain. 1206 * domain.
1212 * @pd: The protection domain associated with the SRQ. 1207 * @pd: The protection domain associated with the SRQ.
1213 * @srq_init_attr: A list of initial attributes required to create the 1208 * @srq_init_attr: A list of initial attributes required to create the
1214 * SRQ. If SRQ creation succeeds, then the attributes are updated to 1209 * SRQ. If SRQ creation succeeds, then the attributes are updated to
1215 * the actual capabilities of the created SRQ. 1210 * the actual capabilities of the created SRQ.
1216 * 1211 *
1217 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 1212 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1218 * requested size of the SRQ, and set to the actual values allocated 1213 * requested size of the SRQ, and set to the actual values allocated
1219 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 1214 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1220 * will always be at least as large as the requested values. 1215 * will always be at least as large as the requested values.
1221 */ 1216 */
1222 struct ib_srq *ib_create_srq(struct ib_pd *pd, 1217 struct ib_srq *ib_create_srq(struct ib_pd *pd,
1223 struct ib_srq_init_attr *srq_init_attr); 1218 struct ib_srq_init_attr *srq_init_attr);
1224 1219
1225 /** 1220 /**
1226 * ib_modify_srq - Modifies the attributes for the specified SRQ. 1221 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1227 * @srq: The SRQ to modify. 1222 * @srq: The SRQ to modify.
1228 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 1223 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
1229 * the current values of selected SRQ attributes are returned. 1224 * the current values of selected SRQ attributes are returned.
1230 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 1225 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1231 * are being modified. 1226 * are being modified.
1232 * 1227 *
1233 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 1228 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1234 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 1229 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1235 * the number of receives queued drops below the limit. 1230 * the number of receives queued drops below the limit.
1236 */ 1231 */
1237 int ib_modify_srq(struct ib_srq *srq, 1232 int ib_modify_srq(struct ib_srq *srq,
1238 struct ib_srq_attr *srq_attr, 1233 struct ib_srq_attr *srq_attr,
1239 enum ib_srq_attr_mask srq_attr_mask); 1234 enum ib_srq_attr_mask srq_attr_mask);
1240 1235
1241 /** 1236 /**
1242 * ib_query_srq - Returns the attribute list and current values for the 1237 * ib_query_srq - Returns the attribute list and current values for the
1243 * specified SRQ. 1238 * specified SRQ.
1244 * @srq: The SRQ to query. 1239 * @srq: The SRQ to query.
1245 * @srq_attr: The attributes of the specified SRQ. 1240 * @srq_attr: The attributes of the specified SRQ.
1246 */ 1241 */
1247 int ib_query_srq(struct ib_srq *srq, 1242 int ib_query_srq(struct ib_srq *srq,
1248 struct ib_srq_attr *srq_attr); 1243 struct ib_srq_attr *srq_attr);
1249 1244
1250 /** 1245 /**
1251 * ib_destroy_srq - Destroys the specified SRQ. 1246 * ib_destroy_srq - Destroys the specified SRQ.
1252 * @srq: The SRQ to destroy. 1247 * @srq: The SRQ to destroy.
1253 */ 1248 */
1254 int ib_destroy_srq(struct ib_srq *srq); 1249 int ib_destroy_srq(struct ib_srq *srq);
1255 1250
1256 /** 1251 /**
1257 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 1252 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1258 * @srq: The SRQ to post the work request on. 1253 * @srq: The SRQ to post the work request on.
1259 * @recv_wr: A list of work requests to post on the receive queue. 1254 * @recv_wr: A list of work requests to post on the receive queue.
1260 * @bad_recv_wr: On an immediate failure, this parameter will reference 1255 * @bad_recv_wr: On an immediate failure, this parameter will reference
1261 * the work request that failed to be posted on the SRQ. 1256 * the work request that failed to be posted on the SRQ.
1262 */ 1257 */
1263 static inline int ib_post_srq_recv(struct ib_srq *srq, 1258 static inline int ib_post_srq_recv(struct ib_srq *srq,
1264 struct ib_recv_wr *recv_wr, 1259 struct ib_recv_wr *recv_wr,
1265 struct ib_recv_wr **bad_recv_wr) 1260 struct ib_recv_wr **bad_recv_wr)
1266 { 1261 {
1267 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 1262 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1268 } 1263 }
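
A minimal sketch tying ib_create_srq() and ib_post_srq_recv() together; the PD, DMA mapping, and lkey are assumed to have been set up elsewhere, and the queue sizes are arbitrary:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int example_srq_recv(struct ib_pd *pd, u64 dma_addr, u32 lkey)
{
        struct ib_srq_init_attr init_attr = {
                .attr = {
                        .max_wr  = 64,  /* requested depth; may be rounded up */
                        .max_sge = 1,
                },
        };
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = 2048,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = 1,   /* returned in the work completion */
                .sg_list = &sge,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad_wr;
        struct ib_srq *srq;
        int ret;

        srq = ib_create_srq(pd, &init_attr);
        if (IS_ERR(srq))
                return PTR_ERR(srq);

        ret = ib_post_srq_recv(srq, &wr, &bad_wr);
        if (ret)
                ib_destroy_srq(srq);
        return ret;
}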
1269 1264
1270 /** 1265 /**
1271 * ib_create_qp - Creates a QP associated with the specified protection 1266 * ib_create_qp - Creates a QP associated with the specified protection
1272 * domain. 1267 * domain.
1273 * @pd: The protection domain associated with the QP. 1268 * @pd: The protection domain associated with the QP.
1274 * @qp_init_attr: A list of initial attributes required to create the 1269 * @qp_init_attr: A list of initial attributes required to create the
1275 * QP. If QP creation succeeds, then the attributes are updated to 1270 * QP. If QP creation succeeds, then the attributes are updated to
1276 * the actual capabilities of the created QP. 1271 * the actual capabilities of the created QP.
1277 */ 1272 */
1278 struct ib_qp *ib_create_qp(struct ib_pd *pd, 1273 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1279 struct ib_qp_init_attr *qp_init_attr); 1274 struct ib_qp_init_attr *qp_init_attr);
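
As a hedged example, an RC QP might be created on an existing PD and CQ like this; the depths are placeholders, and as with SRQs the device may round them up:

#include <rdma/ib_verbs.h>

static struct ib_qp *example_create_qp(struct ib_pd *pd, struct ib_cq *cq)
{
        struct ib_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,  /* send and receive may share one CQ */
                .cap     = {
                        .max_send_wr  = 16,
                        .max_recv_wr  = 16,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type     = IB_QPT_RC,
        };

        return ib_create_qp(pd, &init_attr);
}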
1280 1275
1281 /** 1276 /**
1282 * ib_modify_qp - Modifies the attributes for the specified QP and then 1277 * ib_modify_qp - Modifies the attributes for the specified QP and then
1283 * transitions the QP to the given state. 1278 * transitions the QP to the given state.
1284 * @qp: The QP to modify. 1279 * @qp: The QP to modify.
1285 * @qp_attr: On input, specifies the QP attributes to modify. On output, 1280 * @qp_attr: On input, specifies the QP attributes to modify. On output,
1286 * the current values of selected QP attributes are returned. 1281 * the current values of selected QP attributes are returned.
1287 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 1282 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1288 * are being modified. 1283 * are being modified.
1289 */ 1284 */
1290 int ib_modify_qp(struct ib_qp *qp, 1285 int ib_modify_qp(struct ib_qp *qp,
1291 struct ib_qp_attr *qp_attr, 1286 struct ib_qp_attr *qp_attr,
1292 int qp_attr_mask); 1287 int qp_attr_mask);
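
A sketch of the first transition a consumer typically drives with ib_modify_qp(), RESET to INIT; the pkey index, port number, and access flags below are placeholder values:

#include <rdma/ib_verbs.h>

static int example_qp_to_init(struct ib_qp *qp)
{
        struct ib_qp_attr attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = 1,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };

        /* The mask names exactly the attributes being supplied. */
        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}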
1293 1288
1294 /** 1289 /**
1295 * ib_query_qp - Returns the attribute list and current values for the 1290 * ib_query_qp - Returns the attribute list and current values for the
1296 * specified QP. 1291 * specified QP.
1297 * @qp: The QP to query. 1292 * @qp: The QP to query.
1298 * @qp_attr: The attributes of the specified QP. 1293 * @qp_attr: The attributes of the specified QP.
1299 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 1294 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1300 * @qp_init_attr: Additional attributes of the selected QP. 1295 * @qp_init_attr: Additional attributes of the selected QP.
1301 * 1296 *
1302 * The qp_attr_mask may be used to limit the query to gathering only the 1297 * The qp_attr_mask may be used to limit the query to gathering only the
1303 * selected attributes. 1298 * selected attributes.
1304 */ 1299 */
1305 int ib_query_qp(struct ib_qp *qp, 1300 int ib_query_qp(struct ib_qp *qp,
1306 struct ib_qp_attr *qp_attr, 1301 struct ib_qp_attr *qp_attr,
1307 int qp_attr_mask, 1302 int qp_attr_mask,
1308 struct ib_qp_init_attr *qp_init_attr); 1303 struct ib_qp_init_attr *qp_init_attr);
1309 1304
1310 /** 1305 /**
1311 * ib_destroy_qp - Destroys the specified QP. 1306 * ib_destroy_qp - Destroys the specified QP.
1312 * @qp: The QP to destroy. 1307 * @qp: The QP to destroy.
1313 */ 1308 */
1314 int ib_destroy_qp(struct ib_qp *qp); 1309 int ib_destroy_qp(struct ib_qp *qp);
1315 1310
1316 /** 1311 /**
1317 * ib_post_send - Posts a list of work requests to the send queue of 1312 * ib_post_send - Posts a list of work requests to the send queue of
1318 * the specified QP. 1313 * the specified QP.
1319 * @qp: The QP to post the work request on. 1314 * @qp: The QP to post the work request on.
1320 * @send_wr: A list of work requests to post on the send queue. 1315 * @send_wr: A list of work requests to post on the send queue.
1321 * @bad_send_wr: On an immediate failure, this parameter will reference 1316 * @bad_send_wr: On an immediate failure, this parameter will reference
1322 * the work request that failed to be posted on the QP. 1317 * the work request that failed to be posted on the QP.
1323 */ 1318 */
1324 static inline int ib_post_send(struct ib_qp *qp, 1319 static inline int ib_post_send(struct ib_qp *qp,
1325 struct ib_send_wr *send_wr, 1320 struct ib_send_wr *send_wr,
1326 struct ib_send_wr **bad_send_wr) 1321 struct ib_send_wr **bad_send_wr)
1327 { 1322 {
1328 return qp->device->post_send(qp, send_wr, bad_send_wr); 1323 return qp->device->post_send(qp, send_wr, bad_send_wr);
1329 } 1324 }
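
A minimal, illustrative send posting; the buffer is assumed to be DMA-mapped already, with lkey coming from a registered memory region:

#include <rdma/ib_verbs.h>

static int example_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = 2,
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* generate a completion */
        };
        struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &wr, &bad_wr);
}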
1330 1325
1331 /** 1326 /**
1332 * ib_post_recv - Posts a list of work requests to the receive queue of 1327 * ib_post_recv - Posts a list of work requests to the receive queue of
1333 * the specified QP. 1328 * the specified QP.
1334 * @qp: The QP to post the work request on. 1329 * @qp: The QP to post the work request on.
1335 * @recv_wr: A list of work requests to post on the receive queue. 1330 * @recv_wr: A list of work requests to post on the receive queue.
1336 * @bad_recv_wr: On an immediate failure, this parameter will reference 1331 * @bad_recv_wr: On an immediate failure, this parameter will reference
1337 * the work request that failed to be posted on the QP. 1332 * the work request that failed to be posted on the QP.
1338 */ 1333 */
1339 static inline int ib_post_recv(struct ib_qp *qp, 1334 static inline int ib_post_recv(struct ib_qp *qp,
1340 struct ib_recv_wr *recv_wr, 1335 struct ib_recv_wr *recv_wr,
1341 struct ib_recv_wr **bad_recv_wr) 1336 struct ib_recv_wr **bad_recv_wr)
1342 { 1337 {
1343 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 1338 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1344 } 1339 }
1345 1340
1346 /** 1341 /**
1347 * ib_create_cq - Creates a CQ on the specified device. 1342 * ib_create_cq - Creates a CQ on the specified device.
1348 * @device: The device on which to create the CQ. 1343 * @device: The device on which to create the CQ.
1349 * @comp_handler: A user-specified callback that is invoked when a 1344 * @comp_handler: A user-specified callback that is invoked when a
1350 * completion event occurs on the CQ. 1345 * completion event occurs on the CQ.
1351 * @event_handler: A user-specified callback that is invoked when an 1346 * @event_handler: A user-specified callback that is invoked when an
1352 * asynchronous event not associated with a completion occurs on the CQ. 1347 * asynchronous event not associated with a completion occurs on the CQ.
1353 * @cq_context: Context associated with the CQ returned to the user via 1348 * @cq_context: Context associated with the CQ returned to the user via
1354 * the associated completion and event handlers. 1349 * the associated completion and event handlers.
1355 * @cqe: The minimum size of the CQ. 1350 * @cqe: The minimum size of the CQ.
1356 * @comp_vector: Completion vector used to signal completion events. 1351 * @comp_vector: Completion vector used to signal completion events.
1357 * Must be >= 0 and < device->num_comp_vectors. 1352 * Must be >= 0 and < device->num_comp_vectors.
1358 * 1353 *
1359 * Users can examine the cq structure to determine the actual CQ size. 1354 * Users can examine the cq structure to determine the actual CQ size.
1360 */ 1355 */
1361 struct ib_cq *ib_create_cq(struct ib_device *device, 1356 struct ib_cq *ib_create_cq(struct ib_device *device,
1362 ib_comp_handler comp_handler, 1357 ib_comp_handler comp_handler,
1363 void (*event_handler)(struct ib_event *, void *), 1358 void (*event_handler)(struct ib_event *, void *),
1364 void *cq_context, int cqe, int comp_vector); 1359 void *cq_context, int cqe, int comp_vector);
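
An illustrative CQ creation with a hypothetical completion handler on vector 0; 128 is just a requested minimum size, and the actual size can be read back from the returned cq structure:

#include <rdma/ib_verbs.h>

/* Hypothetical handler: invoked when a completion event fires. */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
        /* Typically re-arms via ib_req_notify_cq() and defers polling. */
}

static struct ib_cq *example_create_cq(struct ib_device *device, void *ctx)
{
        /* No async event handler here; cqe = 128, comp_vector = 0. */
        return ib_create_cq(device, example_comp_handler, NULL, ctx, 128, 0);
}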
1365 1360
1366 /** 1361 /**
1367 * ib_resize_cq - Modifies the capacity of the CQ. 1362 * ib_resize_cq - Modifies the capacity of the CQ.
1368 * @cq: The CQ to resize. 1363 * @cq: The CQ to resize.
1369 * @cqe: The minimum size of the CQ. 1364 * @cqe: The minimum size of the CQ.
1370 * 1365 *
1371 * Users can examine the cq structure to determine the actual CQ size. 1366 * Users can examine the cq structure to determine the actual CQ size.
1372 */ 1367 */
1373 int ib_resize_cq(struct ib_cq *cq, int cqe); 1368 int ib_resize_cq(struct ib_cq *cq, int cqe);
1374 1369
1375 /** 1370 /**
1376 * ib_destroy_cq - Destroys the specified CQ. 1371 * ib_destroy_cq - Destroys the specified CQ.
1377 * @cq: The CQ to destroy. 1372 * @cq: The CQ to destroy.
1378 */ 1373 */
1379 int ib_destroy_cq(struct ib_cq *cq); 1374 int ib_destroy_cq(struct ib_cq *cq);
1380 1375
1381 /** 1376 /**
1382 * ib_poll_cq - poll a CQ for completion(s) 1377 * ib_poll_cq - poll a CQ for completion(s)
1383 * @cq:the CQ being polled 1378 * @cq:the CQ being polled
1384 * @num_entries:maximum number of completions to return 1379 * @num_entries:maximum number of completions to return
1385 * @wc:array of at least @num_entries &struct ib_wc where completions 1380 * @wc:array of at least @num_entries &struct ib_wc where completions
1386 * will be returned 1381 * will be returned
1387 * 1382 *
1388 * Poll a CQ for (possibly multiple) completions. If the return value 1383 * Poll a CQ for (possibly multiple) completions. If the return value
1389 * is < 0, an error occurred. If the return value is >= 0, it is the 1384 * is < 0, an error occurred. If the return value is >= 0, it is the
1390 * number of completions returned. If the return value is 1385 * number of completions returned. If the return value is
1391 * non-negative and < num_entries, then the CQ was emptied. 1386 * non-negative and < num_entries, then the CQ was emptied.
1392 */ 1387 */
1393 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 1388 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1394 struct ib_wc *wc) 1389 struct ib_wc *wc)
1395 { 1390 {
1396 return cq->device->poll_cq(cq, num_entries, wc); 1391 return cq->device->poll_cq(cq, num_entries, wc);
1397 } 1392 }
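
A sketch of the usual drain loop built on the return-value contract above, polling one completion at a time until the CQ is empty:

#include <rdma/ib_verbs.h>

static void example_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        /* > 0 means a completion was returned; 0 means the CQ is empty. */
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status != IB_WC_SUCCESS) {
                        /* wc.wr_id identifies the failed work request */
                        continue;
                }
                /* ... process the completion keyed by wc.wr_id ... */
        }
}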
1398 1393
1399 /** 1394 /**
1400 * ib_peek_cq - Returns the number of unreaped completions currently 1395 * ib_peek_cq - Returns the number of unreaped completions currently
1401 * on the specified CQ. 1396 * on the specified CQ.
1402 * @cq: The CQ to peek. 1397 * @cq: The CQ to peek.
1403 * @wc_cnt: A minimum number of unreaped completions to check for. 1398 * @wc_cnt: A minimum number of unreaped completions to check for.
1404 * 1399 *
1405 * If the number of unreaped completions is greater than or equal to wc_cnt, 1400 * If the number of unreaped completions is greater than or equal to wc_cnt,
1406 * this function returns wc_cnt; otherwise, it returns the actual number of 1401 * this function returns wc_cnt; otherwise, it returns the actual number of
1407 * unreaped completions. 1402 * unreaped completions.
1408 */ 1403 */
1409 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 1404 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1410 1405
1411 /** 1406 /**
1412 * ib_req_notify_cq - Request completion notification on a CQ. 1407 * ib_req_notify_cq - Request completion notification on a CQ.
1413 * @cq: The CQ to generate an event for. 1408 * @cq: The CQ to generate an event for.
1414 * @flags: 1409 * @flags:
1415 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 1410 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1416 * to request an event on the next solicited event or next work 1411 * to request an event on the next solicited event or next work
1417 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 1412 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1418 * may also be |ed in to request a hint about missed events, as 1413 * may also be |ed in to request a hint about missed events, as
1419 * described below. 1414 * described below.
1420 * 1415 *
1421 * Return Value: 1416 * Return Value:
1422 * < 0 means an error occurred while requesting notification 1417 * < 0 means an error occurred while requesting notification
1423 * == 0 means notification was requested successfully, and if 1418 * == 0 means notification was requested successfully, and if
1424 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 1419 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1425 * were missed and it is safe to wait for another event. In 1420 * were missed and it is safe to wait for another event. In
1426 * this case it is guaranteed that any work completions added 1421 * this case it is guaranteed that any work completions added
1427 * to the CQ since the last CQ poll will trigger a completion 1422 * to the CQ since the last CQ poll will trigger a completion
1428 * notification event. 1423 * notification event.
1429 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 1424 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1430 * in. It means that the consumer must poll the CQ again to 1425 * in. It means that the consumer must poll the CQ again to
1431 * make sure it is empty to avoid missing an event because of a 1426 * make sure it is empty to avoid missing an event because of a
1432 * race between requesting notification and an entry being 1427 * race between requesting notification and an entry being
1433 * added to the CQ. This return value means it is possible 1428 * added to the CQ. This return value means it is possible
1434 * (but not guaranteed) that a work completion has been added 1429 * (but not guaranteed) that a work completion has been added
1435 * to the CQ since the last poll without triggering a 1430 * to the CQ since the last poll without triggering a
1436 * completion notification event. 1431 * completion notification event.
1437 */ 1432 */
1438 static inline int ib_req_notify_cq(struct ib_cq *cq, 1433 static inline int ib_req_notify_cq(struct ib_cq *cq,
1439 enum ib_cq_notify_flags flags) 1434 enum ib_cq_notify_flags flags)
1440 { 1435 {
1441 return cq->device->req_notify_cq(cq, flags); 1436 return cq->device->req_notify_cq(cq, flags);
1442 } 1437 }
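
The return-value contract documented above implies the following re-arm idiom (a sketch, not from this commit): poll until empty, request notification, and poll again whenever a possibly missed event is reported:

#include <rdma/ib_verbs.h>

static void example_rearm(struct ib_cq *cq)
{
        struct ib_wc wc;

        do {
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        ; /* ... process wc here ... */
                /* A return > 0 means an entry may have slipped in between
                 * the last poll and the notification request: loop again. */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}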
1443 1438
1444 /** 1439 /**
1445 * ib_req_ncomp_notif - Request completion notification when there are 1440 * ib_req_ncomp_notif - Request completion notification when there are
1446 * at least the specified number of unreaped completions on the CQ. 1441 * at least the specified number of unreaped completions on the CQ.
1447 * @cq: The CQ to generate an event for. 1442 * @cq: The CQ to generate an event for.
1448 * @wc_cnt: The number of unreaped completions that should be on the 1443 * @wc_cnt: The number of unreaped completions that should be on the
1449 * CQ before an event is generated. 1444 * CQ before an event is generated.
1450 */ 1445 */
1451 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 1446 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1452 { 1447 {
1453 return cq->device->req_ncomp_notif ? 1448 return cq->device->req_ncomp_notif ?
1454 cq->device->req_ncomp_notif(cq, wc_cnt) : 1449 cq->device->req_ncomp_notif(cq, wc_cnt) :
1455 -ENOSYS; 1450 -ENOSYS;
1456 } 1451 }
1457 1452
1458 /** 1453 /**
1459 * ib_get_dma_mr - Returns a memory region for system memory that is 1454 * ib_get_dma_mr - Returns a memory region for system memory that is
1460 * usable for DMA. 1455 * usable for DMA.
1461 * @pd: The protection domain associated with the memory region. 1456 * @pd: The protection domain associated with the memory region.
1462 * @mr_access_flags: Specifies the memory access rights. 1457 * @mr_access_flags: Specifies the memory access rights.
1463 * 1458 *
1464 * Note that the ib_dma_*() functions defined below must be used 1459 * Note that the ib_dma_*() functions defined below must be used
1465 * to create/destroy addresses used with the Lkey or Rkey returned 1460 * to create/destroy addresses used with the Lkey or Rkey returned
1466 * by ib_get_dma_mr(). 1461 * by ib_get_dma_mr().
1467 */ 1462 */
1468 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1463 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1469 1464
1470 /** 1465 /**
1471 * ib_dma_mapping_error - check a DMA addr for error 1466 * ib_dma_mapping_error - check a DMA addr for error
1472 * @dev: The device for which the dma_addr was created 1467 * @dev: The device for which the dma_addr was created
1473 * @dma_addr: The DMA address to check 1468 * @dma_addr: The DMA address to check
1474 */ 1469 */
1475 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 1470 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
1476 { 1471 {
1477 if (dev->dma_ops) 1472 if (dev->dma_ops)
1478 return dev->dma_ops->mapping_error(dev, dma_addr); 1473 return dev->dma_ops->mapping_error(dev, dma_addr);
1479 return dma_mapping_error(dma_addr); 1474 return dma_mapping_error(dma_addr);
1480 } 1475 }
1481 1476
1482 /** 1477 /**
1483 * ib_dma_map_single - Map a kernel virtual address to DMA address 1478 * ib_dma_map_single - Map a kernel virtual address to DMA address
1484 * @dev: The device for which the dma_addr is to be created 1479 * @dev: The device for which the dma_addr is to be created
1485 * @cpu_addr: The kernel virtual address 1480 * @cpu_addr: The kernel virtual address
1486 * @size: The size of the region in bytes 1481 * @size: The size of the region in bytes
1487 * @direction: The direction of the DMA 1482 * @direction: The direction of the DMA
1488 */ 1483 */
1489 static inline u64 ib_dma_map_single(struct ib_device *dev, 1484 static inline u64 ib_dma_map_single(struct ib_device *dev,
1490 void *cpu_addr, size_t size, 1485 void *cpu_addr, size_t size,
1491 enum dma_data_direction direction) 1486 enum dma_data_direction direction)
1492 { 1487 {
1493 if (dev->dma_ops) 1488 if (dev->dma_ops)
1494 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 1489 return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
1495 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 1490 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
1496 } 1491 }
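
A hedged sketch of the mapping-plus-check pattern, pairing ib_dma_map_single() with ib_dma_mapping_error() defined above; the function name is illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int example_map_buf(struct ib_device *dev, void *buf, size_t len,
                           u64 *dma_addr)
{
        *dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(dev, *dma_addr))
                return -ENOMEM;    /* mapping failed; nothing to unmap */
        return 0;
}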
1497 1492
1498 /** 1493 /**
1499 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 1494 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
1500 * @dev: The device for which the DMA address was created 1495 * @dev: The device for which the DMA address was created
1501 * @addr: The DMA address 1496 * @addr: The DMA address
1502 * @size: The size of the region in bytes 1497 * @size: The size of the region in bytes
1503 * @direction: The direction of the DMA 1498 * @direction: The direction of the DMA
1504 */ 1499 */
1505 static inline void ib_dma_unmap_single(struct ib_device *dev, 1500 static inline void ib_dma_unmap_single(struct ib_device *dev,
1506 u64 addr, size_t size, 1501 u64 addr, size_t size,
1507 enum dma_data_direction direction) 1502 enum dma_data_direction direction)
1508 { 1503 {
1509 if (dev->dma_ops) 1504 if (dev->dma_ops)
1510 dev->dma_ops->unmap_single(dev, addr, size, direction); 1505 dev->dma_ops->unmap_single(dev, addr, size, direction);
1511 else 1506 else
1512 dma_unmap_single(dev->dma_device, addr, size, direction); 1507 dma_unmap_single(dev->dma_device, addr, size, direction);
1513 } 1508 }
1514 1509
1515 /** 1510 /**
1516 * ib_dma_map_page - Map a physical page to DMA address 1511 * ib_dma_map_page - Map a physical page to DMA address
1517 * @dev: The device for which the dma_addr is to be created 1512 * @dev: The device for which the dma_addr is to be created
1518 * @page: The page to be mapped 1513 * @page: The page to be mapped
1519 * @offset: The offset within the page 1514 * @offset: The offset within the page
1520 * @size: The size of the region in bytes 1515 * @size: The size of the region in bytes
1521 * @direction: The direction of the DMA 1516 * @direction: The direction of the DMA
1522 */ 1517 */
1523 static inline u64 ib_dma_map_page(struct ib_device *dev, 1518 static inline u64 ib_dma_map_page(struct ib_device *dev,
1524 struct page *page, 1519 struct page *page,
1525 unsigned long offset, 1520 unsigned long offset,
1526 size_t size, 1521 size_t size,
1527 enum dma_data_direction direction) 1522 enum dma_data_direction direction)
1528 { 1523 {
1529 if (dev->dma_ops) 1524 if (dev->dma_ops)
1530 return dev->dma_ops->map_page(dev, page, offset, size, direction); 1525 return dev->dma_ops->map_page(dev, page, offset, size, direction);
1531 return dma_map_page(dev->dma_device, page, offset, size, direction); 1526 return dma_map_page(dev->dma_device, page, offset, size, direction);
1532 } 1527 }
1533 1528
1534 /** 1529 /**
1535 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 1530 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
1536 * @dev: The device for which the DMA address was created 1531 * @dev: The device for which the DMA address was created
1537 * @addr: The DMA address 1532 * @addr: The DMA address
1538 * @size: The size of the region in bytes 1533 * @size: The size of the region in bytes
1539 * @direction: The direction of the DMA 1534 * @direction: The direction of the DMA
1540 */ 1535 */
1541 static inline void ib_dma_unmap_page(struct ib_device *dev, 1536 static inline void ib_dma_unmap_page(struct ib_device *dev,
1542 u64 addr, size_t size, 1537 u64 addr, size_t size,
1543 enum dma_data_direction direction) 1538 enum dma_data_direction direction)
1544 { 1539 {
1545 if (dev->dma_ops) 1540 if (dev->dma_ops)
1546 dev->dma_ops->unmap_page(dev, addr, size, direction); 1541 dev->dma_ops->unmap_page(dev, addr, size, direction);
1547 else 1542 else
1548 dma_unmap_page(dev->dma_device, addr, size, direction); 1543 dma_unmap_page(dev->dma_device, addr, size, direction);
1549 } 1544 }
1550 1545
1551 /** 1546 /**
1552 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 1547 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
1553 * @dev: The device for which the DMA addresses are to be created 1548 * @dev: The device for which the DMA addresses are to be created
1554 * @sg: The array of scatter/gather entries 1549 * @sg: The array of scatter/gather entries
1555 * @nents: The number of scatter/gather entries 1550 * @nents: The number of scatter/gather entries
1556 * @direction: The direction of the DMA 1551 * @direction: The direction of the DMA
1557 */ 1552 */
1558 static inline int ib_dma_map_sg(struct ib_device *dev, 1553 static inline int ib_dma_map_sg(struct ib_device *dev,
1559 struct scatterlist *sg, int nents, 1554 struct scatterlist *sg, int nents,
1560 enum dma_data_direction direction) 1555 enum dma_data_direction direction)
1561 { 1556 {
1562 if (dev->dma_ops) 1557 if (dev->dma_ops)
1563 return dev->dma_ops->map_sg(dev, sg, nents, direction); 1558 return dev->dma_ops->map_sg(dev, sg, nents, direction);
1564 return dma_map_sg(dev->dma_device, sg, nents, direction); 1559 return dma_map_sg(dev->dma_device, sg, nents, direction);
1565 } 1560 }
1566 1561
1567 /** 1562 /**
1568 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 1563 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
1569 * @dev: The device for which the DMA addresses were created 1564 * @dev: The device for which the DMA addresses were created
1570 * @sg: The array of scatter/gather entries 1565 * @sg: The array of scatter/gather entries
1571 * @nents: The number of scatter/gather entries 1566 * @nents: The number of scatter/gather entries
1572 * @direction: The direction of the DMA 1567 * @direction: The direction of the DMA
1573 */ 1568 */
1574 static inline void ib_dma_unmap_sg(struct ib_device *dev, 1569 static inline void ib_dma_unmap_sg(struct ib_device *dev,
1575 struct scatterlist *sg, int nents, 1570 struct scatterlist *sg, int nents,
1576 enum dma_data_direction direction) 1571 enum dma_data_direction direction)
1577 { 1572 {
1578 if (dev->dma_ops) 1573 if (dev->dma_ops)
1579 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 1574 dev->dma_ops->unmap_sg(dev, sg, nents, direction);
1580 else 1575 else
1581 dma_unmap_sg(dev->dma_device, sg, nents, direction); 1576 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1582 } 1577 }
1583 1578
1584 /** 1579 /**
1585 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 1580 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1586 * @dev: The device for which the DMA addresses were created 1581 * @dev: The device for which the DMA addresses were created
1587 * @sg: The scatter/gather entry 1582 * @sg: The scatter/gather entry
1588 */ 1583 */
1589 static inline u64 ib_sg_dma_address(struct ib_device *dev, 1584 static inline u64 ib_sg_dma_address(struct ib_device *dev,
1590 struct scatterlist *sg) 1585 struct scatterlist *sg)
1591 { 1586 {
1592 if (dev->dma_ops) 1587 if (dev->dma_ops)
1593 return dev->dma_ops->dma_address(dev, sg); 1588 return dev->dma_ops->dma_address(dev, sg);
1594 return sg_dma_address(sg); 1589 return sg_dma_address(sg);
1595 } 1590 }
1596 1591
1597 /** 1592 /**
1598 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 1593 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
1599 * @dev: The device for which the DMA addresses were created 1594 * @dev: The device for which the DMA addresses were created
1600 * @sg: The scatter/gather entry 1595 * @sg: The scatter/gather entry
1601 */ 1596 */
1602 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 1597 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
1603 struct scatterlist *sg) 1598 struct scatterlist *sg)
1604 { 1599 {
1605 if (dev->dma_ops) 1600 if (dev->dma_ops)
1606 return dev->dma_ops->dma_len(dev, sg); 1601 return dev->dma_ops->dma_len(dev, sg);
1607 return sg_dma_len(sg); 1602 return sg_dma_len(sg);
1608 } 1603 }
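
An illustrative sketch combining ib_dma_map_sg() with the two accessors above to turn each mapped scatterlist entry into an ib_sge; the lkey is assumed to come from a suitable memory region:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int example_build_sges(struct ib_device *dev, struct scatterlist *sg,
                              int nents, struct ib_sge *sges, u32 lkey)
{
        int i, mapped;

        /* map_sg may coalesce entries; 0 indicates failure. */
        mapped = ib_dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -ENOMEM;

        for (i = 0; i < mapped; ++i) {
                sges[i].addr   = ib_sg_dma_address(dev, &sg[i]);
                sges[i].length = ib_sg_dma_len(dev, &sg[i]);
                sges[i].lkey   = lkey;
        }
        return mapped;
}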

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
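
/*
 * Editor's sketch (not part of the original header): the usual ownership
 * handshake around CPU access to a streaming mapping.  "addr" and "len"
 * are assumed to come from a prior ib_dma_map_single()-style mapping of
 * a receive buffer.
 */
static inline void cpu_inspect_dma_region(struct ib_device *dev,
                                          u64 addr, size_t len)
{
        /* Transfer ownership of the region to the CPU before reading. */
        ib_dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

        /* ... the CPU may now safely read the buffer contents ... */

        /* Hand the region back before the device touches it again. */
        ib_dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}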

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
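
/*
 * Editor's sketch (not part of the original header): pairing
 * ib_dma_alloc_coherent() with ib_dma_free_coherent().  The one-page
 * size is arbitrary and error handling is reduced to the NULL check.
 */
static inline int coherent_buf_demo(struct ib_device *dev)
{
        u64 dma_handle;
        void *buf;

        buf = ib_dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* buf is CPU-addressable; dma_handle is what the HCA uses. */

        ib_dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
        return 0;
}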

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
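
/*
 * Editor's sketch (not part of the original header): registering a
 * single physical buffer.  "phys_addr" is assumed to be a DMA-able
 * physical address the caller already owns, and the region's I/O
 * virtual address is simply chosen to equal it.
 */
static inline struct ib_mr *reg_one_phys_buf(struct ib_pd *pd,
                                             u64 phys_addr, u64 size)
{
        struct ib_phys_buf buf = {
                .addr = phys_addr,
                .size = size,
        };
        u64 iova = phys_addr;

        return ib_reg_phys_mr(pd, &buf, 1,
                              IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
                              &iova);
}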

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region operation
 *   followed by a register physical memory region operation.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region;
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation; otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array; otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights; otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);
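
/*
 * Editor's sketch (not part of the original header): changing only the
 * access rights of an existing region.  Since only %IB_MR_REREG_ACCESS
 * is set, the PD and translation parameters are ignored per the
 * documentation above, so NULL/0 placeholders are assumed to be safe.
 */
static inline int mr_make_remote_writable(struct ib_mr *mr)
{
        return ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS,
                                NULL, NULL, 0,
                                IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
                                NULL);
}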

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
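
/*
 * Editor's sketch (not part of the original header): reading the local
 * and remote keys back out of a registered region.  This assumes
 * struct ib_mr_attr exposes lkey/rkey members as shown; verify the
 * struct definition before reuse.
 */
static inline int mr_print_keys(struct ib_mr *mr)
{
        struct ib_mr_attr attr;
        int ret;

        ret = ib_query_mr(mr, &attr);
        if (ret)
                return ret;

        printk(KERN_DEBUG "MR lkey 0x%x rkey 0x%x\n", attr.lkey, attr.rkey);
        return 0;
}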

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);
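
/*
 * Editor's sketch (not part of the original header): tying the three
 * memory-window calls together.  The ib_mw_bind field names (mr, wr_id,
 * addr, length, mw_access_flags) are assumed from this era's definition;
 * check the struct before reuse.  The wr_id value is a placeholder.
 */
static inline struct ib_mw *bind_window_over_mr(struct ib_pd *pd,
                                                struct ib_qp *qp,
                                                struct ib_mr *mr,
                                                u64 addr, u32 length)
{
        struct ib_mw_bind bind = {
                .mr              = mr,
                .wr_id           = 1,
                .addr            = addr,
                .length          = length,
                .mw_access_flags = IB_ACCESS_REMOTE_READ,
        };
        struct ib_mw *mw = ib_alloc_mw(pd);

        if (IS_ERR(mw))
                return mw;

        /* The bind is posted to the QP's send queue like any work request. */
        if (ib_bind_mw(qp, mw, &bind)) {
                ib_dealloc_mw(mw);
                return ERR_PTR(-EINVAL);
        }
        return mw;
}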

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}
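
/*
 * Editor's sketch (not part of the original header): building the u64
 * page array ib_map_phys_fmr() expects from one physically contiguous
 * region.  "base" and "npages" are caller-supplied assumptions, and the
 * FMR is assumed to have been allocated with page_shift == PAGE_SHIFT.
 */
static inline int fmr_map_contig(struct ib_fmr *fmr, u64 base,
                                 int npages, u64 iova)
{
        u64 pages[8];
        int i;

        if (npages > 8)
                return -EINVAL;

        for (i = 0; i < npages; i++)
                pages[i] = base + ((u64) i << PAGE_SHIFT);

        return ib_map_phys_fmr(fmr, pages, npages, iova);
}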

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
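
/*
 * Editor's sketch (not part of the original header): FMR teardown.
 * ib_unmap_fmr() takes a list, so even a single FMR goes through a
 * temporary list head; this assumes struct ib_fmr links through its
 * "list" member, as in this era's definition.
 */
static inline int fmr_destroy(struct ib_fmr *fmr)
{
        LIST_HEAD(one_fmr);
        int ret;

        list_add_tail(&fmr->list, &one_fmr);
        ret = ib_unmap_fmr(&one_fmr);
        if (ret)
                return ret;

        return ib_dealloc_fmr(fmr);
}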

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
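
/*
 * Editor's sketch (not part of the original header): pairing
 * ib_attach_mcast()/ib_detach_mcast() around a UD QP's membership in a
 * group.  The LID here is a placeholder; real GID/LID values come from
 * subnet administration (multicast LIDs start at 0xc000).
 */
static inline int mcast_join_leave_demo(struct ib_qp *qp, union ib_gid *mgid)
{
        u16 mlid = 0xc000;      /* placeholder multicast LID, host order */
        int ret;

        ret = ib_attach_mcast(qp, mgid, mlid);
        if (ret)
                return ret;

        /* ... send and receive multicast on the UD QP ... */

        return ib_detach_mcast(qp, mgid, mlid);
}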

#endif /* IB_VERBS_H */