Commit:       a67baeb77375199bbd842fa308cb565164dd1f19
Committed by: Konrad Rzeszutek Wilk
Parent:       b6514633bd (1 parent)
Exists in:    smarc-l5.0.0_1.0.0-ga and 5 other branches
xen/gntdev: don't leak memory from IOCTL_GNTDEV_MAP_GRANT_REF

map->kmap_ops, allocated in gntdev_alloc_map(), wasn't freed by
gntdev_put_map(). Add a gntdev_free_map() helper function to free
everything allocated by gntdev_alloc_map().

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Cc: stable@vger.kernel.org
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
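The fix applies a common kernel idiom: when an allocation function populates several member arrays, pairing it with a single teardown helper keeps the error path and the normal release path from drifting apart, and that drift is exactly what leaked map->kmap_ops here (gntdev_alloc_map() allocated it, but gntdev_put_map() never freed it). Below is a minimal userspace C analogy of that idiom; it is not code from this driver, and the struct and field names are illustrative only.

    #include <stdlib.h>

    /* Illustrative analogue of grant_map: several co-allocated arrays. */
    struct res {
            int *a;
            int *b;
            int *c;
    };

    /*
     * Single teardown helper, mirroring gntdev_free_map(): it frees
     * everything res_alloc() may have allocated.  It is safe on a
     * partially initialized object because free(NULL) is a no-op,
     * just as kfree(NULL) is.
     */
    static void res_free(struct res *r)
    {
            if (r == NULL)
                    return;
            free(r->a);
            free(r->b);
            free(r->c);
            free(r);
    }

    static struct res *res_alloc(size_t count)
    {
            struct res *r = calloc(1, sizeof(*r));

            if (r == NULL)
                    return NULL;
            r->a = calloc(count, sizeof(*r->a));
            r->b = calloc(count, sizeof(*r->b));
            r->c = calloc(count, sizeof(*r->c));
            if (r->a == NULL || r->b == NULL || r->c == NULL) {
                    res_free(r); /* one call cleans up every partial allocation */
                    return NULL;
            }
            return r;
    }

Because every allocation made by res_alloc() is released in res_free(), adding a new member array only requires touching those two functions; no caller or error path can forget it, which is the failure mode this commit removes.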
Showing 1 changed file with 19 additions and 17 deletions (inline diff)
drivers/xen/gntdev.c
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1,812 +1,814 @@
 /******************************************************************************
  * gntdev.c
  *
  * Device for accessing (in user-space) pages that have been granted by other
  * domains.
  *
  * Copyright (c) 2006-2007, D G Murray.
  *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

 #undef DEBUG

 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/miscdevice.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/mmu_notifier.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>

 #include <xen/xen.h>
 #include <xen/grant_table.h>
 #include <xen/balloon.h>
 #include <xen/gntdev.h>
 #include <xen/events.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>

 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_DESCRIPTION("User-space granted page access driver");

 static int limit = 1024*1024;
 module_param(limit, int, 0644);
 MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

 static atomic_t pages_mapped = ATOMIC_INIT(0);

 static int use_ptemod;

 struct gntdev_priv {
	struct list_head maps;
	/* lock protects maps from concurrent changes */
	spinlock_t lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
 };

 struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
 };

 struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct gnttab_map_grant_ref *kmap_ops;
	struct page **pages;
 };

 static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

 /* ------------------------------------------------------------------ */

 static void gntdev_print_maps(struct gntdev_priv *priv,
			       char *text, int text_index)
 {
 #ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
			 map->index, map->count,
			 map->index == text_index && text ? text : "");
 #endif
 }

+static void gntdev_free_map(struct grant_map *map)
+{
+	if (map == NULL)
+		return;
+
+	if (map->pages)
+		free_xenballooned_pages(map->count, map->pages);
+	kfree(map->pages);
+	kfree(map->grants);
+	kfree(map->map_ops);
+	kfree(map->unmap_ops);
+	kfree(map->kmap_ops);
+	kfree(map);
+}
+
 static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 {
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants ||
	    NULL == add->map_ops ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops ||
	    NULL == add->pages)
		goto err;

	if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

 err:
-	kfree(add->pages);
-	kfree(add->grants);
-	kfree(add->map_ops);
-	kfree(add->unmap_ops);
-	kfree(add->kmap_ops);
-	kfree(add);
+	gntdev_free_map(add);
	return NULL;
 }

 static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
 {
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

 done:
	gntdev_print_maps(priv, "[new]", add->index);
 }

 static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
					       int index, int count)
 {
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
 }

 static void gntdev_put_map(struct grant_map *map)
 {
	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

-	if (map->pages) {
-		if (!use_ptemod)
-			unmap_grant_pages(map, 0, map->count);
-
-		free_xenballooned_pages(map->count, map->pages);
-	}
-	kfree(map->pages);
-	kfree(map->grants);
-	kfree(map->map_ops);
-	kfree(map->unmap_ops);
-	kfree(map);
+	if (map->pages && !use_ptemod)
+		unmap_grant_pages(map, 0, map->count);
+	gntdev_free_map(map);
 }

 /* ------------------------------------------------------------------ */

 static int find_grant_ptes(pte_t *pte, pgtable_t token,
			    unsigned long addr, void *data)
 {
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
 }

 static int map_grant_pages(struct grant_map *map)
 {
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned level;
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			pte_t *ptep;
			u64 pte_maddr = 0;
			BUG_ON(PageHighMem(map->pages[i]));

			ptep = lookup_address(address, &level);
			pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
			gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
					  map->flags |
					  GNTMAP_host_map |
					  GNTMAP_contains_pte,
					  map->grants[i].ref,
					  map->grants[i].domid);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		else {
			BUG_ON(map->map_ops[i].handle == -1);
			map->unmap_ops[i].handle = map->map_ops[i].handle;
			pr_debug("map handle=%d\n", map->map_ops[i].handle);
		}
	}
	return err;
 }

 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
			void __user *tmp = (void __user *)
				map->vma->vm_start + map->notify.addr;
			err = copy_to_user(tmp, &err, 1);
			if (err)
				return -EFAULT;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		} else if (pgno >= offset && pgno < offset + pages) {
			uint8_t *tmp = kmap(map->pages[pgno]);
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			kunmap(map->pages[pgno]);
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	err = gnttab_unmap_refs(map->unmap_ops + offset,
		use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
		pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
 }

 static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1) {
				range--;
				break;
			}
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
 }

 /* ------------------------------------------------------------------ */

 static void gntdev_vma_open(struct vm_area_struct *vma)
 {
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	atomic_inc(&map->users);
 }

 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	map->vma = NULL;
	vma->vm_private_data = NULL;
	gntdev_put_map(map);
 }

 static struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
 };

 /* ------------------------------------------------------------------ */

 static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
 {
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		mstart = max(start, map->vma->vm_start);
		mend = min(end, map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end,
				start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
 }

 static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
 {
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
 }

 static void mn_release(struct mmu_notifier *mn,
			struct mm_struct *mm)
 {
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
 }

 static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
 };

 /* ------------------------------------------------------------------ */

 static int gntdev_open(struct inode *inode, struct file *flip)
 {
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	spin_lock_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
 }

 static int gntdev_release(struct inode *inode, struct file *flip)
 {
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(map);
	}

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
 }

 static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
 {
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(map);
		return err;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
 }

 static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 {
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		err = 0;
	}
	spin_unlock(&priv->lock);
	if (map)
		gntdev_put_map(map);
	return err;
 }

 static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 {
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		return -EINVAL;

	map = vma->vm_private_data;
	if (!map)
		return -EINVAL;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return 0;
 }

 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 {
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	spin_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	spin_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
 }

 static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
 {
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
 }

 static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 {
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	spin_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			printk(KERN_WARNING "find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

 unlock_out:
	spin_unlock(&priv->lock);
	return err;

 out_unlock_put:
	spin_unlock(&priv->lock);
 out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(map);
	return err;
 }

 static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
 };

 static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
 };

 /* ------------------------------------------------------------------ */

 static int __init gntdev_init(void)
 {
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = xen_pv_domain();

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		printk(KERN_ERR "Could not register gntdev device\n");
		return err;
	}
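For context on how the fixed ioctl is reached, here is a minimal userspace sketch of a gntdev client. It is not part of the commit; it assumes the uapi header <xen/gntdev.h> from installed kernel headers, and the domid and ref values are placeholders that must name a real grant offered by a peer domain. Error handling is trimmed.

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <xen/gntdev.h> /* IOCTL_GNTDEV_MAP_GRANT_REF */

    /* Map a single foreign page granted as 'ref' by domain 'domid'. */
    static void *map_one_grant(uint32_t domid, uint32_t ref)
    {
            struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
            void *addr = NULL;
            int fd;

            op.refs[0].domid = domid;
            op.refs[0].ref = ref;

            fd = open("/dev/xen/gntdev", O_RDWR);
            if (fd < 0)
                    return NULL;

            /* Each call allocates a grant_map, including the kmap_ops
             * array whose teardown this commit fixes. */
            if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) == 0)
                    /* op.index is the offset to hand to mmap(). */
                    addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                MAP_SHARED, fd, op.index);

            close(fd); /* the mapping stays valid until munmap() */
            return addr == MAP_FAILED ? NULL : addr;
    }

Before this fix, every grant_map destroyed via gntdev_put_map() (for example, on munmap or device close after a loop of such calls) left its kmap_ops array allocated, so a long-running client could leak kernel memory steadily; with gntdev_free_map() the whole allocation is released in one place.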