Commit 9d73777e500929b71dcfed16eec05f6760e345a6

Authored by Peter Zijlstra
Committed by Linus Torvalds
1 parent ab420e6d9c

clarify get_user_pages() prototype

Currently the 4th parameter of get_user_pages() is called len, but it
counts pages, not bytes.  Rename it to nr_pages to avoid future
confusion.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
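
The rename is purely about units: the argument was always a page count,
so a caller starting from a byte length has to convert before calling in.
A minimal sketch of that conversion (buf, size, pages and ret are
illustrative names, not part of this commit):

	/*
	 * Hypothetical caller: pin the user range [buf, buf + size).
	 * The 4th argument of get_user_pages() counts pages, not bytes,
	 * so derive nr_pages from the byte range first.
	 */
	unsigned long first = (unsigned long)buf >> PAGE_SHIFT;
	unsigned long last = ((unsigned long)buf + size - 1) >> PAGE_SHIFT;
	int nr_pages = last - first + 1;

	ret = get_user_pages(current, current->mm,
			     (unsigned long)buf & PAGE_MASK, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);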

Showing 3 changed files with 18 additions and 22 deletions

@@ -826,7 +826,7 @@
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int len, int write, int force,
+			unsigned long start, int nr_pages, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
@@ -1207,8 +1207,8 @@
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     unsigned long start, int nr_pages, int flags,
+		     struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@
 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-	if (len <= 0)
+	if (nr_pages <= 0)
 		return 0;
 	/*
 	 * Require read or write permissions.
@@ -1269,7 +1269,7 @@
 				vmas[i] = gate_vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
+			nr_pages--;
 			continue;
 		}
 
@@ -1280,7 +1280,7 @@
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &len, i, write);
+					&start, &nr_pages, i, write);
 			continue;
 		}
 
@@ -1357,9 +1357,9 @@
 				vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
-		} while (len && start < vma->vm_end);
-	} while (len);
+			nr_pages--;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
 	return i;
 }
 
@@ -1368,7 +1368,7 @@
  * @tsk:	task_struct of target task
  * @mm:		mm_struct of target mm
  * @start:	starting user address
- * @len:	number of pages from start to pin
+ * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
  * @force:	whether to force write access even if user mapping is
  *		readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@
  *		Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -1424,9 +1424,7 @@
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 
 EXPORT_SYMBOL(get_user_pages);
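
As the kernel-doc above spells out, the call can pin fewer pages than
requested, returns -errno when nothing was pinned, and each returned page
must be dropped with put_page() once done; the caller is also expected to
hold mmap_sem across the call. A minimal usage sketch against the renamed
prototype (NR, start, pages and got are illustrative names):

	struct page *pages[NR];	/* NR: hypothetical caller-chosen bound */
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, start, NR,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got < 0)
		return got;		/* nothing was pinned */

	/* ... use pages[0..got-1]; got may be less than NR ... */

	for (i = 0; i < got; i++)
		put_page(pages[i]);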
@@ -173,8 +173,8 @@
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
-		     struct page **pages, struct vm_area_struct **vmas)
+		     unsigned long start, int nr_pages, int flags,
+		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@
 	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
+	unsigned long start, int nr_pages, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,9 +234,7 @@
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
 
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
 
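For comparison, the get_user_pages_fast() variant that the kernel-doc
above points at for performance-critical use already counted in pages;
it also implies current->mm and does not require the caller to take
mmap_sem. A hedged sketch of the same pinning via the fast path (reusing
the illustrative names from the sketch above):

	/* Fast-path equivalent: no task/mm arguments and no caller-held
	 * mmap_sem; the count argument was already named nr_pages here. */
	got = get_user_pages_fast(start, NR, 1 /* write */, pages);
	if (got < 0)
		return got;
	for (i = 0; i < got; i++)
		put_page(pages[i]);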