Commit f67d9b1576c1c6e02100f8b27f4e9d66bbeb4d49

Authored by Bob Liu
Committed by Linus Torvalds
1 parent eb709b0d06

nommu: add page alignment to mmap

Currently, on nommu architectures, mmap(), mremap() and munmap() don't do
page_align(), which isn't consistent with mmu architectures and causes some issues.

First, some drivers' mmap() functions depend on vma->vm_end - vma->vm_start
being page aligned, which is true on mmu architectures but not on nommu.  e.g.: the
uvc camera driver.

Second, munmap() may return an -EINVAL [split file] error in cases where the end
is not page aligned (as passed in from userspace) but vma->vm_end is aligned
due to a split or the driver's mmap() ops.

Add page alignment to fix those issues.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Greg Ungerer <gerg@snapgear.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 14 additions and 9 deletions Side-by-side Diff

... ... @@ -1124,7 +1124,7 @@
1124 1124 unsigned long capabilities)
1125 1125 {
1126 1126 struct page *pages;
1127   - unsigned long total, point, n, rlen;
  1127 + unsigned long total, point, n;
1128 1128 void *base;
1129 1129 int ret, order;
1130 1130  
1131 1131  
... ... @@ -1148,13 +1148,12 @@
1148 1148 * make a private copy of the data and map that instead */
1149 1149 }
1150 1150  
1151   - rlen = PAGE_ALIGN(len);
1152 1151  
1153 1152 /* allocate some memory to hold the mapping
1154 1153 * - note that this may not return a page-aligned address if the object
1155 1154 * we're allocating is smaller than a page
1156 1155 */
1157   - order = get_order(rlen);
  1156 + order = get_order(len);
1158 1157 kdebug("alloc order %d for %lx", order, len);
1159 1158  
1160 1159 pages = alloc_pages(GFP_KERNEL, order);
... ... @@ -1164,7 +1163,7 @@
1164 1163 total = 1 << order;
1165 1164 atomic_long_add(total, &mmap_pages_allocated);
1166 1165  
1167   - point = rlen >> PAGE_SHIFT;
  1166 + point = len >> PAGE_SHIFT;
1168 1167  
1169 1168 /* we allocated a power-of-2 sized page set, so we may want to trim off
1170 1169 * the excess */
... ... @@ -1186,7 +1185,7 @@
1186 1185 base = page_address(pages);
1187 1186 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1188 1187 region->vm_start = (unsigned long) base;
1189   - region->vm_end = region->vm_start + rlen;
  1188 + region->vm_end = region->vm_start + len;
1190 1189 region->vm_top = region->vm_start + (total << PAGE_SHIFT);
1191 1190  
1192 1191 vma->vm_start = region->vm_start;
1193 1192  
... ... @@ -1202,15 +1201,15 @@
1202 1201  
1203 1202 old_fs = get_fs();
1204 1203 set_fs(KERNEL_DS);
1205   - ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
  1204 + ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1206 1205 set_fs(old_fs);
1207 1206  
1208 1207 if (ret < 0)
1209 1208 goto error_free;
1210 1209  
1211 1210 /* clear the last little bit */
1212   - if (ret < rlen)
1213   - memset(base + ret, 0, rlen - ret);
  1211 + if (ret < len)
  1212 + memset(base + ret, 0, len - ret);
1214 1213  
1215 1214 }
1216 1215  
... ... @@ -1259,6 +1258,7 @@
1259 1258  
1260 1259 /* we ignore the address hint */
1261 1260 addr = 0;
  1261 + len = PAGE_ALIGN(len);
1262 1262  
1263 1263 /* we've determined that we can make the mapping, now translate what we
1264 1264 * now know into VMA flags */
1265 1265  
1266 1266  
... ... @@ -1635,14 +1635,17 @@
1635 1635 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1636 1636 {
1637 1637 struct vm_area_struct *vma;
1638   - unsigned long end = start + len;
  1638 + unsigned long end;
1639 1639 int ret;
1640 1640  
1641 1641 kenter(",%lx,%zx", start, len);
1642 1642  
  1643 + len = PAGE_ALIGN(len);
1643 1644 if (len == 0)
1644 1645 return -EINVAL;
1645 1646  
  1647 + end = start + len;
  1648 +
1646 1649 /* find the first potentially overlapping VMA */
1647 1650 vma = find_vma(mm, start);
1648 1651 if (!vma) {
... ... @@ -1762,6 +1765,8 @@
1762 1765 struct vm_area_struct *vma;
1763 1766  
1764 1767 /* insanity checks first */
  1768 + old_len = PAGE_ALIGN(old_len);
  1769 + new_len = PAGE_ALIGN(new_len);
1765 1770 if (old_len == 0 || new_len == 0)
1766 1771 return (unsigned long) -EINVAL;
1767 1772