Blame view
drivers/char/mem.c
20 KB
b24413180
|
1 |
// SPDX-License-Identifier: GPL-2.0 |
1da177e4c
|
2 3 4 5 6 |
/* * linux/drivers/char/mem.c * * Copyright (C) 1991, 1992 Linus Torvalds * |
d7d4d849b
|
7 |
* Added devfs support. |
1da177e4c
|
8 |
* Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu> |
af901ca18
|
9 |
* Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com> |
1da177e4c
|
10 |
*/ |
1da177e4c
|
11 12 13 14 15 16 17 18 19 20 |
#include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/random.h> #include <linux/init.h> #include <linux/raw.h> #include <linux/tty.h> #include <linux/capability.h> |
1da177e4c
|
21 22 |
#include <linux/ptrace.h> #include <linux/device.h> |
50b1fdbd8
|
23 |
#include <linux/highmem.h> |
1da177e4c
|
24 |
#include <linux/backing-dev.h> |
c01d5b300
|
25 |
#include <linux/shmem_fs.h> |
d6b29d7ce
|
26 |
#include <linux/splice.h> |
b8a3ad5b5
|
27 |
#include <linux/pfn.h> |
66300e66c
|
28 |
#include <linux/export.h> |
e1612de9e
|
29 |
#include <linux/io.h> |
e2e40f2c1
|
30 |
#include <linux/uio.h> |
1da177e4c
|
31 |
|
35b6c7e4a
|
32 |
#include <linux/uaccess.h> |
1da177e4c
|
33 34 35 36 |
#ifdef CONFIG_IA64 # include <linux/efi.h> #endif |
e1612de9e
|
37 |
#define DEVPORT_MINOR 4 |
f222318e9
|
38 39 40 41 |
static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { unsigned long sz; |
7fabaddd0
|
42 |
sz = PAGE_SIZE - (start & (PAGE_SIZE - 1)); |
f222318e9
|
43 |
|
7fabaddd0
|
44 |
return min(sz, size); |
f222318e9
|
45 |
} |
1da177e4c
|
46 |
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/*
 * Default check for /dev/mem accesses: the whole [addr, addr+count)
 * range must lie below the top of directly-mapped kernel memory.
 * Architectures with holes in their physical map override this.
 */
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

/* Default: any physical range may be mmap()ed (arch may override). */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif
d092633bf
|
57 |
#ifdef CONFIG_STRICT_DEVMEM
/*
 * With STRICT_DEVMEM the architecture decides, per pfn, whether
 * userspace may touch it through /dev/mem.  devmem_is_allowed()
 * may return 0 (deny), 1 (allow) or 2 (allow but read as zeroes).
 */
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}

/* Check every page of [pfn, pfn + size) against devmem_is_allowed(). */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
/* Without STRICT_DEVMEM every page is fair game. */
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
4707a341b
|
86 87 88 |
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/*
 * Weak default undo for xlate_dev_mem_ptr(): a no-op.  Architectures
 * that create temporary (e.g. uncached) mappings override this to
 * tear them down.
 */
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
e045fb2a9
|
92 |
|
1da177e4c
|
93 |
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;	/* kernel staging page; lets us probe before copying */
	int err;

	/* offset truncated when narrowed to phys_addr_t -> nothing there */
	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			/* pretend page 0 reads as zeroes */
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	/* copy at most one page per iteration, never crossing a page */
	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			/*
			 * Read via the bounce buffer with a fault-tolerant
			 * probe so a bad physical address cannot oops.
			 */
			probe = probe_kernel_read(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}
d7d4d849b
|
178 |
/*
 * Write to *physical* memory; f_pos is the physical address.
 * Returns bytes written, or a negative errno when nothing was written.
 */
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	/* offset truncated when narrowed to phys_addr_t */
	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				/* report the partial write if there was one */
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
d7d4d849b
|
244 |
/*
 * Weak hook letting an architecture veto or adjust the page protection
 * chosen for a /dev/mem mapping.  Default: always allowed, prot untouched.
 */
int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}
44ac84139
|
249 |
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
/* Decide whether a /dev/mem mapping of @addr must bypass the cache. */
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top the kernel knows about or through a
	 * file pointer
	 * that was marked O_DSYNC will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

/* Generic fallback: only downgrade to non-cached when required. */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif
5da6185bc
|
297 298 299 300 301 302 303 304 305 |
#ifndef CONFIG_MMU
/*
 * On NOMMU a /dev/mem mapping is just the physical address itself;
 * validate the range and hand back pgoff as the mapping address.
 */
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

/* /dev/zero mappings on NOMMU can only be private copies */
static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

/* With an MMU any mapping type is fine. */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif
f0f37e2f7
|
331 |
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	/* allow ptrace/gdb access to the mapped physical pages */
	.access = generic_access_phys
#endif
};

/*
 * mmap handler for /dev/mem: map physical pages (vm_pgoff is the pfn)
 * straight into the caller's address space after a series of policy
 * and validity checks.
 */
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	/* let the architecture pick cached vs. uncached protections */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
d7d4d849b
|
369 |
/*
 * mmap handler for /dev/kmem: translate the kernel-virtual offset into
 * a physical pfn, then defer to mmap_mem().
 */
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
1da177e4c
|
388 389 390 |
/*
 * This function reads the *virtual* memory as seen by the kernel.
 * Low (direct-mapped) memory is copied directly; addresses above
 * high_memory are read through vread() into a staging page.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		/* portion of the request that falls in the direct map */
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		/* remaining bytes live in vmalloc/module space */
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			/* vread() returns the number of bytes it copied */
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	/* report progress if any, otherwise the first error */
	return read ? read : err;
}
d7d4d849b
|
467 468 |
/*
 * Helper for write_kmem(): copy user data into directly-mapped kernel
 * memory at virtual address @p, page by page.
 */
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			/* partial success: report what made it, else fault */
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
1da177e4c
|
516 517 518 |
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 * Direct-mapped memory goes through do_write_kmem(); vmalloc/module
 * space goes through vwrite() via a staging page.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		/* short write (or error): stop here and report it */
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	/* total progress if any, otherwise the first error */
	return virtr + wrote ? : err;
}
d7d4d849b
|
567 |
/*
 * /dev/port read: each byte of output is an inb() from successive
 * I/O port numbers starting at *ppos (limited to the 64K port space).
 */
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
d7d4d849b
|
584 |
/*
 * /dev/port write: each input byte is outb()'d to successive I/O port
 * numbers starting at *ppos.  A fault mid-stream reports the partial
 * count; a fault on the first byte reports -EFAULT.
 */
static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
1da177e4c
|
607 |
|
d7d4d849b
|
608 |
/* /dev/null read: always EOF. */
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

/* /dev/null write: swallow everything, claim full success. */
static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

/* iov_iter variant of read_null(): immediate EOF. */
static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

/* iov_iter variant of write_null(): consume the iterator, report it all. */
static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}
1ebd32fc5
|
628 629 630 631 632 |
/* Per-buffer splice actor: discard the data, report it consumed. */
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

/* splice() into /dev/null: drain the pipe through pipe_to_null(). */
static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
13ba33e89
|
638 |
/*
 * /dev/zero read: fill the iterator with zeroes, at most one page per
 * step so signals and rescheduling stay responsive on huge reads.
 */
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		/* no progress but space left -> user buffer faulted */
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
d7d4d849b
|
657 |
/*
 * mmap of /dev/zero: shared mappings become shmem-backed; private
 * mappings need nothing special (anonymous zero pages on fault).
 */
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	/* NOTE: on NOMMU this returns unconditionally; the code below
	 * is compiled but unreachable there. */
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
1da177e4c
|
666 |
|
c01d5b300
|
667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 |
/*
 * Pick an address range for a /dev/zero mapping; delegates to shmem
 * for shared mappings so huge pages remain possible.
 */
static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}
d7d4d849b
|
688 |
/* /dev/full write: always pretend the disk is full. */
static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through: validate and store the absolute offset */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
890537b3a
|
735 |
/* Raw I/O capability gate shared by /dev/mem, /dev/kmem and /dev/port. */
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

/* several devices share identical handlers; alias them */
#define zero_lseek	null_lseek
#define full_lseek      null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
73f0718e7
|
746 |
/* /dev/mem: physical memory access (__maybe_unused: CONFIG_DEVMEM may be off) */
static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

/* /dev/kmem: kernel virtual memory access (CONFIG_DEVKMEM) */
static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

/* /dev/null */
static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

/* /dev/port: x86-style I/O port space (CONFIG_DEVPORT) */
static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

/* /dev/zero */
static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

/* /dev/full: reads like /dev/zero, writes always fail with ENOSPC */
static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};
389e0cb9a
|
799 800 |
/*
 * Table of the char-major-1 minor devices.  Array index == minor number;
 * holes (unset entries) are rejected by memory_open().
 */
static const struct memdev {
	const char *name;	/* /dev node name */
	umode_t mode;		/* default node mode; 0 = leave to udev */
	const struct file_operations *fops;
	fmode_t fmode;		/* extra f_mode bits forced at open */
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

/*
 * open() for the memory character major: dispatch to the per-minor
 * file_operations from devlist and chain to its own open handler.
 */
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}
62322d255
|
845 |
/* fops registered for the major; memory_open() swaps in the real ones */
static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};
2c9ede55e
|
849 |
/* Supply the default /dev node mode from devlist (NULL: keep default name). */
static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}
ca8eca688
|
855 |
static struct class *mem_class;

/*
 * Register char major 1, create the "mem" device class and one device
 * node per populated devlist entry, then initialise the tty layer.
 */
static int __init chr_dev_init(void)
{
	int minor;

	/* NOTE(review): printk without a KERN_* level; failure here is
	 * only logged, registration problems are not treated as fatal. */
	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);