Commit cae2e6cc002d6fdee7d8a230736fa7c685e54b35

Authored by Greg Ungerer
1 parent 48e1328e06

m68k: merge the mmu and non-mmu versions of sys_m68k.c

There is a lot of common code in the sys_m68k.c files. The mmu and non-mmu
versions can easily be merged into a single file.

There are really only two functions that differ between the two cases. A single
ifdef on CONFIG_MMU can take care of this. Alternatively, we could break
those two functions out and keep sys_m68k_no.c and sys_m68k_mm.c with
just that code in them (the Makefile could then build the right one).
Does anyone have strong feelings on which way they want this done?

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
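
For comparison, the alternative layout floated above would shrink sys_m68k_no.c
(and likewise sys_m68k_mm.c) to nothing but the two functions that actually
differ, sys_cacheflush() and sys_atomic_cmpxchg_32(), with the Makefile picking
the right file. A rough, hypothetical sketch of what the non-mmu file would
reduce to, reusing the code from the non-mmu half of the merged file below:

/* Hypothetical sys_m68k_no.c under the "two small files" alternative:
 * only the two functions that differ; everything else stays in the
 * common sys_m68k.c. Not part of this patch. */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/syscalls.h>
#include <asm/cacheflush.h>

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
{
        flush_cache_all();
        return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                      unsigned long __user *mem)
{
        struct mm_struct *mm = current->mm;
        unsigned long mem_value;

        down_read(&mm->mmap_sem);

        mem_value = *mem;
        if (mem_value == oldval)
                *mem = newval;

        up_read(&mm->mmap_sem);
        return mem_value;
}

The single-file version in this patch avoids the two extra files at the cost of
one CONFIG_MMU conditional around exactly these functions.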

Showing 3 changed files with 578 additions and 643 deletions

arch/m68k/kernel/sys_m68k.c
  1 +/*
  2 + * linux/arch/m68k/kernel/sys_m68k.c
  3 + *
  4 + * This file contains various random system calls that
  5 + * have a non-standard calling sequence on the Linux/m68k
  6 + * platform.
  7 + */
  8 +
  9 +#include <linux/capability.h>
  10 +#include <linux/errno.h>
  11 +#include <linux/sched.h>
  12 +#include <linux/mm.h>
  13 +#include <linux/fs.h>
  14 +#include <linux/smp.h>
  15 +#include <linux/sem.h>
  16 +#include <linux/msg.h>
  17 +#include <linux/shm.h>
  18 +#include <linux/stat.h>
  19 +#include <linux/syscalls.h>
  20 +#include <linux/mman.h>
  21 +#include <linux/file.h>
  22 +#include <linux/ipc.h>
  23 +
  24 +#include <asm/setup.h>
  25 +#include <asm/uaccess.h>
  26 +#include <asm/cachectl.h>
  27 +#include <asm/traps.h>
  28 +#include <asm/page.h>
  29 +#include <asm/unistd.h>
  30 +#include <asm/cacheflush.h>
  31 +
1 32 #ifdef CONFIG_MMU
2   -#include "sys_m68k_mm.c"
  33 +
  34 +#include <asm/tlb.h>
  35 +
  36 +asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
  37 + unsigned long error_code);
  38 +
  39 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
  40 + unsigned long prot, unsigned long flags,
  41 + unsigned long fd, unsigned long pgoff)
  42 +{
  43 + /*
  44 + * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
  45 + * so we need to shift the argument down by 1; m68k mmap64(3)
  46 + * (in libc) expects the last argument of mmap2 in 4Kb units.
  47 + */
  48 + return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
  49 +}
  50 +
  51 +/* Convert virtual (user) address VADDR to physical address PADDR */
  52 +#define virt_to_phys_040(vaddr) \
  53 +({ \
  54 + unsigned long _mmusr, _paddr; \
  55 + \
  56 + __asm__ __volatile__ (".chip 68040\n\t" \
  57 + "ptestr (%1)\n\t" \
  58 + "movec %%mmusr,%0\n\t" \
  59 + ".chip 68k" \
  60 + : "=r" (_mmusr) \
  61 + : "a" (vaddr)); \
  62 + _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
  63 + _paddr; \
  64 +})
  65 +
  66 +static inline int
  67 +cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
  68 +{
  69 + unsigned long paddr, i;
  70 +
  71 + switch (scope)
  72 + {
  73 + case FLUSH_SCOPE_ALL:
  74 + switch (cache)
  75 + {
  76 + case FLUSH_CACHE_DATA:
  77 + /* This nop is needed for some broken versions of the 68040. */
  78 + __asm__ __volatile__ ("nop\n\t"
  79 + ".chip 68040\n\t"
  80 + "cpusha %dc\n\t"
  81 + ".chip 68k");
  82 + break;
  83 + case FLUSH_CACHE_INSN:
  84 + __asm__ __volatile__ ("nop\n\t"
  85 + ".chip 68040\n\t"
  86 + "cpusha %ic\n\t"
  87 + ".chip 68k");
  88 + break;
  89 + default:
  90 + case FLUSH_CACHE_BOTH:
  91 + __asm__ __volatile__ ("nop\n\t"
  92 + ".chip 68040\n\t"
  93 + "cpusha %bc\n\t"
  94 + ".chip 68k");
  95 + break;
  96 + }
  97 + break;
  98 +
  99 + case FLUSH_SCOPE_LINE:
  100 + /* Find the physical address of the first mapped page in the
  101 + address range. */
  102 + if ((paddr = virt_to_phys_040(addr))) {
  103 + paddr += addr & ~(PAGE_MASK | 15);
  104 + len = (len + (addr & 15) + 15) >> 4;
  105 + } else {
  106 + unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
  107 +
  108 + if (len <= tmp)
  109 + return 0;
  110 + addr += tmp;
  111 + len -= tmp;
  112 + tmp = PAGE_SIZE;
  113 + for (;;)
  114 + {
  115 + if ((paddr = virt_to_phys_040(addr)))
  116 + break;
  117 + if (len <= tmp)
  118 + return 0;
  119 + addr += tmp;
  120 + len -= tmp;
  121 + }
  122 + len = (len + 15) >> 4;
  123 + }
  124 + i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
  125 + while (len--)
  126 + {
  127 + switch (cache)
  128 + {
  129 + case FLUSH_CACHE_DATA:
  130 + __asm__ __volatile__ ("nop\n\t"
  131 + ".chip 68040\n\t"
  132 + "cpushl %%dc,(%0)\n\t"
  133 + ".chip 68k"
  134 + : : "a" (paddr));
  135 + break;
  136 + case FLUSH_CACHE_INSN:
  137 + __asm__ __volatile__ ("nop\n\t"
  138 + ".chip 68040\n\t"
  139 + "cpushl %%ic,(%0)\n\t"
  140 + ".chip 68k"
  141 + : : "a" (paddr));
  142 + break;
  143 + default:
  144 + case FLUSH_CACHE_BOTH:
  145 + __asm__ __volatile__ ("nop\n\t"
  146 + ".chip 68040\n\t"
  147 + "cpushl %%bc,(%0)\n\t"
  148 + ".chip 68k"
  149 + : : "a" (paddr));
  150 + break;
  151 + }
  152 + if (!--i && len)
  153 + {
  154 + /*
  155 + * No need to page align here since it is done by
  156 + * virt_to_phys_040().
  157 + */
  158 + addr += PAGE_SIZE;
  159 + i = PAGE_SIZE / 16;
  160 + /* Recompute physical address when crossing a page
  161 + boundary. */
  162 + for (;;)
  163 + {
  164 + if ((paddr = virt_to_phys_040(addr)))
  165 + break;
  166 + if (len <= i)
  167 + return 0;
  168 + len -= i;
  169 + addr += PAGE_SIZE;
  170 + }
  171 + }
  172 + else
  173 + paddr += 16;
  174 + }
  175 + break;
  176 +
  177 + default:
  178 + case FLUSH_SCOPE_PAGE:
  179 + len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
  180 + for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
  181 + {
  182 + if (!(paddr = virt_to_phys_040(addr)))
  183 + continue;
  184 + switch (cache)
  185 + {
  186 + case FLUSH_CACHE_DATA:
  187 + __asm__ __volatile__ ("nop\n\t"
  188 + ".chip 68040\n\t"
  189 + "cpushp %%dc,(%0)\n\t"
  190 + ".chip 68k"
  191 + : : "a" (paddr));
  192 + break;
  193 + case FLUSH_CACHE_INSN:
  194 + __asm__ __volatile__ ("nop\n\t"
  195 + ".chip 68040\n\t"
  196 + "cpushp %%ic,(%0)\n\t"
  197 + ".chip 68k"
  198 + : : "a" (paddr));
  199 + break;
  200 + default:
  201 + case FLUSH_CACHE_BOTH:
  202 + __asm__ __volatile__ ("nop\n\t"
  203 + ".chip 68040\n\t"
  204 + "cpushp %%bc,(%0)\n\t"
  205 + ".chip 68k"
  206 + : : "a" (paddr));
  207 + break;
  208 + }
  209 + }
  210 + break;
  211 + }
  212 + return 0;
  213 +}
  214 +
  215 +#define virt_to_phys_060(vaddr) \
  216 +({ \
  217 + unsigned long paddr; \
  218 + __asm__ __volatile__ (".chip 68060\n\t" \
  219 + "plpar (%0)\n\t" \
  220 + ".chip 68k" \
  221 + : "=a" (paddr) \
  222 + : "0" (vaddr)); \
  223 + (paddr); /* XXX */ \
  224 +})
  225 +
  226 +static inline int
  227 +cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
  228 +{
  229 + unsigned long paddr, i;
  230 +
  231 + /*
  232 + * 68060 manual says:
  233 + * cpush %dc : flush DC, remains valid (with our %cacr setup)
  234 + * cpush %ic : invalidate IC
  235 + * cpush %bc : flush DC + invalidate IC
  236 + */
  237 + switch (scope)
  238 + {
  239 + case FLUSH_SCOPE_ALL:
  240 + switch (cache)
  241 + {
  242 + case FLUSH_CACHE_DATA:
  243 + __asm__ __volatile__ (".chip 68060\n\t"
  244 + "cpusha %dc\n\t"
  245 + ".chip 68k");
  246 + break;
  247 + case FLUSH_CACHE_INSN:
  248 + __asm__ __volatile__ (".chip 68060\n\t"
  249 + "cpusha %ic\n\t"
  250 + ".chip 68k");
  251 + break;
  252 + default:
  253 + case FLUSH_CACHE_BOTH:
  254 + __asm__ __volatile__ (".chip 68060\n\t"
  255 + "cpusha %bc\n\t"
  256 + ".chip 68k");
  257 + break;
  258 + }
  259 + break;
  260 +
  261 + case FLUSH_SCOPE_LINE:
  262 + /* Find the physical address of the first mapped page in the
  263 + address range. */
  264 + len += addr & 15;
  265 + addr &= -16;
  266 + if (!(paddr = virt_to_phys_060(addr))) {
  267 + unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
  268 +
  269 + if (len <= tmp)
  270 + return 0;
  271 + addr += tmp;
  272 + len -= tmp;
  273 + tmp = PAGE_SIZE;
  274 + for (;;)
  275 + {
  276 + if ((paddr = virt_to_phys_060(addr)))
  277 + break;
  278 + if (len <= tmp)
  279 + return 0;
  280 + addr += tmp;
  281 + len -= tmp;
  282 + }
  283 + }
  284 + len = (len + 15) >> 4;
  285 + i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
  286 + while (len--)
  287 + {
  288 + switch (cache)
  289 + {
  290 + case FLUSH_CACHE_DATA:
  291 + __asm__ __volatile__ (".chip 68060\n\t"
  292 + "cpushl %%dc,(%0)\n\t"
  293 + ".chip 68k"
  294 + : : "a" (paddr));
  295 + break;
  296 + case FLUSH_CACHE_INSN:
  297 + __asm__ __volatile__ (".chip 68060\n\t"
  298 + "cpushl %%ic,(%0)\n\t"
  299 + ".chip 68k"
  300 + : : "a" (paddr));
  301 + break;
  302 + default:
  303 + case FLUSH_CACHE_BOTH:
  304 + __asm__ __volatile__ (".chip 68060\n\t"
  305 + "cpushl %%bc,(%0)\n\t"
  306 + ".chip 68k"
  307 + : : "a" (paddr));
  308 + break;
  309 + }
  310 + if (!--i && len)
  311 + {
  312 +
  313 + /*
  314 + * We just want to jump to the first cache line
  315 + * in the next page.
  316 + */
  317 + addr += PAGE_SIZE;
  318 + addr &= PAGE_MASK;
  319 +
  320 + i = PAGE_SIZE / 16;
  321 + /* Recompute physical address when crossing a page
  322 + boundary. */
  323 + for (;;)
  324 + {
  325 + if ((paddr = virt_to_phys_060(addr)))
  326 + break;
  327 + if (len <= i)
  328 + return 0;
  329 + len -= i;
  330 + addr += PAGE_SIZE;
  331 + }
  332 + }
  333 + else
  334 + paddr += 16;
  335 + }
  336 + break;
  337 +
  338 + default:
  339 + case FLUSH_SCOPE_PAGE:
  340 + len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
  341 + addr &= PAGE_MASK; /* Workaround for bug in some
  342 + revisions of the 68060 */
  343 + for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
  344 + {
  345 + if (!(paddr = virt_to_phys_060(addr)))
  346 + continue;
  347 + switch (cache)
  348 + {
  349 + case FLUSH_CACHE_DATA:
  350 + __asm__ __volatile__ (".chip 68060\n\t"
  351 + "cpushp %%dc,(%0)\n\t"
  352 + ".chip 68k"
  353 + : : "a" (paddr));
  354 + break;
  355 + case FLUSH_CACHE_INSN:
  356 + __asm__ __volatile__ (".chip 68060\n\t"
  357 + "cpushp %%ic,(%0)\n\t"
  358 + ".chip 68k"
  359 + : : "a" (paddr));
  360 + break;
  361 + default:
  362 + case FLUSH_CACHE_BOTH:
  363 + __asm__ __volatile__ (".chip 68060\n\t"
  364 + "cpushp %%bc,(%0)\n\t"
  365 + ".chip 68k"
  366 + : : "a" (paddr));
  367 + break;
  368 + }
  369 + }
  370 + break;
  371 + }
  372 + return 0;
  373 +}
  374 +
  375 +/* sys_cacheflush -- flush (part of) the processor cache. */
  376 +asmlinkage int
  377 +sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  378 +{
  379 + struct vm_area_struct *vma;
  380 + int ret = -EINVAL;
  381 +
  382 + if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
  383 + cache & ~FLUSH_CACHE_BOTH)
  384 + goto out;
  385 +
  386 + if (scope == FLUSH_SCOPE_ALL) {
  387 + /* Only the superuser may explicitly flush the whole cache. */
  388 + ret = -EPERM;
  389 + if (!capable(CAP_SYS_ADMIN))
  390 + goto out;
  391 + } else {
  392 + /*
  393 + * Verify that the specified address region actually belongs
  394 + * to this process.
  395 + */
  396 + vma = find_vma (current->mm, addr);
  397 + ret = -EINVAL;
  398 + /* Check for overflow. */
  399 + if (addr + len < addr)
  400 + goto out;
  401 + if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
  402 + goto out;
  403 + }
  404 +
  405 + if (CPU_IS_020_OR_030) {
  406 + if (scope == FLUSH_SCOPE_LINE && len < 256) {
  407 + unsigned long cacr;
  408 + __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  409 + if (cache & FLUSH_CACHE_INSN)
  410 + cacr |= 4;
  411 + if (cache & FLUSH_CACHE_DATA)
  412 + cacr |= 0x400;
  413 + len >>= 2;
  414 + while (len--) {
  415 + __asm__ __volatile__ ("movec %1, %%caar\n\t"
  416 + "movec %0, %%cacr"
  417 + : /* no outputs */
  418 + : "r" (cacr), "r" (addr));
  419 + addr += 4;
  420 + }
  421 + } else {
  422 + /* Flush the whole cache, even if page granularity requested. */
  423 + unsigned long cacr;
  424 + __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  425 + if (cache & FLUSH_CACHE_INSN)
  426 + cacr |= 8;
  427 + if (cache & FLUSH_CACHE_DATA)
  428 + cacr |= 0x800;
  429 + __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
  430 + }
  431 + ret = 0;
  432 + goto out;
  433 + } else {
  434 + /*
  435 + * 040 or 060: don't blindly trust 'scope', someone could
  436 + * try to flush a few megs of memory.
  437 + */
  438 +
  439 + if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
  440 + scope=FLUSH_SCOPE_PAGE;
  441 + if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
  442 + scope=FLUSH_SCOPE_ALL;
  443 + if (CPU_IS_040) {
  444 + ret = cache_flush_040 (addr, scope, cache, len);
  445 + } else if (CPU_IS_060) {
  446 + ret = cache_flush_060 (addr, scope, cache, len);
  447 + }
  448 + }
  449 +out:
  450 + return ret;
  451 +}
  452 +
  453 +/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
  454 + D1 (newval). */
  455 +asmlinkage int
  456 +sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
  457 + unsigned long __user * mem)
  458 +{
  459 + /* This was borrowed from ARM's implementation. */
  460 + for (;;) {
  461 + struct mm_struct *mm = current->mm;
  462 + pgd_t *pgd;
  463 + pmd_t *pmd;
  464 + pte_t *pte;
  465 + spinlock_t *ptl;
  466 + unsigned long mem_value;
  467 +
  468 + down_read(&mm->mmap_sem);
  469 + pgd = pgd_offset(mm, (unsigned long)mem);
  470 + if (!pgd_present(*pgd))
  471 + goto bad_access;
  472 + pmd = pmd_offset(pgd, (unsigned long)mem);
  473 + if (!pmd_present(*pmd))
  474 + goto bad_access;
  475 + pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
  476 + if (!pte_present(*pte) || !pte_dirty(*pte)
  477 + || !pte_write(*pte)) {
  478 + pte_unmap_unlock(pte, ptl);
  479 + goto bad_access;
  480 + }
  481 +
  482 + mem_value = *mem;
  483 + if (mem_value == oldval)
  484 + *mem = newval;
  485 +
  486 + pte_unmap_unlock(pte, ptl);
  487 + up_read(&mm->mmap_sem);
  488 + return mem_value;
  489 +
  490 + bad_access:
  491 + up_read(&mm->mmap_sem);
  492 + /* This is not necessarily a bad access, we can get here if
  493 + a memory we're trying to write to should be copied-on-write.
  494 + Make the kernel do the necessary page stuff, then re-iterate.
  495 + Simulate a write access fault to do that. */
  496 + {
  497 + /* The first argument of the function corresponds to
  498 + D1, which is the first field of struct pt_regs. */
  499 + struct pt_regs *fp = (struct pt_regs *)&newval;
  500 +
  501 + /* '3' is an RMW flag. */
  502 + if (do_page_fault(fp, (unsigned long)mem, 3))
  503 + /* If the do_page_fault() failed, we don't
  504 + have anything meaningful to return.
  505 + There should be a SIGSEGV pending for
  506 + the process. */
  507 + return 0xdeadbeef;
  508 + }
  509 + }
  510 +}
  511 +
3 512 #else
4   -#include "sys_m68k_no.c"
5   -#endif
  513 +
  514 +/* sys_cacheflush -- flush (part of) the processor cache. */
  515 +asmlinkage int
  516 +sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  517 +{
  518 + flush_cache_all();
  519 + return 0;
  520 +}
  521 +
  522 +/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
  523 + D1 (newval). */
  524 +asmlinkage int
  525 +sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
  526 + unsigned long __user * mem)
  527 +{
  528 + struct mm_struct *mm = current->mm;
  529 + unsigned long mem_value;
  530 +
  531 + down_read(&mm->mmap_sem);
  532 +
  533 + mem_value = *mem;
  534 + if (mem_value == oldval)
  535 + *mem = newval;
  536 +
  537 + up_read(&mm->mmap_sem);
  538 + return mem_value;
  539 +}
  540 +
  541 +#endif /* CONFIG_MMU */
  542 +
  543 +asmlinkage int sys_getpagesize(void)
  544 +{
  545 + return PAGE_SIZE;
  546 +}
  547 +
  548 +/*
  549 + * Do a system call from kernel instead of calling sys_execve so we
  550 + * end up with proper pt_regs.
  551 + */
  552 +int kernel_execve(const char *filename,
  553 + const char *const argv[],
  554 + const char *const envp[])
  555 +{
  556 + register long __res asm ("%d0") = __NR_execve;
  557 + register long __a asm ("%d1") = (long)(filename);
  558 + register long __b asm ("%d2") = (long)(argv);
  559 + register long __c asm ("%d3") = (long)(envp);
  560 + asm volatile ("trap #0" : "+d" (__res)
  561 + : "d" (__a), "d" (__b), "d" (__c));
  562 + return __res;
  563 +}
  564 +
  565 +asmlinkage unsigned long sys_get_thread_area(void)
  566 +{
  567 + return current_thread_info()->tp_value;
  568 +}
  569 +
  570 +asmlinkage int sys_set_thread_area(unsigned long tp)
  571 +{
  572 + current_thread_info()->tp_value = tp;
  573 + return 0;
  574 +}
  575 +
  576 +asmlinkage int sys_atomic_barrier(void)
  577 +{
  578 + /* no code needed for uniprocs */
  579 + return 0;
  580 +}
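
The two CONFIG_MMU-dependent functions above are the m68k-private syscalls
exposed to user space. As a hedged illustration (not part of this commit),
user space might invoke them roughly as follows, assuming the usual m68k
definitions of __NR_cacheflush and __NR_atomic_cmpxchg_32 from asm/unistd.h
and the FLUSH_* constants from asm/cachectl.h:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>       /* FLUSH_SCOPE_*, FLUSH_CACHE_* */

/* Flush the caches covering a freshly written code buffer,
 * as a JIT or self-modifying program would. */
static int flush_code_range(void *addr, unsigned long len)
{
        return syscall(__NR_cacheflush, (unsigned long)addr,
                       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
}

/* Emulated 32-bit compare-and-exchange: returns the old value of *mem,
 * storing newval only if *mem == oldval.  The three zeroes fill the
 * unused d3-d5 argument slots seen in the kernel prototype. */
static unsigned long cmpxchg32(volatile unsigned long *mem,
                               unsigned long oldval, unsigned long newval)
{
        return syscall(__NR_atomic_cmpxchg_32, newval, oldval, 0, 0, 0, mem);
}

int main(void)
{
        static unsigned long word = 1;
        static unsigned char code[32];

        /* e.g. after patching instructions into 'code' at run time */
        flush_code_range(code, sizeof(code));

        if (cmpxchg32(&word, 1, 2) == 1)
                printf("swapped, word is now %lu\n", word);
        return 0;
}

Either kernel configuration gives the caller the same compare-and-swap
semantics: the MMU path walks the page tables and retries through
do_page_fault() when the target page is copy-on-write, while the non-mmu path
simply reads and writes the word under mmap_sem.
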
arch/m68k/kernel/sys_m68k_mm.c
1   -/*
2   - * linux/arch/m68k/kernel/sys_m68k.c
3   - *
4   - * This file contains various random system calls that
5   - * have a non-standard calling sequence on the Linux/m68k
6   - * platform.
7   - */
8   -
9   -#include <linux/capability.h>
10   -#include <linux/errno.h>
11   -#include <linux/sched.h>
12   -#include <linux/mm.h>
13   -#include <linux/fs.h>
14   -#include <linux/smp.h>
15   -#include <linux/sem.h>
16   -#include <linux/msg.h>
17   -#include <linux/shm.h>
18   -#include <linux/stat.h>
19   -#include <linux/syscalls.h>
20   -#include <linux/mman.h>
21   -#include <linux/file.h>
22   -#include <linux/ipc.h>
23   -
24   -#include <asm/setup.h>
25   -#include <asm/uaccess.h>
26   -#include <asm/cachectl.h>
27   -#include <asm/traps.h>
28   -#include <asm/page.h>
29   -#include <asm/unistd.h>
30   -#include <linux/elf.h>
31   -#include <asm/tlb.h>
32   -
33   -asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
34   - unsigned long error_code);
35   -
36   -asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
37   - unsigned long prot, unsigned long flags,
38   - unsigned long fd, unsigned long pgoff)
39   -{
40   - /*
41   - * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
42   - * so we need to shift the argument down by 1; m68k mmap64(3)
43   - * (in libc) expects the last argument of mmap2 in 4Kb units.
44   - */
45   - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
46   -}
47   -
48   -/* Convert virtual (user) address VADDR to physical address PADDR */
49   -#define virt_to_phys_040(vaddr) \
50   -({ \
51   - unsigned long _mmusr, _paddr; \
52   - \
53   - __asm__ __volatile__ (".chip 68040\n\t" \
54   - "ptestr (%1)\n\t" \
55   - "movec %%mmusr,%0\n\t" \
56   - ".chip 68k" \
57   - : "=r" (_mmusr) \
58   - : "a" (vaddr)); \
59   - _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
60   - _paddr; \
61   -})
62   -
63   -static inline int
64   -cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
65   -{
66   - unsigned long paddr, i;
67   -
68   - switch (scope)
69   - {
70   - case FLUSH_SCOPE_ALL:
71   - switch (cache)
72   - {
73   - case FLUSH_CACHE_DATA:
74   - /* This nop is needed for some broken versions of the 68040. */
75   - __asm__ __volatile__ ("nop\n\t"
76   - ".chip 68040\n\t"
77   - "cpusha %dc\n\t"
78   - ".chip 68k");
79   - break;
80   - case FLUSH_CACHE_INSN:
81   - __asm__ __volatile__ ("nop\n\t"
82   - ".chip 68040\n\t"
83   - "cpusha %ic\n\t"
84   - ".chip 68k");
85   - break;
86   - default:
87   - case FLUSH_CACHE_BOTH:
88   - __asm__ __volatile__ ("nop\n\t"
89   - ".chip 68040\n\t"
90   - "cpusha %bc\n\t"
91   - ".chip 68k");
92   - break;
93   - }
94   - break;
95   -
96   - case FLUSH_SCOPE_LINE:
97   - /* Find the physical address of the first mapped page in the
98   - address range. */
99   - if ((paddr = virt_to_phys_040(addr))) {
100   - paddr += addr & ~(PAGE_MASK | 15);
101   - len = (len + (addr & 15) + 15) >> 4;
102   - } else {
103   - unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
104   -
105   - if (len <= tmp)
106   - return 0;
107   - addr += tmp;
108   - len -= tmp;
109   - tmp = PAGE_SIZE;
110   - for (;;)
111   - {
112   - if ((paddr = virt_to_phys_040(addr)))
113   - break;
114   - if (len <= tmp)
115   - return 0;
116   - addr += tmp;
117   - len -= tmp;
118   - }
119   - len = (len + 15) >> 4;
120   - }
121   - i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
122   - while (len--)
123   - {
124   - switch (cache)
125   - {
126   - case FLUSH_CACHE_DATA:
127   - __asm__ __volatile__ ("nop\n\t"
128   - ".chip 68040\n\t"
129   - "cpushl %%dc,(%0)\n\t"
130   - ".chip 68k"
131   - : : "a" (paddr));
132   - break;
133   - case FLUSH_CACHE_INSN:
134   - __asm__ __volatile__ ("nop\n\t"
135   - ".chip 68040\n\t"
136   - "cpushl %%ic,(%0)\n\t"
137   - ".chip 68k"
138   - : : "a" (paddr));
139   - break;
140   - default:
141   - case FLUSH_CACHE_BOTH:
142   - __asm__ __volatile__ ("nop\n\t"
143   - ".chip 68040\n\t"
144   - "cpushl %%bc,(%0)\n\t"
145   - ".chip 68k"
146   - : : "a" (paddr));
147   - break;
148   - }
149   - if (!--i && len)
150   - {
151   - /*
152   - * No need to page align here since it is done by
153   - * virt_to_phys_040().
154   - */
155   - addr += PAGE_SIZE;
156   - i = PAGE_SIZE / 16;
157   - /* Recompute physical address when crossing a page
158   - boundary. */
159   - for (;;)
160   - {
161   - if ((paddr = virt_to_phys_040(addr)))
162   - break;
163   - if (len <= i)
164   - return 0;
165   - len -= i;
166   - addr += PAGE_SIZE;
167   - }
168   - }
169   - else
170   - paddr += 16;
171   - }
172   - break;
173   -
174   - default:
175   - case FLUSH_SCOPE_PAGE:
176   - len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
177   - for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
178   - {
179   - if (!(paddr = virt_to_phys_040(addr)))
180   - continue;
181   - switch (cache)
182   - {
183   - case FLUSH_CACHE_DATA:
184   - __asm__ __volatile__ ("nop\n\t"
185   - ".chip 68040\n\t"
186   - "cpushp %%dc,(%0)\n\t"
187   - ".chip 68k"
188   - : : "a" (paddr));
189   - break;
190   - case FLUSH_CACHE_INSN:
191   - __asm__ __volatile__ ("nop\n\t"
192   - ".chip 68040\n\t"
193   - "cpushp %%ic,(%0)\n\t"
194   - ".chip 68k"
195   - : : "a" (paddr));
196   - break;
197   - default:
198   - case FLUSH_CACHE_BOTH:
199   - __asm__ __volatile__ ("nop\n\t"
200   - ".chip 68040\n\t"
201   - "cpushp %%bc,(%0)\n\t"
202   - ".chip 68k"
203   - : : "a" (paddr));
204   - break;
205   - }
206   - }
207   - break;
208   - }
209   - return 0;
210   -}
211   -
212   -#define virt_to_phys_060(vaddr) \
213   -({ \
214   - unsigned long paddr; \
215   - __asm__ __volatile__ (".chip 68060\n\t" \
216   - "plpar (%0)\n\t" \
217   - ".chip 68k" \
218   - : "=a" (paddr) \
219   - : "0" (vaddr)); \
220   - (paddr); /* XXX */ \
221   -})
222   -
223   -static inline int
224   -cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
225   -{
226   - unsigned long paddr, i;
227   -
228   - /*
229   - * 68060 manual says:
230   - * cpush %dc : flush DC, remains valid (with our %cacr setup)
231   - * cpush %ic : invalidate IC
232   - * cpush %bc : flush DC + invalidate IC
233   - */
234   - switch (scope)
235   - {
236   - case FLUSH_SCOPE_ALL:
237   - switch (cache)
238   - {
239   - case FLUSH_CACHE_DATA:
240   - __asm__ __volatile__ (".chip 68060\n\t"
241   - "cpusha %dc\n\t"
242   - ".chip 68k");
243   - break;
244   - case FLUSH_CACHE_INSN:
245   - __asm__ __volatile__ (".chip 68060\n\t"
246   - "cpusha %ic\n\t"
247   - ".chip 68k");
248   - break;
249   - default:
250   - case FLUSH_CACHE_BOTH:
251   - __asm__ __volatile__ (".chip 68060\n\t"
252   - "cpusha %bc\n\t"
253   - ".chip 68k");
254   - break;
255   - }
256   - break;
257   -
258   - case FLUSH_SCOPE_LINE:
259   - /* Find the physical address of the first mapped page in the
260   - address range. */
261   - len += addr & 15;
262   - addr &= -16;
263   - if (!(paddr = virt_to_phys_060(addr))) {
264   - unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
265   -
266   - if (len <= tmp)
267   - return 0;
268   - addr += tmp;
269   - len -= tmp;
270   - tmp = PAGE_SIZE;
271   - for (;;)
272   - {
273   - if ((paddr = virt_to_phys_060(addr)))
274   - break;
275   - if (len <= tmp)
276   - return 0;
277   - addr += tmp;
278   - len -= tmp;
279   - }
280   - }
281   - len = (len + 15) >> 4;
282   - i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
283   - while (len--)
284   - {
285   - switch (cache)
286   - {
287   - case FLUSH_CACHE_DATA:
288   - __asm__ __volatile__ (".chip 68060\n\t"
289   - "cpushl %%dc,(%0)\n\t"
290   - ".chip 68k"
291   - : : "a" (paddr));
292   - break;
293   - case FLUSH_CACHE_INSN:
294   - __asm__ __volatile__ (".chip 68060\n\t"
295   - "cpushl %%ic,(%0)\n\t"
296   - ".chip 68k"
297   - : : "a" (paddr));
298   - break;
299   - default:
300   - case FLUSH_CACHE_BOTH:
301   - __asm__ __volatile__ (".chip 68060\n\t"
302   - "cpushl %%bc,(%0)\n\t"
303   - ".chip 68k"
304   - : : "a" (paddr));
305   - break;
306   - }
307   - if (!--i && len)
308   - {
309   -
310   - /*
311   - * We just want to jump to the first cache line
312   - * in the next page.
313   - */
314   - addr += PAGE_SIZE;
315   - addr &= PAGE_MASK;
316   -
317   - i = PAGE_SIZE / 16;
318   - /* Recompute physical address when crossing a page
319   - boundary. */
320   - for (;;)
321   - {
322   - if ((paddr = virt_to_phys_060(addr)))
323   - break;
324   - if (len <= i)
325   - return 0;
326   - len -= i;
327   - addr += PAGE_SIZE;
328   - }
329   - }
330   - else
331   - paddr += 16;
332   - }
333   - break;
334   -
335   - default:
336   - case FLUSH_SCOPE_PAGE:
337   - len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
338   - addr &= PAGE_MASK; /* Workaround for bug in some
339   - revisions of the 68060 */
340   - for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
341   - {
342   - if (!(paddr = virt_to_phys_060(addr)))
343   - continue;
344   - switch (cache)
345   - {
346   - case FLUSH_CACHE_DATA:
347   - __asm__ __volatile__ (".chip 68060\n\t"
348   - "cpushp %%dc,(%0)\n\t"
349   - ".chip 68k"
350   - : : "a" (paddr));
351   - break;
352   - case FLUSH_CACHE_INSN:
353   - __asm__ __volatile__ (".chip 68060\n\t"
354   - "cpushp %%ic,(%0)\n\t"
355   - ".chip 68k"
356   - : : "a" (paddr));
357   - break;
358   - default:
359   - case FLUSH_CACHE_BOTH:
360   - __asm__ __volatile__ (".chip 68060\n\t"
361   - "cpushp %%bc,(%0)\n\t"
362   - ".chip 68k"
363   - : : "a" (paddr));
364   - break;
365   - }
366   - }
367   - break;
368   - }
369   - return 0;
370   -}
371   -
372   -/* sys_cacheflush -- flush (part of) the processor cache. */
373   -asmlinkage int
374   -sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
375   -{
376   - struct vm_area_struct *vma;
377   - int ret = -EINVAL;
378   -
379   - if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
380   - cache & ~FLUSH_CACHE_BOTH)
381   - goto out;
382   -
383   - if (scope == FLUSH_SCOPE_ALL) {
384   - /* Only the superuser may explicitly flush the whole cache. */
385   - ret = -EPERM;
386   - if (!capable(CAP_SYS_ADMIN))
387   - goto out;
388   - } else {
389   - /*
390   - * Verify that the specified address region actually belongs
391   - * to this process.
392   - */
393   - vma = find_vma (current->mm, addr);
394   - ret = -EINVAL;
395   - /* Check for overflow. */
396   - if (addr + len < addr)
397   - goto out;
398   - if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
399   - goto out;
400   - }
401   -
402   - if (CPU_IS_020_OR_030) {
403   - if (scope == FLUSH_SCOPE_LINE && len < 256) {
404   - unsigned long cacr;
405   - __asm__ ("movec %%cacr, %0" : "=r" (cacr));
406   - if (cache & FLUSH_CACHE_INSN)
407   - cacr |= 4;
408   - if (cache & FLUSH_CACHE_DATA)
409   - cacr |= 0x400;
410   - len >>= 2;
411   - while (len--) {
412   - __asm__ __volatile__ ("movec %1, %%caar\n\t"
413   - "movec %0, %%cacr"
414   - : /* no outputs */
415   - : "r" (cacr), "r" (addr));
416   - addr += 4;
417   - }
418   - } else {
419   - /* Flush the whole cache, even if page granularity requested. */
420   - unsigned long cacr;
421   - __asm__ ("movec %%cacr, %0" : "=r" (cacr));
422   - if (cache & FLUSH_CACHE_INSN)
423   - cacr |= 8;
424   - if (cache & FLUSH_CACHE_DATA)
425   - cacr |= 0x800;
426   - __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
427   - }
428   - ret = 0;
429   - goto out;
430   - } else {
431   - /*
432   - * 040 or 060: don't blindly trust 'scope', someone could
433   - * try to flush a few megs of memory.
434   - */
435   -
436   - if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
437   - scope=FLUSH_SCOPE_PAGE;
438   - if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
439   - scope=FLUSH_SCOPE_ALL;
440   - if (CPU_IS_040) {
441   - ret = cache_flush_040 (addr, scope, cache, len);
442   - } else if (CPU_IS_060) {
443   - ret = cache_flush_060 (addr, scope, cache, len);
444   - }
445   - }
446   -out:
447   - return ret;
448   -}
449   -
450   -asmlinkage int sys_getpagesize(void)
451   -{
452   - return PAGE_SIZE;
453   -}
454   -
455   -/*
456   - * Do a system call from kernel instead of calling sys_execve so we
457   - * end up with proper pt_regs.
458   - */
459   -int kernel_execve(const char *filename,
460   - const char *const argv[],
461   - const char *const envp[])
462   -{
463   - register long __res asm ("%d0") = __NR_execve;
464   - register long __a asm ("%d1") = (long)(filename);
465   - register long __b asm ("%d2") = (long)(argv);
466   - register long __c asm ("%d3") = (long)(envp);
467   - asm volatile ("trap #0" : "+d" (__res)
468   - : "d" (__a), "d" (__b), "d" (__c));
469   - return __res;
470   -}
471   -
472   -asmlinkage unsigned long sys_get_thread_area(void)
473   -{
474   - return current_thread_info()->tp_value;
475   -}
476   -
477   -asmlinkage int sys_set_thread_area(unsigned long tp)
478   -{
479   - current_thread_info()->tp_value = tp;
480   - return 0;
481   -}
482   -
483   -/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
484   - D1 (newval). */
485   -asmlinkage int
486   -sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
487   - unsigned long __user * mem)
488   -{
489   - /* This was borrowed from ARM's implementation. */
490   - for (;;) {
491   - struct mm_struct *mm = current->mm;
492   - pgd_t *pgd;
493   - pmd_t *pmd;
494   - pte_t *pte;
495   - spinlock_t *ptl;
496   - unsigned long mem_value;
497   -
498   - down_read(&mm->mmap_sem);
499   - pgd = pgd_offset(mm, (unsigned long)mem);
500   - if (!pgd_present(*pgd))
501   - goto bad_access;
502   - pmd = pmd_offset(pgd, (unsigned long)mem);
503   - if (!pmd_present(*pmd))
504   - goto bad_access;
505   - pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
506   - if (!pte_present(*pte) || !pte_dirty(*pte)
507   - || !pte_write(*pte)) {
508   - pte_unmap_unlock(pte, ptl);
509   - goto bad_access;
510   - }
511   -
512   - mem_value = *mem;
513   - if (mem_value == oldval)
514   - *mem = newval;
515   -
516   - pte_unmap_unlock(pte, ptl);
517   - up_read(&mm->mmap_sem);
518   - return mem_value;
519   -
520   - bad_access:
521   - up_read(&mm->mmap_sem);
522   - /* This is not necessarily a bad access, we can get here if
523   - a memory we're trying to write to should be copied-on-write.
524   - Make the kernel do the necessary page stuff, then re-iterate.
525   - Simulate a write access fault to do that. */
526   - {
527   - /* The first argument of the function corresponds to
528   - D1, which is the first field of struct pt_regs. */
529   - struct pt_regs *fp = (struct pt_regs *)&newval;
530   -
531   - /* '3' is an RMW flag. */
532   - if (do_page_fault(fp, (unsigned long)mem, 3))
533   - /* If the do_page_fault() failed, we don't
534   - have anything meaningful to return.
535   - There should be a SIGSEGV pending for
536   - the process. */
537   - return 0xdeadbeef;
538   - }
539   - }
540   -}
541   -
542   -asmlinkage int sys_atomic_barrier(void)
543   -{
544   - /* no code needed for uniprocs */
545   - return 0;
546   -}
arch/m68k/kernel/sys_m68k_no.c
1   -/*
2   - * linux/arch/m68knommu/kernel/sys_m68k.c
3   - *
4   - * This file contains various random system calls that
5   - * have a non-standard calling sequence on the Linux/m68k
6   - * platform.
7   - */
8   -
9   -#include <linux/errno.h>
10   -#include <linux/sched.h>
11   -#include <linux/mm.h>
12   -#include <linux/smp.h>
13   -#include <linux/sem.h>
14   -#include <linux/msg.h>
15   -#include <linux/shm.h>
16   -#include <linux/stat.h>
17   -#include <linux/syscalls.h>
18   -#include <linux/mman.h>
19   -#include <linux/file.h>
20   -#include <linux/ipc.h>
21   -#include <linux/fs.h>
22   -
23   -#include <asm/setup.h>
24   -#include <asm/uaccess.h>
25   -#include <asm/cachectl.h>
26   -#include <asm/traps.h>
27   -#include <asm/cacheflush.h>
28   -#include <asm/unistd.h>
29   -
30   -/* sys_cacheflush -- flush (part of) the processor cache. */
31   -asmlinkage int
32   -sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
33   -{
34   - flush_cache_all();
35   - return(0);
36   -}
37   -
38   -asmlinkage int sys_getpagesize(void)
39   -{
40   - return PAGE_SIZE;
41   -}
42   -
43   -/*
44   - * Do a system call from kernel instead of calling sys_execve so we
45   - * end up with proper pt_regs.
46   - */
47   -int kernel_execve(const char *filename,
48   - const char *const argv[],
49   - const char *const envp[])
50   -{
51   - register long __res asm ("%d0") = __NR_execve;
52   - register long __a asm ("%d1") = (long)(filename);
53   - register long __b asm ("%d2") = (long)(argv);
54   - register long __c asm ("%d3") = (long)(envp);
55   - asm volatile ("trap #0" : "+d" (__res)
56   - : "d" (__a), "d" (__b), "d" (__c));
57   - return __res;
58   -}
59   -
60   -asmlinkage unsigned long sys_get_thread_area(void)
61   -{
62   - return current_thread_info()->tp_value;
63   -}
64   -
65   -asmlinkage int sys_set_thread_area(unsigned long tp)
66   -{
67   - current_thread_info()->tp_value = tp;
68   - return 0;
69   -}
70   -
71   -/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
72   - D1 (newval). */
73   -asmlinkage int
74   -sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
75   - unsigned long __user * mem)
76   -{
77   - struct mm_struct *mm = current->mm;
78   - unsigned long mem_value;
79   -
80   - down_read(&mm->mmap_sem);
81   -
82   - mem_value = *mem;
83   - if (mem_value == oldval)
84   - *mem = newval;
85   -
86   - up_read(&mm->mmap_sem);
87   - return mem_value;
88   -}
89   -
90   -asmlinkage int sys_atomic_barrier(void)
91   -{
92   - /* no code needed for uniprocs */
93   - return 0;
94   -}