Commit 10c9c10c31514564b09c153432a42ffaea3ce831

Authored by GuanXuetao
1 parent 56372b0b2f

unicore32 core architecture: mm related: consistent device DMA handling

This patch implements consistent device DMA handling for the memory management
code. The DMA device operations are included here as well.

Signed-off-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

Showing 10 changed files with 1207 additions and 0 deletions
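
Before the per-file diffs, a minimal sketch of how a driver would consume the consistent DMA API this patch wires up. The device, function name, and buffer size are hypothetical, for illustration only:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical probe routine: allocate and release one consistent buffer. */
static int example_probe(struct device *dev)
{
        dma_addr_t dma_handle;
        void *cpu_addr;

        /* Accept the usual 32-bit mask; fails if dma_supported() says no. */
        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        /* Dispatched to swiotlb_dma_map_ops->alloc_coherent(). */
        cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
        if (!cpu_addr)
                return -ENOMEM;

        /* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
        return 0;
}
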

arch/unicore32/include/asm/cacheflush.h
  1 +/*
  2 + * linux/arch/unicore32/include/asm/cacheflush.h
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#ifndef __UNICORE_CACHEFLUSH_H__
  13 +#define __UNICORE_CACHEFLUSH_H__
  14 +
  15 +#include <linux/mm.h>
  16 +
  17 +#include <asm/shmparam.h>
  18 +
  19 +#define CACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
  20 +
  21 +/*
  22 + * This flag is used to indicate that the page pointed to by a pte is clean
  23 + * and does not require cleaning before returning it to the user.
  24 + */
  25 +#define PG_dcache_clean PG_arch_1
  26 +
  27 +/*
  28 + * MM Cache Management
  29 + * ===================
  30 + *
  31 + * The arch/unicore32/mm/cache-ucv2.S file implements these methods.
  32 + *
  33 + * Start addresses are inclusive and end addresses are exclusive;
  34 + * start addresses should be rounded down, end addresses up.
  35 + *
  36 + * See Documentation/cachetlb.txt for more information.
  37 + * Please note that the implementation of these, and the required
  38 + * effects are cache-type (VIVT/VIPT/PIPT) specific.
  39 + *
  40 + * flush_icache_all()
  41 + *
  42 + * Unconditionally clean and invalidate the entire icache.
  43 + * See the __flush_icache_all inline function below for the
  44 + * implementation used here.
  45 + *
  46 + * flush_kern_all()
  47 + *
  48 + * Unconditionally clean and invalidate the entire cache.
  49 + *
  50 + * flush_user_all()
  51 + *
  52 + * Clean and invalidate all user space cache entries
  53 + * before a change of page tables.
  54 + *
  55 + * flush_user_range(start, end, flags)
  56 + *
  57 + * Clean and invalidate a range of cache entries in the
  58 + * specified address space before a change of page tables.
  59 + * - start - user start address (inclusive, page aligned)
  60 + * - end - user end address (exclusive, page aligned)
  61 + * - flags - vma->vm_flags field
  62 + *
  63 + * coherent_kern_range(start, end)
  64 + *
  65 + * Ensure coherency between the Icache and the Dcache in the
  66 + * region described by start, end. If you have non-snooping
  67 + * Harvard caches, you need to implement this function.
  68 + * - start - virtual start address
  69 + * - end - virtual end address
  70 + *
  71 + * coherent_user_range(start, end)
  72 + *
  73 + * Ensure coherency between the Icache and the Dcache in the
  74 + * region described by start, end. If you have non-snooping
  75 + * Harvard caches, you need to implement this function.
  76 + * - start - virtual start address
  77 + * - end - virtual end address
  78 + *
  79 + * flush_kern_dcache_area(kaddr, size)
  80 + *
  81 + * Ensure that the data held in page is written back.
  82 + * - kaddr - page address
  83 + * - size - region size
  84 + *
  85 + * DMA Cache Coherency
  86 + * ===================
  87 + *
  88 + * dma_flush_range(start, end)
  89 + *
  90 + * Clean and invalidate the specified virtual address range.
  91 + * - start - virtual start address
  92 + * - end - virtual end address
  93 + */
  94 +
  95 +extern void __cpuc_flush_icache_all(void);
  96 +extern void __cpuc_flush_kern_all(void);
  97 +extern void __cpuc_flush_user_all(void);
  98 +extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
  99 +extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
  100 +extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
  101 +extern void __cpuc_flush_dcache_area(void *, size_t);
  102 +extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
  103 +
  104 +/*
  105 + * These are private to the dma-mapping API. Do not use directly.
  106 + * Their sole purpose is to ensure that data held in the cache
  107 + * is visible to DMA, or data written by DMA to system memory is
  108 + * visible to the CPU.
  109 + */
  110 +extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
  111 +extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
  112 +
  113 +/*
  114 + * Copy user data from/to a page which is mapped into a different
  115 + * process's address space. Really, we want to allow our "user
  116 + * space" model to handle this.
  117 + */
  118 +extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  119 + unsigned long, void *, const void *, unsigned long);
  120 +#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
  121 + do { \
  122 + memcpy(dst, src, len); \
  123 + } while (0)
  124 +
  125 +/*
  126 + * Convert calls to our calling convention.
  127 + */
  128 +/* Invalidate I-cache */
  129 +static inline void __flush_icache_all(void)
  130 +{
  131 + asm("movc p0.c5, %0, #20;\n"
  132 + "nop; nop; nop; nop; nop; nop; nop; nop\n"
  133 + :
  134 + : "r" (0));
  135 +}
  136 +
  137 +#define flush_cache_all() __cpuc_flush_kern_all()
  138 +
  139 +extern void flush_cache_mm(struct mm_struct *mm);
  140 +extern void flush_cache_range(struct vm_area_struct *vma,
  141 + unsigned long start, unsigned long end);
  142 +extern void flush_cache_page(struct vm_area_struct *vma,
  143 + unsigned long user_addr, unsigned long pfn);
  144 +
  145 +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
  146 +
  147 +/*
  148 + * flush_cache_user_range is used when we want to ensure that the
  149 + * Harvard caches are synchronised for the user space address range.
  150 + * This is used for the UniCore private sys_cacheflush system call.
  151 + */
  152 +#define flush_cache_user_range(vma, start, end) \
  153 + __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
  154 +
  155 +/*
  156 + * Perform necessary cache operations to ensure that data previously
  157 + * stored within this range of addresses can be executed by the CPU.
  158 + */
  159 +#define flush_icache_range(s, e) __cpuc_coherent_kern_range(s, e)
  160 +
  161 +/*
  162 + * Perform necessary cache operations to ensure that the TLB will
  163 + * see data written in the specified area.
  164 + */
  165 +#define clean_dcache_area(start, size) cpu_dcache_clean_area(start, size)
  166 +
  167 +/*
  168 + * flush_dcache_page is used when the kernel has written to the page
  169 + * cache page at virtual address page->virtual.
  170 + *
  171 + * If this page isn't mapped (ie, page_mapping == NULL), or it might
  172 + * have userspace mappings, then we _must_ always clean + invalidate
  173 + * the dcache entries associated with the kernel mapping.
  174 + *
  175 + * Otherwise we can defer the operation, and clean the cache when we are
  176 + * about to change to user space. This is the same method as used on SPARC64.
  177 + * See update_mmu_cache for the user space part.
  178 + */
  179 +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
  180 +extern void flush_dcache_page(struct page *);
  181 +
  182 +#define flush_dcache_mmap_lock(mapping) \
  183 + spin_lock_irq(&(mapping)->tree_lock)
  184 +#define flush_dcache_mmap_unlock(mapping) \
  185 + spin_unlock_irq(&(mapping)->tree_lock)
  186 +
  187 +#define flush_icache_user_range(vma, page, addr, len) \
  188 + flush_dcache_page(page)
  189 +
  190 +/*
  191 + * We don't appear to need to do anything here. In fact, if we did, we'd
  192 + * duplicate the cache flushing performed elsewhere by flush_dcache_page().
  193 + */
  194 +#define flush_icache_page(vma, page) do { } while (0)
  195 +
  196 +/*
  197 + * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  198 + * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
  199 + * caches, since the direct-mappings of these pages may contain cached
  200 + * data, we need to do a full cache flush to ensure that writebacks
  201 + * don't corrupt data placed into these pages via the new mappings.
  202 + */
  203 +static inline void flush_cache_vmap(unsigned long start, unsigned long end)
  204 +{
  205 +}
  206 +
  207 +static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
  208 +{
  209 +}
  210 +
  211 +#endif
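
A usage note on the coherency helpers above: code that writes instructions into memory must make the Icache coherent with the Dcache before jumping to them. A hedged sketch (hypothetical helper, not part of the patch):

#include <asm/cacheflush.h>

/* Hypothetical: publish freshly written code before executing it. */
static void example_publish_code(void *buf, unsigned long len)
{
        unsigned long start = (unsigned long)buf;

        /* Expands to __cpuc_coherent_kern_range(): cleans the Dcache
         * lines and invalidates the Icache over [start, start + len). */
        flush_icache_range(start, start + len);
}
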
arch/unicore32/include/asm/dma-mapping.h
  1 +/*
  2 + * linux/arch/unicore32/include/asm/dma-mapping.h
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#ifndef __UNICORE_DMA_MAPPING_H__
  13 +#define __UNICORE_DMA_MAPPING_H__
  14 +
  15 +#ifdef __KERNEL__
  16 +
  17 +#include <linux/mm_types.h>
  18 +#include <linux/scatterlist.h>
  19 +#include <linux/swiotlb.h>
  20 +
  21 +#include <asm-generic/dma-coherent.h>
  22 +
  23 +#include <asm/memory.h>
  24 +#include <asm/cacheflush.h>
  25 +
  26 +extern struct dma_map_ops swiotlb_dma_map_ops;
  27 +
  28 +static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  29 +{
  30 + return &swiotlb_dma_map_ops;
  31 +}
  32 +
  33 +static inline int dma_supported(struct device *dev, u64 mask)
  34 +{
  35 + struct dma_map_ops *dma_ops = get_dma_ops(dev);
  36 +
  37 + if (unlikely(dma_ops == NULL))
  38 + return 0;
  39 +
  40 + return dma_ops->dma_supported(dev, mask);
  41 +}
  42 +
  43 +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  44 +{
  45 + struct dma_map_ops *dma_ops = get_dma_ops(dev);
  46 +
  47 + if (dma_ops->mapping_error)
  48 + return dma_ops->mapping_error(dev, dma_addr);
  49 +
  50 + return 0;
  51 +}
  52 +
  53 +#include <asm-generic/dma-mapping-common.h>
  54 +
  55 +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
  56 +{
  57 + if (dev && dev->dma_mask)
  58 + return addr + size - 1 <= *dev->dma_mask;
  59 +
  60 + return 1;
  61 +}
  62 +
  63 +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
  64 +{
  65 + return paddr;
  66 +}
  67 +
  68 +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
  69 +{
  70 + return daddr;
  71 +}
  72 +
  73 +static inline void dma_mark_clean(void *addr, size_t size) {}
  74 +
  75 +static inline int dma_set_mask(struct device *dev, u64 dma_mask)
  76 +{
  77 + if (!dev->dma_mask || !dma_supported(dev, dma_mask))
  78 + return -EIO;
  79 +
  80 + *dev->dma_mask = dma_mask;
  81 +
  82 + return 0;
  83 +}
  84 +
  85 +static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  86 + dma_addr_t *dma_handle, gfp_t flag)
  87 +{
  88 + struct dma_map_ops *dma_ops = get_dma_ops(dev);
  89 +
  90 + return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
  91 +}
  92 +
  93 +static inline void dma_free_coherent(struct device *dev, size_t size,
  94 + void *cpu_addr, dma_addr_t dma_handle)
  95 +{
  96 + struct dma_map_ops *dma_ops = get_dma_ops(dev);
  97 +
  98 + dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
  99 +}
  100 +
  101 +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
  102 +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
  103 +
  104 +static inline void dma_cache_sync(struct device *dev, void *vaddr,
  105 + size_t size, enum dma_data_direction direction)
  106 +{
  107 + unsigned long start = (unsigned long)vaddr;
  108 + unsigned long end = start + size;
  109 +
  110 + switch (direction) {
  111 + case DMA_NONE:
  112 + BUG();
  113 + case DMA_FROM_DEVICE:
  114 + case DMA_BIDIRECTIONAL: /* writeback and invalidate */
  115 + __cpuc_dma_flush_range(start, end);
  116 + break;
  117 + case DMA_TO_DEVICE: /* writeback only */
  118 + __cpuc_dma_clean_range(start, end);
  119 + break;
  120 + }
  121 +}
  122 +
  123 +#endif /* __KERNEL__ */
  124 +#endif
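
The dma_cache_sync() switch above is the whole noncoherent contract: clean (write back) before the device reads, flush (write back and invalidate) before the CPU reads what the device wrote. A sketch under those assumptions (hypothetical driver helper):

#include <linux/dma-mapping.h>

/* Hypothetical: synchronize a noncoherent buffer around one transfer. */
static void example_noncoherent_io(struct device *dev, void *buf, size_t len)
{
        /* CPU filled buf: write dirty lines back so the device sees them. */
        dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

        /* ... device reads buf, then writes its response into buf ... */

        /* Device wrote buf: writeback + invalidate so the CPU re-reads RAM. */
        dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}
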
arch/unicore32/include/asm/dma.h
  1 +/*
  2 + * linux/arch/unicore32/include/asm/dma.h
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +
  13 +#ifndef __UNICORE_DMA_H__
  14 +#define __UNICORE_DMA_H__
  15 +
  16 +#include <asm/memory.h>
  17 +#include <asm-generic/dma.h>
  18 +
  19 +#ifdef CONFIG_PCI
  20 +extern int isa_dma_bridge_buggy;
  21 +#endif
  22 +
  23 +#endif /* __UNICORE_DMA_H__ */
arch/unicore32/include/asm/tlbflush.h
  1 +/*
  2 + * linux/arch/unicore32/include/asm/tlbflush.h
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#ifndef __UNICORE_TLBFLUSH_H__
  13 +#define __UNICORE_TLBFLUSH_H__
  14 +
  15 +#ifndef __ASSEMBLY__
  16 +
  17 +#include <linux/sched.h>
  18 +
  19 +extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
  20 + struct vm_area_struct *);
  21 +extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
  22 +
  23 +/*
  24 + * TLB Management
  25 + * ==============
  26 + *
  27 + * The arch/unicore32/mm/tlb-ucv2.S file implements these methods.
  28 + *
  29 + * The TLB specific code is expected to perform whatever tests it
  30 + * needs to determine if it should invalidate the TLB for each
  31 + * call. Start addresses are inclusive and end addresses are
  32 + * exclusive; it is safe to round these addresses down.
  33 + *
  34 + * flush_tlb_all()
  35 + *
  36 + * Invalidate the entire TLB.
  37 + *
  38 + * flush_tlb_mm(mm)
  39 + *
  40 + * Invalidate all TLB entries in a particular address
  41 + * space.
  42 + * - mm - mm_struct describing address space
  43 + *
  44 + * flush_tlb_range(mm,start,end)
  45 + *
  46 + * Invalidate a range of TLB entries in the specified
  47 + * address space.
  48 + * - mm - mm_struct describing address space
  49 + * - start - start address (may not be aligned)
  50 + * - end - end address (exclusive, may not be aligned)
  51 + *
  52 + * flush_tlb_page(vma, vaddr)
  53 + *
  54 + * Invalidate the specified page in the specified address range.
  55 + * - vaddr - virtual address (may not be aligned)
  56 + * - vma - vma_struct describing address range
  57 + *
  58 + * flush_kern_tlb_page(kaddr)
  59 + *
  60 + * Invalidate the TLB entry for the specified page. The address
  61 + * will be in the kernel's virtual memory space. Current uses
  62 + * only require the D-TLB to be invalidated.
  63 + * - kaddr - Kernel virtual memory address
  64 + */
  65 +
  66 +static inline void local_flush_tlb_all(void)
  67 +{
  68 + const int zero = 0;
  69 +
  70 + /* TLB invalidate all */
  71 + asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
  72 + : : "r" (zero) : "cc");
  73 +}
  74 +
  75 +static inline void local_flush_tlb_mm(struct mm_struct *mm)
  76 +{
  77 + const int zero = 0;
  78 +
  79 + if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
  80 + /* TLB invalidate all */
  81 + asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
  82 + : : "r" (zero) : "cc");
  83 + }
  84 + put_cpu();
  85 +}
  86 +
  87 +static inline void
  88 +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
  89 +{
  90 + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
  91 +#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
  92 + /* iTLB invalidate page */
  93 + asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
  94 + : : "r" (uaddr & PAGE_MASK) : "cc");
  95 + /* dTLB invalidate page */
  96 + asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
  97 + : : "r" (uaddr & PAGE_MASK) : "cc");
  98 +#else
  99 + /* TLB invalidate all */
  100 + asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
  101 + : : "r" (uaddr & PAGE_MASK) : "cc");
  102 +#endif
  103 + }
  104 +}
  105 +
  106 +static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
  107 +{
  108 +#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
  109 + /* iTLB invalidate page */
  110 + asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
  111 + : : "r" (kaddr & PAGE_MASK) : "cc");
  112 + /* dTLB invalidate page */
  113 + asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
  114 + : : "r" (kaddr & PAGE_MASK) : "cc");
  115 +#else
  116 + /* TLB invalidate all */
  117 + asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
  118 + : : "r" (kaddr & PAGE_MASK) : "cc");
  119 +#endif
  120 +}
  121 +
  122 +/*
  123 + * flush_pmd_entry
  124 + *
  125 + * Flush a PMD entry (word aligned, or double-word aligned) to
  126 + * RAM if the TLB for the CPU we are running on requires this.
  127 + * This is typically used when we are creating PMD entries.
  128 + *
  129 + * clean_pmd_entry
  130 + *
  131 + * Clean (but don't drain the write buffer) if the CPU requires
  132 + * these operations. This is typically used when we are removing
  133 + * PMD entries.
  134 + */
  135 +static inline void flush_pmd_entry(pmd_t *pmd)
  136 +{
  137 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  138 + /* flush dcache line, see dcacheline_flush in proc-macros.S */
  139 + asm("mov r1, %0 << #20\n"
  140 + "ldw r2, =_stext\n"
  141 + "add r2, r2, r1 >> #20\n"
  142 + "ldw r1, [r2+], #0x0000\n"
  143 + "ldw r1, [r2+], #0x1000\n"
  144 + "ldw r1, [r2+], #0x2000\n"
  145 + "ldw r1, [r2+], #0x3000\n"
  146 + : : "r" (pmd) : "r1", "r2");
  147 +#else
  148 + /* flush dcache all */
  149 + asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
  150 + : : "r" (pmd) : "cc");
  151 +#endif
  152 +}
  153 +
  154 +static inline void clean_pmd_entry(pmd_t *pmd)
  155 +{
  156 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  157 + /* clean dcache line */
  158 + asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
  159 + : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
  160 +#else
  161 + /* clean dcache all */
  162 + asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
  163 + : : "r" (pmd) : "cc");
  164 +#endif
  165 +}
  166 +
  167 +/*
  168 + * Convert calls to our calling convention.
  169 + */
  170 +#define local_flush_tlb_range(vma, start, end) \
  171 + __cpu_flush_user_tlb_range(start, end, vma)
  172 +#define local_flush_tlb_kernel_range(s, e) \
  173 + __cpu_flush_kern_tlb_range(s, e)
  174 +
  175 +#define flush_tlb_all local_flush_tlb_all
  176 +#define flush_tlb_mm local_flush_tlb_mm
  177 +#define flush_tlb_page local_flush_tlb_page
  178 +#define flush_tlb_kernel_page local_flush_tlb_kernel_page
  179 +#define flush_tlb_range local_flush_tlb_range
  180 +#define flush_tlb_kernel_range local_flush_tlb_kernel_range
  181 +
  182 +/*
  183 + * If PG_dcache_clean is not set for the page, we need to ensure that any
  184 + * cache entries for the kernel's virtual memory range are written
  185 + * back to the page.
  186 + */
  187 +extern void update_mmu_cache(struct vm_area_struct *vma,
  188 + unsigned long addr, pte_t *ptep);
  189 +
  190 +extern void do_bad_area(unsigned long addr, unsigned int fsr,
  191 + struct pt_regs *regs);
  192 +
  193 +#endif
  194 +
  195 +#endif
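
As a usage illustration for the kernel-range macro above (hypothetical caller, not part of the patch):

#include <asm/tlbflush.h>

/* Hypothetical: kernel PTEs covering [start, end) were just rewritten. */
static void example_kernel_remap_done(unsigned long start, unsigned long end)
{
        /* Expands to __cpu_flush_kern_tlb_range(start, end), dropping
         * both I- and D-TLB entries for each page in the range. */
        flush_tlb_kernel_range(start, end);
}
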
arch/unicore32/include/mach/dma.h
  1 +/*
  2 + * linux/arch/unicore32/include/mach/dma.h
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#ifndef __MACH_PUV3_DMA_H__
  13 +#define __MACH_PUV3_DMA_H__
  14 +
  15 +/*
  16 + * The PKUnity has six internal DMA channels.
  17 + */
  18 +#define MAX_DMA_CHANNELS 6
  19 +
  20 +typedef enum {
  21 + DMA_PRIO_HIGH = 0,
  22 + DMA_PRIO_MEDIUM = 1,
  23 + DMA_PRIO_LOW = 2
  24 +} puv3_dma_prio;
  25 +
  26 +/*
  27 + * DMA registration
  28 + */
  29 +
  30 +extern int puv3_request_dma(char *name,
  31 + puv3_dma_prio prio,
  32 + void (*irq_handler)(int, void *),
  33 + void (*err_handler)(int, void *),
  34 + void *data);
  35 +
  36 +extern void puv3_free_dma(int dma_ch);
  37 +
  38 +#define puv3_stop_dma(ch) (DMAC_CONFIG(ch) &= ~DMAC_CONFIG_EN)
  39 +#define puv3_resume_dma(ch) (DMAC_CONFIG(ch) |= DMAC_CONFIG_EN)
  40 +
  41 +#endif /* __MACH_PUV3_DMA_H__ */
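
A sketch of a client of this registration API. The handlers, names, and completion-based wait are hypothetical, and the register programming is elided because it is board-specific:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <mach/dma.h>

static DECLARE_COMPLETION(example_done);

static void example_irq(int ch, void *data)
{
        complete(&example_done);                /* transfer complete */
}

static void example_err(int ch, void *data)
{
        pr_err("DMA error on channel %d\n", ch);
        complete(&example_done);
}

static int example_transfer(void *data)
{
        int ch = puv3_request_dma("example", DMA_PRIO_MEDIUM,
                                  example_irq, example_err, data);
        if (ch < 0)
                return ch;                      /* -EINVAL or -ENODEV */

        /* ... program source, destination and length for channel ch ... */

        puv3_resume_dma(ch);                    /* set DMAC_CONFIG_EN */
        wait_for_completion(&example_done);
        puv3_stop_dma(ch);                      /* clear DMAC_CONFIG_EN */
        puv3_free_dma(ch);
        return 0;
}
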
arch/unicore32/kernel/dma.c
  1 +/*
  2 + * linux/arch/unicore32/kernel/dma.c
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
  7 + * Copyright (C) 2001-2010 Guan Xuetao
  8 + *
  9 + * This program is free software; you can redistribute it and/or modify
  10 + * it under the terms of the GNU General Public License version 2 as
  11 + * published by the Free Software Foundation.
  12 + */
  13 +
  14 +#include <linux/module.h>
  15 +#include <linux/init.h>
  16 +#include <linux/kernel.h>
  17 +#include <linux/interrupt.h>
  18 +#include <linux/errno.h>
  19 +
  20 +#include <asm/system.h>
  21 +#include <asm/irq.h>
  22 +#include <mach/hardware.h>
  23 +#include <mach/dma.h>
  24 +
  25 +struct dma_channel {
  26 + char *name;
  27 + puv3_dma_prio prio;
  28 + void (*irq_handler)(int, void *);
  29 + void (*err_handler)(int, void *);
  30 + void *data;
  31 +};
  32 +
  33 +static struct dma_channel dma_channels[MAX_DMA_CHANNELS];
  34 +
  35 +int puv3_request_dma(char *name, puv3_dma_prio prio,
  36 + void (*irq_handler)(int, void *),
  37 + void (*err_handler)(int, void *),
  38 + void *data)
  39 +{
  40 + unsigned long flags;
  41 + int i, found = 0;
  42 +
  43 + /* basic sanity checks */
  44 + if (!name)
  45 + return -EINVAL;
  46 +
  47 + local_irq_save(flags);
  48 +
  49 + do {
  50 + /* try grabbing a DMA channel with the requested priority */
  51 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
  52 + if ((dma_channels[i].prio == prio) &&
  53 + !dma_channels[i].name) {
  54 + found = 1;
  55 + break;
  56 + }
  57 + }
  58 + /* if requested prio group is full, try a higher priority */
  59 + } while (!found && prio--);
  60 +
  61 + if (found) {
  62 + dma_channels[i].name = name;
  63 + dma_channels[i].irq_handler = irq_handler;
  64 + dma_channels[i].err_handler = err_handler;
  65 + dma_channels[i].data = data;
  66 + } else {
  67 + printk(KERN_WARNING "No more available DMA channels for %s\n",
  68 + name);
  69 + i = -ENODEV;
  70 + }
  71 +
  72 + local_irq_restore(flags);
  73 + return i;
  74 +}
  75 +EXPORT_SYMBOL(puv3_request_dma);
  76 +
  77 +void puv3_free_dma(int dma_ch)
  78 +{
  79 + unsigned long flags;
  80 +
  81 + if (!dma_channels[dma_ch].name) {
  82 + printk(KERN_CRIT
  83 + "%s: trying to free channel %d which is already freed\n",
  84 + __func__, dma_ch);
  85 + return;
  86 + }
  87 +
  88 + local_irq_save(flags);
  89 + dma_channels[dma_ch].name = NULL;
  90 + dma_channels[dma_ch].err_handler = NULL;
  91 + local_irq_restore(flags);
  92 +}
  93 +EXPORT_SYMBOL(puv3_free_dma);
  94 +
  95 +static irqreturn_t dma_irq_handler(int irq, void *dev_id)
  96 +{
  97 + int i, dint = DMAC_ITCSR;
  98 +
  99 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
  100 + if (dint & DMAC_CHANNEL(i)) {
  101 + struct dma_channel *channel = &dma_channels[i];
  102 +
  103 + /* Clear TC interrupt of channel i */
  104 + DMAC_ITCCR = DMAC_CHANNEL(i);
  105 + DMAC_ITCCR = 0;
  106 +
  107 + if (channel->name && channel->irq_handler) {
  108 + channel->irq_handler(i, channel->data);
  109 + } else {
  110 + /*
  111 + * IRQ for an unregistered DMA channel:
  112 + * the interrupt was cleared above; just warn and move on.
  113 + */
  114 + printk(KERN_WARNING "spurious IRQ for"
  115 + " DMA channel %d\n", i);
  116 + }
  117 + }
  118 + }
  119 + return IRQ_HANDLED;
  120 +}
  121 +
  122 +static irqreturn_t dma_err_handler(int irq, void *dev_id)
  123 +{
  124 + int i, dint = DMAC_IESR;
  125 +
  126 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
  127 + if (dint & DMAC_CHANNEL(i)) {
  128 + struct dma_channel *channel = &dma_channels[i];
  129 +
  130 + /* Clear Err interrupt of channel i */
  131 + DMAC_IECR = DMAC_CHANNEL(i);
  132 + DMAC_IECR = 0;
  133 +
  134 + if (channel->name && channel->err_handler) {
  135 + channel->err_handler(i, channel->data);
  136 + } else {
  137 + /*
  138 + * IRQ for an unregistered DMA channel:
  139 + * the interrupt was cleared above; just warn and move on.
  140 + */
  141 + printk(KERN_WARNING "spurious IRQ for"
  142 + " DMA channel %d\n", i);
  143 + }
  144 + }
  145 + }
  146 + return IRQ_HANDLED;
  147 +}
  148 +
  149 +int __init puv3_init_dma(void)
  150 +{
  151 + int i, ret;
  152 +
  153 + /* dma channel priorities on v8 processors:
  154 + * ch 0 - 1 <--> (0) DMA_PRIO_HIGH
  155 + * ch 2 - 3 <--> (1) DMA_PRIO_MEDIUM
  156 + * ch 4 - 5 <--> (2) DMA_PRIO_LOW
  157 + */
  158 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
  159 + puv3_stop_dma(i);
  160 + dma_channels[i].name = NULL;
  161 + dma_channels[i].prio = min((i & 0x7) >> 1, DMA_PRIO_LOW);
  162 + }
  163 +
  164 + ret = request_irq(IRQ_DMA, dma_irq_handler, 0, "DMA", NULL);
  165 + if (ret) {
  166 + printk(KERN_CRIT "Can't register IRQ for DMA\n");
  167 + return ret;
  168 + }
  169 +
  170 + ret = request_irq(IRQ_DMAERR, dma_err_handler, 0, "DMAERR", NULL);
  171 + if (ret) {
  172 + printk(KERN_CRIT "Can't register IRQ for DMAERR\n");
  173 + free_irq(IRQ_DMA, NULL); /* dev_id must match request_irq() above */
  174 + return ret;
  175 + }
  176 +
  177 + return 0;
  178 +}
  179 +
  180 +postcore_initcall(puv3_init_dma);
arch/unicore32/mm/cache-ucv2.S
  1 +/*
  2 + * linux/arch/unicore32/mm/cache-ucv2.S
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + *
  12 + * This is the "shell" of the UniCore-v2 processor support.
  13 + */
  14 +#include <linux/linkage.h>
  15 +#include <linux/init.h>
  16 +#include <asm/assembler.h>
  17 +#include <asm/page.h>
  18 +
  19 +#include "proc-macros.S"
  20 +
  21 +/*
  22 + * __cpuc_flush_icache_all()
  23 + * __cpuc_flush_kern_all()
  24 + * __cpuc_flush_user_all()
  25 + *
  26 + * Flush the entire cache.
  27 + */
  28 +ENTRY(__cpuc_flush_icache_all)
  29 + /*FALLTHROUGH*/
  30 +ENTRY(__cpuc_flush_kern_all)
  31 + /*FALLTHROUGH*/
  32 +ENTRY(__cpuc_flush_user_all)
  33 + mov r0, #0
  34 + movc p0.c5, r0, #14 @ Dcache flush all
  35 + nop8
  36 +
  37 + mov r0, #0
  38 + movc p0.c5, r0, #20 @ Icache invalidate all
  39 + nop8
  40 +
  41 + mov pc, lr
  42 +
  43 +/*
  44 + * __cpuc_flush_user_range(start, end, flags)
  45 + *
  46 + * Flush a range of cache entries in the specified address space.
  47 + *
  48 + * - start - start address (may not be aligned)
  49 + * - end - end address (exclusive, may not be aligned)
  50 + * - flags - vm_area_struct flags describing address space
  51 + */
  52 +ENTRY(__cpuc_flush_user_range)
  53 + cxor.a r2, #0
  54 + beq __cpuc_dma_flush_range
  55 +
  56 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  57 + andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
  58 + sub r1, r1, r0
  59 + csub.a r1, #MAX_AREA_SIZE
  60 + bsg 2f
  61 +
  62 + andn r1, r1, #CACHE_LINESIZE - 1
  63 + add r1, r1, #CACHE_LINESIZE
  64 +
  65 +101: dcacheline_flush r0, r11, r12
  66 +
  67 + add r0, r0, #CACHE_LINESIZE
  68 + sub.a r1, r1, #CACHE_LINESIZE
  69 + bns 101b
  70 + b 3f
  71 +#endif
  72 +2: mov ip, #0
  73 + movc p0.c5, ip, #14 @ Dcache flush all
  74 + nop8
  75 +
  76 +3: mov ip, #0
  77 + movc p0.c5, ip, #20 @ Icache invalidate all
  78 + nop8
  79 +
  80 + mov pc, lr
  81 +
  82 +/*
  83 + * __cpuc_coherent_kern_range(start,end)
  84 + * __cpuc_coherent_user_range(start,end)
  85 + *
  86 + * Ensure that the I and D caches are coherent within specified
  87 + * region. This is typically used when code has been written to
  88 + * a memory region, and will be executed.
  89 + *
  90 + * - start - virtual start address of region
  91 + * - end - virtual end address of region
  92 + */
  93 +ENTRY(__cpuc_coherent_kern_range)
  94 + /* FALLTHROUGH */
  95 +ENTRY(__cpuc_coherent_user_range)
  96 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  97 + andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
  98 + sub r1, r1, r0
  99 + csub.a r1, #MAX_AREA_SIZE
  100 + bsg 2f
  101 +
  102 + andn r1, r1, #CACHE_LINESIZE - 1
  103 + add r1, r1, #CACHE_LINESIZE
  104 +
  105 + @ r0 va2pa r10
  106 + mov r9, #PAGE_SZ
  107 + sub r9, r9, #1 @ PAGE_SZ - 1, i.e. ~PAGE_MASK
  108 +101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
  109 + b 103f
  110 +102: cand.a r0, r9
  111 + beq 101b
  112 +
  113 +103: movc p0.c5, r10, #11 @ Dcache clean line of R10
  114 + nop8
  115 +
  116 + add r0, r0, #CACHE_LINESIZE
  117 + add r10, r10, #CACHE_LINESIZE
  118 + sub.a r1, r1, #CACHE_LINESIZE
  119 + bns 102b
  120 + b 3f
  121 +#endif
  122 +2: mov ip, #0
  123 + movc p0.c5, ip, #10 @ Dcache clean all
  124 + nop8
  125 +
  126 +3: mov ip, #0
  127 + movc p0.c5, ip, #20 @ Icache invalidate all
  128 + nop8
  129 +
  130 + mov pc, lr
  131 +
  132 +/*
  133 + * __cpuc_flush_kern_dcache_area(void *addr, size_t size)
  134 + *
  135 + * - addr - kernel address
  136 + * - size - region size
  137 + */
  138 +ENTRY(__cpuc_flush_kern_dcache_area)
  139 + mov ip, #0
  140 + movc p0.c5, ip, #14 @ Dcache flush all
  141 + nop8
  142 + mov pc, lr
  143 +
  144 +/*
  145 + * __cpuc_dma_clean_range(start,end)
  146 + * - start - virtual start address of region
  147 + * - end - virtual end address of region
  148 + */
  149 +ENTRY(__cpuc_dma_clean_range)
  150 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  151 + andn r0, r0, #CACHE_LINESIZE - 1
  152 + sub r1, r1, r0
  153 + andn r1, r1, #CACHE_LINESIZE - 1
  154 + add r1, r1, #CACHE_LINESIZE
  155 +
  156 + csub.a r1, #MAX_AREA_SIZE
  157 + bsg 2f
  158 +
  159 + @ r0 va2pa r10
  160 + mov r9, #PAGE_SZ
  161 + sub r9, r9, #1 @ PAGE_SZ - 1, i.e. ~PAGE_MASK
  162 +101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
  163 + b 1f
  164 +102: cand.a r0, r9
  165 + beq 101b
  166 +
  167 +1: movc p0.c5, r10, #11 @ Dcache clean line of R10
  168 + nop8
  169 + add r0, r0, #CACHE_LINESIZE
  170 + add r10, r10, #CACHE_LINESIZE
  171 + sub.a r1, r1, #CACHE_LINESIZE
  172 + bns 102b
  173 + mov pc, lr
  174 +#endif
  175 +2: mov ip, #0
  176 + movc p0.c5, ip, #10 @ Dcache clean all
  177 + nop8
  178 +
  179 + mov pc, lr
  180 +
  181 +/*
  182 + * __cpuc_dma_inv_range(start,end)
  183 + * __cpuc_dma_flush_range(start,end)
  184 + * - start - virtual start address of region
  185 + * - end - virtual end address of region
  186 + */
  187 +__cpuc_dma_inv_range:
  188 + /* FALLTHROUGH */
  189 +ENTRY(__cpuc_dma_flush_range)
  190 +#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
  191 + andn r0, r0, #CACHE_LINESIZE - 1
  192 + sub r1, r1, r0
  193 + andn r1, r1, #CACHE_LINESIZE - 1
  194 + add r1, r1, #CACHE_LINESIZE
  195 +
  196 + csub.a r1, #MAX_AREA_SIZE
  197 + bsg 2f
  198 +
  199 + @ r0 va2pa r10
  200 +101: dcacheline_flush r0, r11, r12
  201 +
  202 + add r0, r0, #CACHE_LINESIZE
  203 + sub.a r1, r1, #CACHE_LINESIZE
  204 + bns 101b
  205 + mov pc, lr
  206 +#endif
  207 +2: mov ip, #0
  208 + movc p0.c5, ip, #14 @ Dcache flush all
  209 + nop8
  210 +
  211 + mov pc, lr
arch/unicore32/mm/dma-swiotlb.c
  1 +/*
  2 + * Contains routines needed to support swiotlb for UniCore32.
  3 + *
  4 + * Copyright (C) 2010 Guan Xuetao
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify it
  7 + * under the terms of the GNU General Public License as published by the
  8 + * Free Software Foundation; either version 2 of the License, or (at your
  9 + * option) any later version.
  10 + */
  11 +#include <linux/pci.h>
  12 +#include <linux/cache.h>
  13 +#include <linux/module.h>
  14 +#include <linux/dma-mapping.h>
  15 +#include <linux/swiotlb.h>
  16 +#include <linux/bootmem.h>
  17 +
  18 +#include <asm/dma.h>
  19 +
  20 +struct dma_map_ops swiotlb_dma_map_ops = {
  21 + .alloc_coherent = swiotlb_alloc_coherent,
  22 + .free_coherent = swiotlb_free_coherent,
  23 + .map_sg = swiotlb_map_sg_attrs,
  24 + .unmap_sg = swiotlb_unmap_sg_attrs,
  25 + .dma_supported = swiotlb_dma_supported,
  26 + .map_page = swiotlb_map_page,
  27 + .unmap_page = swiotlb_unmap_page,
  28 + .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
  29 + .sync_single_for_device = swiotlb_sync_single_for_device,
  30 + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
  31 + .sync_sg_for_device = swiotlb_sync_sg_for_device,
  32 + .mapping_error = swiotlb_dma_mapping_error,
  33 +};
  34 +EXPORT_SYMBOL(swiotlb_dma_map_ops);
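
These ops back the generic streaming API pulled in through asm-generic/dma-mapping-common.h. A hedged sketch of one streaming mapping (hypothetical caller):

#include <linux/dma-mapping.h>

/* Hypothetical: map a kernel buffer for one device-read transfer. */
static int example_stream(struct device *dev, void *buf, size_t len)
{
        dma_addr_t addr;

        /* Dispatched to swiotlb_map_page(); may bounce the buffer. */
        addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -EIO;

        /* ... start the device on addr and wait for it to finish ... */

        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
        return 0;
}
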
arch/unicore32/mm/flush.c
  1 +/*
  2 + * linux/arch/unicore32/mm/flush.c
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#include <linux/module.h>
  13 +#include <linux/mm.h>
  14 +#include <linux/pagemap.h>
  15 +
  16 +#include <asm/cacheflush.h>
  17 +#include <asm/system.h>
  18 +#include <asm/tlbflush.h>
  19 +
  20 +void flush_cache_mm(struct mm_struct *mm)
  21 +{
  22 +}
  23 +
  24 +void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  25 + unsigned long end)
  26 +{
  27 + if (vma->vm_flags & VM_EXEC)
  28 + __flush_icache_all();
  29 +}
  30 +
  31 +void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
  32 + unsigned long pfn)
  33 +{
  34 +}
  35 +
  36 +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  37 + unsigned long uaddr, void *kaddr, unsigned long len)
  38 +{
  39 + /* VIPT non-aliasing D-cache */
  40 + if (vma->vm_flags & VM_EXEC) {
  41 + unsigned long addr = (unsigned long)kaddr;
  42 +
  43 + __cpuc_coherent_kern_range(addr, addr + len);
  44 + }
  45 +}
  46 +
  47 +/*
  48 + * Copy user data from/to a page which is mapped into a different
  49 + * process's address space. Really, we want to allow our "user
  50 + * space" model to handle this.
  51 + *
  52 + * Note that this code needs to run on the current CPU.
  53 + */
  54 +void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
  55 + unsigned long uaddr, void *dst, const void *src,
  56 + unsigned long len)
  57 +{
  58 + memcpy(dst, src, len);
  59 + flush_ptrace_access(vma, page, uaddr, dst, len);
  60 +}
  61 +
  62 +void __flush_dcache_page(struct address_space *mapping, struct page *page)
  63 +{
  64 + /*
  65 + * Writeback any data associated with the kernel mapping of this
  66 + * page. This ensures that data in the physical page is mutually
  67 + * coherent with the kernel's mapping.
  68 + */
  69 + __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
  70 +}
  71 +
  72 +/*
  73 + * Ensure cache coherency between kernel mapping and userspace mapping
  74 + * of this page.
  75 + */
  76 +void flush_dcache_page(struct page *page)
  77 +{
  78 + struct address_space *mapping;
  79 +
  80 + /*
  81 + * The zero page is never written to, so never has any dirty
  82 + * cache lines, and therefore never needs to be flushed.
  83 + */
  84 + if (page == ZERO_PAGE(0))
  85 + return;
  86 +
  87 + mapping = page_mapping(page);
  88 +
  89 + if (mapping && !mapping_mapped(mapping))
  90 + clear_bit(PG_dcache_clean, &page->flags);
  91 + else {
  92 + __flush_dcache_page(mapping, page);
  93 + if (mapping)
  94 + __flush_icache_all();
  95 + set_bit(PG_dcache_clean, &page->flags);
  96 + }
  97 +}
  98 +EXPORT_SYMBOL(flush_dcache_page);
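
The deferred half of this scheme is the update_mmu_cache() hook declared in tlbflush.h. It is not part of this file; what follows is a sketch of the usual pattern, assuming the companion mm code follows the SPARC64-style model the comments reference:

/* Assumed shape of the fault-time counterpart, keyed on PG_dcache_clean. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /* First fault on this page since it was dirtied: write the
         * kernel-mapping cache lines back before user space sees it. */
        page = pfn_to_page(pfn);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(page_mapping(page), page);
}
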
arch/unicore32/mm/tlb-ucv2.S
  1 +/*
  2 + * linux/arch/unicore32/mm/tlb-ucv2.S
  3 + *
  4 + * Code specific to PKUnity SoC and UniCore ISA
  5 + *
  6 + * Copyright (C) 2001-2010 GUAN Xue-tao
  7 + *
  8 + * This program is free software; you can redistribute it and/or modify
  9 + * it under the terms of the GNU General Public License version 2 as
  10 + * published by the Free Software Foundation.
  11 + */
  12 +#include <linux/init.h>
  13 +#include <linux/linkage.h>
  14 +#include <asm/assembler.h>
  15 +#include <asm/page.h>
  16 +#include <asm/tlbflush.h>
  17 +#include "proc-macros.S"
  18 +
  19 +/*
  20 + * __cpu_flush_user_tlb_range(start, end, vma)
  21 + *
  22 + * Invalidate a range of TLB entries in the specified address space.
  23 + *
  24 + * - start - start address (may not be aligned)
  25 + * - end - end address (exclusive, may not be aligned)
  26 + * - vma - vma_struct describing address range
  27 + */
  28 +ENTRY(__cpu_flush_user_tlb_range)
  29 +#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
  30 + mov r0, r0 >> #PAGE_SHIFT @ align address
  31 + mov r0, r0 << #PAGE_SHIFT
  32 + vma_vm_flags r2, r2 @ get vma->vm_flags
  33 +1:
  34 + movc p0.c6, r0, #3
  35 + nop8
  36 +
  37 + cand.a r2, #VM_EXEC @ Executable area ?
  38 + beq 2f
  39 +
  40 + movc p0.c6, r0, #5
  41 + nop8
  42 +2:
  43 + add r0, r0, #PAGE_SZ
  44 + csub.a r0, r1
  45 + beb 1b
  46 +#else
  47 + movc p0.c6, r0, #2
  48 + nop8
  49 +
  50 + cand.a r2, #VM_EXEC @ Executable area ?
  51 + beq 2f
  52 +
  53 + movc p0.c6, r0, #4
  54 + nop8
  55 +2:
  56 +#endif
  57 + mov pc, lr
  58 +
  59 +/*
  60 + * __cpu_flush_kern_tlb_range(start,end)
  61 + *
  62 + * Invalidate a range of kernel TLB entries
  63 + *
  64 + * - start - start address (may not be aligned)
  65 + * - end - end address (exclusive, may not be aligned)
  66 + */
  67 +ENTRY(__cpu_flush_kern_tlb_range)
  68 +#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
  69 + mov r0, r0 >> #PAGE_SHIFT @ align address
  70 + mov r0, r0 << #PAGE_SHIFT
  71 +1:
  72 + movc p0.c6, r0, #3
  73 + nop8
  74 +
  75 + movc p0.c6, r0, #5
  76 + nop8
  77 +
  78 + add r0, r0, #PAGE_SZ
  79 + csub.a r0, r1
  80 + beb 1b
  81 +#else
  82 + movc p0.c6, r0, #2
  83 + nop8
  84 +
  85 + movc p0.c6, r0, #4
  86 + nop8
  87 +#endif
  88 + mov pc, lr
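
For completeness, the user-range entry point above is normally reached through the flush_tlb_range() macro from tlbflush.h; a hypothetical caller:

#include <asm/tlbflush.h>

/* Hypothetical: PTEs inside one VMA were changed; drop stale entries. */
static void example_user_ptes_changed(struct vm_area_struct *vma,
                                      unsigned long start, unsigned long end)
{
        /* Expands to __cpu_flush_user_tlb_range(start, end, vma); the
         * I-TLB is only touched when the VMA is executable (VM_EXEC). */
        flush_tlb_range(vma, start, end);
}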