Commit 4a31c08d2fecc74a630653828f5388fbb037f8c2

Authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (26 commits)
  sh: Convert sh to use read/update_persistent_clock
  sh: Move PMB debugfs entry initialization to later stage
  sh: Fix up flush_cache_vmap() on SMP.
  sh: fix up MMU reset with variable PMB mapping sizes.
  sh: establish PMB mappings for NUMA nodes.
  sh: check for existing mappings for bolted PMB entries.
  sh: fixed virt/phys mapping helpers for PMB.
  sh: make pmb iomapping configurable.
  sh: reworked dynamic PMB mapping.
  sh: Fix up cpumask_of_pcibus() for the NUMA build.
  serial: sh-sci: Tidy up build warnings.
  sh: Fix up ctrl_read/write stragglers in migor setup.
  serial: sh-sci: Add DMA support.
  dmaengine: shdma: extend .device_terminate_all() to record partial transfer
  sh: merge sh7722 and sh7724 DMA register definitions
  sh: activate runtime PM for dmaengine on sh7722 and sh7724
  dmaengine: shdma: add runtime PM support.
  dmaengine: shdma: separate DMA headers.
  dmaengine: shdma: convert to platform device resources
  dmaengine: shdma: fix DMA error handling.
  ...

Showing 35 changed files

arch/sh/boards/mach-migor/setup.c
... ... @@ -419,6 +419,9 @@
419 419 I2C_BOARD_INFO("migor_ts", 0x51),
420 420 .irq = 38, /* IRQ6 */
421 421 },
  422 + {
  423 + I2C_BOARD_INFO("wm8978", 0x1a),
  424 + },
422 425 };
423 426  
424 427 static struct i2c_board_info migor_i2c_camera[] = {
... ... @@ -618,6 +621,19 @@
618 621 __raw_writew(__raw_readw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */
619 622  
620 623 platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20);
  624 +
  625 + /* SIU: Port B */
  626 + gpio_request(GPIO_FN_SIUBOLR, NULL);
  627 + gpio_request(GPIO_FN_SIUBOBT, NULL);
  628 + gpio_request(GPIO_FN_SIUBISLD, NULL);
  629 + gpio_request(GPIO_FN_SIUBOSLD, NULL);
  630 + gpio_request(GPIO_FN_SIUMCKB, NULL);
  631 +
  632 + /*
  633 + * The original driver sets SIUB OLR/OBT, ILR/IBT, and SIUA OLR/OBT to
  634 + * output. Need only SIUB, set to output for master mode (table 34.2)
  635 + */
  636 + __raw_writew(__raw_readw(PORT_MSELCRA) | 1, PORT_MSELCRA);
621 637  
622 638 i2c_register_board_info(0, migor_i2c_devices,
623 639 ARRAY_SIZE(migor_i2c_devices));
arch/sh/boot/compressed/cache.c
... ... @@ -5,7 +5,7 @@
5 5  
6 6 for (i = 0; i < (32 * 1024); i += 32) {
7 7 (void)*p;
8   - p += (32 / sizeof (int));
  8 + p += (32 / sizeof(int));
9 9 }
10 10  
11 11 return 0;
arch/sh/include/asm/cacheflush.h
... ... @@ -86,8 +86,8 @@
86 86 struct page *page, unsigned long vaddr, void *dst, const void *src,
87 87 unsigned long len);
88 88  
89   -#define flush_cache_vmap(start, end) flush_cache_all()
90   -#define flush_cache_vunmap(start, end) flush_cache_all()
  89 +#define flush_cache_vmap(start, end) local_flush_cache_all(NULL)
  90 +#define flush_cache_vunmap(start, end) local_flush_cache_all(NULL)
91 91  
92 92 #define flush_dcache_mmap_lock(mapping) do { } while (0)
93 93 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
arch/sh/include/asm/dma-register.h
  1 +/*
  2 + * Common header for the legacy SH DMA driver and the new dmaengine driver
  3 + *
  4 + * extracted from arch/sh/include/asm/dma-sh.h:
  5 + *
  6 + * Copyright (C) 2000 Takashi YOSHII
  7 + * Copyright (C) 2003 Paul Mundt
  8 + *
  9 + * This file is subject to the terms and conditions of the GNU General Public
  10 + * License. See the file "COPYING" in the main directory of this archive
  11 + * for more details.
  12 + */
  13 +#ifndef DMA_REGISTER_H
  14 +#define DMA_REGISTER_H
  15 +
  16 +/* DMA register */
  17 +#define SAR 0x00
  18 +#define DAR 0x04
  19 +#define TCR 0x08
  20 +#define CHCR 0x0C
  21 +#define DMAOR 0x40
  22 +
  23 +/* DMAOR definitions */
  24 +#define DMAOR_AE 0x00000004
  25 +#define DMAOR_NMIF 0x00000002
  26 +#define DMAOR_DME 0x00000001
  27 +
  28 +/* Definitions for the SuperH DMAC */
  29 +#define REQ_L 0x00000000
  30 +#define REQ_E 0x00080000
  31 +#define RACK_H 0x00000000
  32 +#define RACK_L 0x00040000
  33 +#define ACK_R 0x00000000
  34 +#define ACK_W 0x00020000
  35 +#define ACK_H 0x00000000
  36 +#define ACK_L 0x00010000
  37 +#define DM_INC 0x00004000
  38 +#define DM_DEC 0x00008000
  39 +#define DM_FIX 0x0000c000
  40 +#define SM_INC 0x00001000
  41 +#define SM_DEC 0x00002000
  42 +#define SM_FIX 0x00003000
  43 +#define RS_IN 0x00000200
  44 +#define RS_OUT 0x00000300
  45 +#define TS_BLK 0x00000040
  46 +#define TM_BUR 0x00000020
  47 +#define CHCR_DE 0x00000001
  48 +#define CHCR_TE 0x00000002
  49 +#define CHCR_IE 0x00000004
  50 +
  51 +#endif
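
These CHCR flag bits get OR'd together with a TS_INDEX2VAL() transfer-size value (defined in the matching cpu/dma-register.h) to form a channel configuration word. A minimal sketch, mirroring the slave configs added in setup-sh7722.c further down this diff (the extra 0x800 resource-select bits are SoC-specific):

	/*
	 * Sketch: CHCR for a memory->peripheral (TX) slave channel:
	 * fixed destination, incrementing source, 8-bit transfers.
	 */
	u32 chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT);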
arch/sh/include/asm/dma-sh.h
... ... @@ -11,7 +11,8 @@
11 11 #ifndef __DMA_SH_H
12 12 #define __DMA_SH_H
13 13  
14   -#include <asm/dma.h>
  14 +#include <asm/dma-register.h>
  15 +#include <cpu/dma-register.h>
15 16 #include <cpu/dma.h>
16 17  
17 18 /* DMAOR control: the DMAOR access size differs by CPU. */
... ... @@ -53,34 +54,6 @@
53 54 #endif
54 55 };
55 56  
56   -/* Definitions for the SuperH DMAC */
57   -#define REQ_L 0x00000000
58   -#define REQ_E 0x00080000
59   -#define RACK_H 0x00000000
60   -#define RACK_L 0x00040000
61   -#define ACK_R 0x00000000
62   -#define ACK_W 0x00020000
63   -#define ACK_H 0x00000000
64   -#define ACK_L 0x00010000
65   -#define DM_INC 0x00004000
66   -#define DM_DEC 0x00008000
67   -#define DM_FIX 0x0000c000
68   -#define SM_INC 0x00001000
69   -#define SM_DEC 0x00002000
70   -#define SM_FIX 0x00003000
71   -#define RS_IN 0x00000200
72   -#define RS_OUT 0x00000300
73   -#define TS_BLK 0x00000040
74   -#define TM_BUR 0x00000020
75   -#define CHCR_DE 0x00000001
76   -#define CHCR_TE 0x00000002
77   -#define CHCR_IE 0x00000004
78   -
79   -/* DMAOR definitions */
80   -#define DMAOR_AE 0x00000004
81   -#define DMAOR_NMIF 0x00000002
82   -#define DMAOR_DME 0x00000001
83   -
84 57 /*
85 58 * Define the default configuration for dual address memory-memory transfer.
86 59 * The 0x400 value represents auto-request, external->external.
... ... @@ -109,63 +82,6 @@
109 82 SH_DMAC_BASE1 + 0x50,
110 83 SH_DMAC_BASE1 + 0x60, /* channel 11 */
111 84 #endif
112   -};
113   -
114   -/* DMA register */
115   -#define SAR 0x00
116   -#define DAR 0x04
117   -#define TCR 0x08
118   -#define CHCR 0x0C
119   -#define DMAOR 0x40
120   -
121   -/*
122   - * for dma engine
123   - *
124   - * SuperH DMA mode
125   - */
126   -#define SHDMA_MIX_IRQ (1 << 1)
127   -#define SHDMA_DMAOR1 (1 << 2)
128   -#define SHDMA_DMAE1 (1 << 3)
129   -
130   -enum sh_dmae_slave_chan_id {
131   - SHDMA_SLAVE_SCIF0_TX,
132   - SHDMA_SLAVE_SCIF0_RX,
133   - SHDMA_SLAVE_SCIF1_TX,
134   - SHDMA_SLAVE_SCIF1_RX,
135   - SHDMA_SLAVE_SCIF2_TX,
136   - SHDMA_SLAVE_SCIF2_RX,
137   - SHDMA_SLAVE_SCIF3_TX,
138   - SHDMA_SLAVE_SCIF3_RX,
139   - SHDMA_SLAVE_SCIF4_TX,
140   - SHDMA_SLAVE_SCIF4_RX,
141   - SHDMA_SLAVE_SCIF5_TX,
142   - SHDMA_SLAVE_SCIF5_RX,
143   - SHDMA_SLAVE_SIUA_TX,
144   - SHDMA_SLAVE_SIUA_RX,
145   - SHDMA_SLAVE_SIUB_TX,
146   - SHDMA_SLAVE_SIUB_RX,
147   - SHDMA_SLAVE_NUMBER, /* Must stay last */
148   -};
149   -
150   -struct sh_dmae_slave_config {
151   - enum sh_dmae_slave_chan_id slave_id;
152   - dma_addr_t addr;
153   - u32 chcr;
154   - char mid_rid;
155   -};
156   -
157   -struct sh_dmae_pdata {
158   - unsigned int mode;
159   - struct sh_dmae_slave_config *config;
160   - int config_num;
161   -};
162   -
163   -struct device;
164   -
165   -struct sh_dmae_slave {
166   - enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
167   - struct device *dma_dev; /* Set by the platform */
168   - struct sh_dmae_slave_config *config; /* Set by the driver */
169 85 };
170 86  
171 87 #endif /* __DMA_SH_H */
arch/sh/include/asm/dmaengine.h
  1 +/*
  2 + * Header for the new SH dmaengine driver
  3 + *
  4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License version 2 as
  8 + * published by the Free Software Foundation.
  9 + */
  10 +#ifndef ASM_DMAENGINE_H
  11 +#define ASM_DMAENGINE_H
  12 +
  13 +#include <linux/dmaengine.h>
  14 +#include <linux/list.h>
  15 +
  16 +#include <asm/dma-register.h>
  17 +
  18 +#define SH_DMAC_MAX_CHANNELS 6
  19 +
  20 +enum sh_dmae_slave_chan_id {
  21 + SHDMA_SLAVE_SCIF0_TX,
  22 + SHDMA_SLAVE_SCIF0_RX,
  23 + SHDMA_SLAVE_SCIF1_TX,
  24 + SHDMA_SLAVE_SCIF1_RX,
  25 + SHDMA_SLAVE_SCIF2_TX,
  26 + SHDMA_SLAVE_SCIF2_RX,
  27 + SHDMA_SLAVE_SCIF3_TX,
  28 + SHDMA_SLAVE_SCIF3_RX,
  29 + SHDMA_SLAVE_SCIF4_TX,
  30 + SHDMA_SLAVE_SCIF4_RX,
  31 + SHDMA_SLAVE_SCIF5_TX,
  32 + SHDMA_SLAVE_SCIF5_RX,
  33 + SHDMA_SLAVE_SIUA_TX,
  34 + SHDMA_SLAVE_SIUA_RX,
  35 + SHDMA_SLAVE_SIUB_TX,
  36 + SHDMA_SLAVE_SIUB_RX,
  37 + SHDMA_SLAVE_NUMBER, /* Must stay last */
  38 +};
  39 +
  40 +struct sh_dmae_slave_config {
  41 + enum sh_dmae_slave_chan_id slave_id;
  42 + dma_addr_t addr;
  43 + u32 chcr;
  44 + char mid_rid;
  45 +};
  46 +
  47 +struct sh_dmae_channel {
  48 + unsigned int offset;
  49 + unsigned int dmars;
  50 + unsigned int dmars_bit;
  51 +};
  52 +
  53 +struct sh_dmae_pdata {
  54 + struct sh_dmae_slave_config *slave;
  55 + int slave_num;
  56 + struct sh_dmae_channel *channel;
  57 + int channel_num;
  58 + unsigned int ts_low_shift;
  59 + unsigned int ts_low_mask;
  60 + unsigned int ts_high_shift;
  61 + unsigned int ts_high_mask;
  62 + unsigned int *ts_shift;
  63 + int ts_shift_num;
  64 + u16 dmaor_init;
  65 +};
  66 +
  67 +struct device;
  68 +
  69 +/* Used by slave DMA clients to request DMA to/from a specific peripheral */
  70 +struct sh_dmae_slave {
  71 + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
  72 + struct device *dma_dev; /* Set by the platform */
  73 + struct sh_dmae_slave_config *config; /* Set by the driver */
  74 +};
  75 +
  76 +struct sh_dmae_regs {
  77 + u32 sar; /* SAR / source address */
  78 + u32 dar; /* DAR / destination address */
  79 + u32 tcr; /* TCR / transfer count */
  80 +};
  81 +
  82 +struct sh_desc {
  83 + struct sh_dmae_regs hw;
  84 + struct list_head node;
  85 + struct dma_async_tx_descriptor async_tx;
  86 + enum dma_data_direction direction;
  87 + dma_cookie_t cookie;
  88 + size_t partial;
  89 + int chunks;
  90 + int mark;
  91 +};
  92 +
  93 +#endif
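
A slave DMA client obtains one of these channels through the generic dmaengine API, handing the driver a struct sh_dmae_slave via chan->private from its channel filter. A hedged sketch of the client side, along the lines of what the sh-sci DMA patch in this series does (the filter name and surrounding code are illustrative, not part of this diff):

	static bool sh_dmae_chan_filter(struct dma_chan *chan, void *arg)
	{
		struct sh_dmae_slave *param = arg;

		/* Only accept channels belonging to the platform-selected DMAC */
		if (param->dma_dev != chan->device->dev)
			return false;

		chan->private = param;
		return true;
	}

	/* ... in the client driver: */
	struct sh_dmae_slave param = {
		.slave_id = SHDMA_SLAVE_SCIF0_TX,	/* set by the platform */
		.dma_dev  = &dma_device.dev,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sh_dmae_chan_filter, &param);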
arch/sh/include/asm/io.h
... ... @@ -291,21 +291,21 @@
291 291 * doesn't exist, so everything must go through page tables.
292 292 */
293 293 #ifdef CONFIG_MMU
294   -void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
  294 +void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
295 295 pgprot_t prot, void *caller);
296 296 void __iounmap(void __iomem *addr);
297 297  
298 298 static inline void __iomem *
299   -__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
  299 +__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
300 300 {
301 301 return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
302 302 }
303 303  
304 304 static inline void __iomem *
305   -__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
  305 +__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
306 306 {
307 307 #ifdef CONFIG_29BIT
308   - unsigned long last_addr = offset + size - 1;
  308 + phys_addr_t last_addr = offset + size - 1;
309 309  
310 310 /*
311 311 * For P1 and P2 space this is trivial, as everything is already
... ... @@ -329,7 +329,7 @@
329 329 }
330 330  
331 331 static inline void __iomem *
332   -__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
  332 +__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
333 333 {
334 334 void __iomem *ret;
335 335  
... ... @@ -349,35 +349,32 @@
349 349 #define __iounmap(addr) do { } while (0)
350 350 #endif /* CONFIG_MMU */
351 351  
352   -static inline void __iomem *
353   -ioremap(unsigned long offset, unsigned long size)
  352 +static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
354 353 {
355 354 return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
356 355 }
357 356  
358 357 static inline void __iomem *
359   -ioremap_cache(unsigned long offset, unsigned long size)
  358 +ioremap_cache(phys_addr_t offset, unsigned long size)
360 359 {
361 360 return __ioremap_mode(offset, size, PAGE_KERNEL);
362 361 }
363 362  
364 363 #ifdef CONFIG_HAVE_IOREMAP_PROT
365 364 static inline void __iomem *
366   -ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
  365 +ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
367 366 {
368 367 return __ioremap_mode(offset, size, __pgprot(flags));
369 368 }
370 369 #endif
371 370  
372 371 #ifdef CONFIG_IOREMAP_FIXED
373   -extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
374   - unsigned long, pgprot_t);
  372 +extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
375 373 extern int iounmap_fixed(void __iomem *);
376 374 extern void ioremap_fixed_init(void);
377 375 #else
378 376 static inline void __iomem *
379   -ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
380   - unsigned long size, pgprot_t prot)
  377 +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
381 378 {
382 379 BUG();
383 380 return NULL;
arch/sh/include/asm/mmu.h
... ... @@ -55,19 +55,29 @@
55 55  
56 56 #ifdef CONFIG_PMB
57 57 /* arch/sh/mm/pmb.c */
58   -long pmb_remap(unsigned long virt, unsigned long phys,
59   - unsigned long size, pgprot_t prot);
60   -void pmb_unmap(unsigned long addr);
61   -void pmb_init(void);
62 58 bool __in_29bit_mode(void);
  59 +
  60 +void pmb_init(void);
  61 +int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
  62 + unsigned long size, pgprot_t prot);
  63 +void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
  64 + pgprot_t prot, void *caller);
  65 +int pmb_unmap(void __iomem *addr);
  66 +
63 67 #else
64   -static inline long pmb_remap(unsigned long virt, unsigned long phys,
65   - unsigned long size, pgprot_t prot)
  68 +
  69 +static inline void __iomem *
  70 +pmb_remap_caller(phys_addr_t phys, unsigned long size,
  71 + pgprot_t prot, void *caller)
66 72 {
  73 + return NULL;
  74 +}
  75 +
  76 +static inline int pmb_unmap(void __iomem *addr)
  77 +{
67 78 return -EINVAL;
68 79 }
69 80  
70   -#define pmb_unmap(addr) do { } while (0)
71 81 #define pmb_init(addr) do { } while (0)
72 82  
73 83 #ifdef CONFIG_29BIT
... ... @@ -77,6 +87,13 @@
77 87 #endif
78 88  
79 89 #endif /* CONFIG_PMB */
  90 +
  91 +static inline void __iomem *
  92 +pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
  93 +{
  94 + return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
  95 +}
  96 +
80 97 #endif /* __ASSEMBLY__ */
81 98  
82 99 #endif /* __MMU_H */
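
The new pmb_remap() inline records its call site via __builtin_return_address(0), matching the __ioremap_caller() convention, and the !CONFIG_PMB stubs let callers use it unconditionally. A minimal usage sketch (the physical address here is hypothetical; the size should be coverable by the supported PMB section sizes):

	void __iomem *p = pmb_remap(0x10000000, SZ_64M, PAGE_KERNEL_NOCACHE);
	if (p && !IS_ERR(p)) {
		/* ... access the mapping ... */
		pmb_unmap(p);
	}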
arch/sh/include/asm/siu.h
... ... @@ -11,7 +11,7 @@
11 11 #ifndef ASM_SIU_H
12 12 #define ASM_SIU_H
13 13  
14   -#include <asm/dma-sh.h>
  14 +#include <asm/dmaengine.h>
15 15  
16 16 struct device;
17 17  
arch/sh/include/asm/topology.h
... ... @@ -35,7 +35,7 @@
35 35  
36 36 #define pcibus_to_node(bus) ((void)(bus), -1)
37 37 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
38   - CPU_MASK_ALL_PTR : \
  38 + cpu_all_mask : \
39 39 cpumask_of_node(pcibus_to_node(bus)))
40 40  
41 41 #endif
arch/sh/include/cpu-sh3/cpu/dma-register.h
  1 +/*
  2 + * SH3 CPU-specific DMA definitions, used by both DMA drivers
  3 + *
  4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License version 2 as
  8 + * published by the Free Software Foundation.
  9 + */
  10 +#ifndef CPU_DMA_REGISTER_H
  11 +#define CPU_DMA_REGISTER_H
  12 +
  13 +#define CHCR_TS_LOW_MASK 0x18
  14 +#define CHCR_TS_LOW_SHIFT 3
  15 +#define CHCR_TS_HIGH_MASK 0
  16 +#define CHCR_TS_HIGH_SHIFT 0
  17 +
  18 +#define DMAOR_INIT DMAOR_DME
  19 +
  20 +/*
  21 + * The SuperH DMAC supports a number of transmit sizes, we list them here,
  22 + * with their respective values as they appear in the CHCR registers.
  23 + */
  24 +enum {
  25 + XMIT_SZ_8BIT,
  26 + XMIT_SZ_16BIT,
  27 + XMIT_SZ_32BIT,
  28 + XMIT_SZ_128BIT,
  29 +};
  30 +
  31 +/* log2(size / 8) - used to calculate number of transfers */
  32 +#define TS_SHIFT { \
  33 + [XMIT_SZ_8BIT] = 0, \
  34 + [XMIT_SZ_16BIT] = 1, \
  35 + [XMIT_SZ_32BIT] = 2, \
  36 + [XMIT_SZ_128BIT] = 4, \
  37 +}
  38 +
  39 +#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT)
  40 +
  41 +#endif
arch/sh/include/cpu-sh3/cpu/dma.h
... ... @@ -20,32 +20,5 @@
20 20 #define TS_32 0x00000010
21 21 #define TS_128 0x00000018
22 22  
23   -#define CHCR_TS_LOW_MASK 0x18
24   -#define CHCR_TS_LOW_SHIFT 3
25   -#define CHCR_TS_HIGH_MASK 0
26   -#define CHCR_TS_HIGH_SHIFT 0
27   -
28   -#define DMAOR_INIT DMAOR_DME
29   -
30   -/*
31   - * The SuperH DMAC supports a number of transmit sizes, we list them here,
32   - * with their respective values as they appear in the CHCR registers.
33   - */
34   -enum {
35   - XMIT_SZ_8BIT,
36   - XMIT_SZ_16BIT,
37   - XMIT_SZ_32BIT,
38   - XMIT_SZ_128BIT,
39   -};
40   -
41   -#define TS_SHIFT { \
42   - [XMIT_SZ_8BIT] = 0, \
43   - [XMIT_SZ_16BIT] = 1, \
44   - [XMIT_SZ_32BIT] = 2, \
45   - [XMIT_SZ_128BIT] = 4, \
46   -}
47   -
48   -#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT)
49   -
50 23 #endif /* __ASM_CPU_SH3_DMA_H */
arch/sh/include/cpu-sh4/cpu/dma-register.h
  1 +/*
  2 + * SH4 CPU-specific DMA definitions, used by both DMA drivers
  3 + *
  4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
  5 + *
  6 + * This program is free software; you can redistribute it and/or modify
  7 + * it under the terms of the GNU General Public License version 2 as
  8 + * published by the Free Software Foundation.
  9 + */
  10 +#ifndef CPU_DMA_REGISTER_H
  11 +#define CPU_DMA_REGISTER_H
  12 +
  13 +/* SH7751/7760/7780 DMA IRQ sources */
  14 +
  15 +#ifdef CONFIG_CPU_SH4A
  16 +
  17 +#define DMAOR_INIT DMAOR_DME
  18 +
  19 +#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
  20 + defined(CONFIG_CPU_SUBTYPE_SH7730)
  21 +#define CHCR_TS_LOW_MASK 0x00000018
  22 +#define CHCR_TS_LOW_SHIFT 3
  23 +#define CHCR_TS_HIGH_MASK 0
  24 +#define CHCR_TS_HIGH_SHIFT 0
  25 +#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
  26 + defined(CONFIG_CPU_SUBTYPE_SH7724)
  27 +#define CHCR_TS_LOW_MASK 0x00000018
  28 +#define CHCR_TS_LOW_SHIFT 3
  29 +#define CHCR_TS_HIGH_MASK 0x00300000
  30 +#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
  31 +#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
  32 + defined(CONFIG_CPU_SUBTYPE_SH7764)
  33 +#define CHCR_TS_LOW_MASK 0x00000018
  34 +#define CHCR_TS_LOW_SHIFT 3
  35 +#define CHCR_TS_HIGH_MASK 0
  36 +#define CHCR_TS_HIGH_SHIFT 0
  37 +#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
  38 +#define CHCR_TS_LOW_MASK 0x00000018
  39 +#define CHCR_TS_LOW_SHIFT 3
  40 +#define CHCR_TS_HIGH_MASK 0
  41 +#define CHCR_TS_HIGH_SHIFT 0
  42 +#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
  43 +#define CHCR_TS_LOW_MASK 0x00000018
  44 +#define CHCR_TS_LOW_SHIFT 3
  45 +#define CHCR_TS_HIGH_MASK 0
  46 +#define CHCR_TS_HIGH_SHIFT 0
  47 +#else /* SH7785 */
  48 +#define CHCR_TS_LOW_MASK 0x00000018
  49 +#define CHCR_TS_LOW_SHIFT 3
  50 +#define CHCR_TS_HIGH_MASK 0
  51 +#define CHCR_TS_HIGH_SHIFT 0
  52 +#endif
  53 +
  54 +/* Transmit sizes and respective CHCR register values */
  55 +enum {
  56 + XMIT_SZ_8BIT = 0,
  57 + XMIT_SZ_16BIT = 1,
  58 + XMIT_SZ_32BIT = 2,
  59 + XMIT_SZ_64BIT = 7,
  60 + XMIT_SZ_128BIT = 3,
  61 + XMIT_SZ_256BIT = 4,
  62 + XMIT_SZ_128BIT_BLK = 0xb,
  63 + XMIT_SZ_256BIT_BLK = 0xc,
  64 +};
  65 +
  66 +/* log2(size / 8) - used to calculate number of transfers */
  67 +#define TS_SHIFT { \
  68 + [XMIT_SZ_8BIT] = 0, \
  69 + [XMIT_SZ_16BIT] = 1, \
  70 + [XMIT_SZ_32BIT] = 2, \
  71 + [XMIT_SZ_64BIT] = 3, \
  72 + [XMIT_SZ_128BIT] = 4, \
  73 + [XMIT_SZ_256BIT] = 5, \
  74 + [XMIT_SZ_128BIT_BLK] = 4, \
  75 + [XMIT_SZ_256BIT_BLK] = 5, \
  76 +}
  77 +
  78 +#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
  79 + ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
  80 +
  81 +#else /* CONFIG_CPU_SH4A */
  82 +
  83 +#define DMAOR_INIT (0x8000 | DMAOR_DME)
  84 +
  85 +#define CHCR_TS_LOW_MASK 0x70
  86 +#define CHCR_TS_LOW_SHIFT 4
  87 +#define CHCR_TS_HIGH_MASK 0
  88 +#define CHCR_TS_HIGH_SHIFT 0
  89 +
  90 +/* Transmit sizes and respective CHCR register values */
  91 +enum {
  92 + XMIT_SZ_8BIT = 1,
  93 + XMIT_SZ_16BIT = 2,
  94 + XMIT_SZ_32BIT = 3,
  95 + XMIT_SZ_64BIT = 0,
  96 + XMIT_SZ_256BIT = 4,
  97 +};
  98 +
  99 +/* log2(size / 8) - used to calculate number of transfers */
  100 +#define TS_SHIFT { \
  101 + [XMIT_SZ_8BIT] = 0, \
  102 + [XMIT_SZ_16BIT] = 1, \
  103 + [XMIT_SZ_32BIT] = 2, \
  104 + [XMIT_SZ_64BIT] = 3, \
  105 + [XMIT_SZ_256BIT] = 5, \
  106 +}
  107 +
  108 +#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)
  109 +
  110 +#endif /* CONFIG_CPU_SH4A */
  111 +
  112 +#endif
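
These split low/high TS fields are what the ts_low_*/ts_high_* members of struct sh_dmae_pdata describe: the driver reassembles the transfer-size index from a CHCR value and looks it up in ts_shift[] to turn a byte count into a transfer count. A hedged sketch of that computation (helper name assumed):

	static unsigned int calc_xmit_shift(struct sh_dmae_pdata *pdata, u32 chcr)
	{
		int index = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
			    ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

		return pdata->ts_shift[index];	/* log2(bytes per transfer) */
	}

	/* transfers needed for a request of len bytes: */
	count = len >> calc_xmit_shift(pdata, chcr);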
arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
... ... @@ -8,20 +8,12 @@
8 8 #define DMAE0_IRQ 78 /* DMA Error IRQ*/
9 9 #define SH_DMAC_BASE0 0xFE008020
10 10 #define SH_DMARS_BASE0 0xFE009000
11   -#define CHCR_TS_LOW_MASK 0x00000018
12   -#define CHCR_TS_LOW_SHIFT 3
13   -#define CHCR_TS_HIGH_MASK 0
14   -#define CHCR_TS_HIGH_SHIFT 0
15 11 #elif defined(CONFIG_CPU_SUBTYPE_SH7722)
16 12 #define DMTE0_IRQ 48
17 13 #define DMTE4_IRQ 76
18 14 #define DMAE0_IRQ 78 /* DMA Error IRQ*/
19 15 #define SH_DMAC_BASE0 0xFE008020
20 16 #define SH_DMARS_BASE0 0xFE009000
21   -#define CHCR_TS_LOW_MASK 0x00000018
22   -#define CHCR_TS_LOW_SHIFT 3
23   -#define CHCR_TS_HIGH_MASK 0x00300000
24   -#define CHCR_TS_HIGH_SHIFT 20
25 17 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
26 18 defined(CONFIG_CPU_SUBTYPE_SH7764)
27 19 #define DMTE0_IRQ 34
... ... @@ -29,10 +21,6 @@
29 21 #define DMAE0_IRQ 38
30 22 #define SH_DMAC_BASE0 0xFF608020
31 23 #define SH_DMARS_BASE0 0xFF609000
32   -#define CHCR_TS_LOW_MASK 0x00000018
33   -#define CHCR_TS_LOW_SHIFT 3
34   -#define CHCR_TS_HIGH_MASK 0
35   -#define CHCR_TS_HIGH_SHIFT 0
36 24 #elif defined(CONFIG_CPU_SUBTYPE_SH7723)
37 25 #define DMTE0_IRQ 48 /* DMAC0A*/
38 26 #define DMTE4_IRQ 76 /* DMAC0B */
... ... @@ -46,10 +34,6 @@
46 34 #define SH_DMAC_BASE0 0xFE008020
47 35 #define SH_DMAC_BASE1 0xFDC08020
48 36 #define SH_DMARS_BASE0 0xFDC09000
49   -#define CHCR_TS_LOW_MASK 0x00000018
50   -#define CHCR_TS_LOW_SHIFT 3
51   -#define CHCR_TS_HIGH_MASK 0
52   -#define CHCR_TS_HIGH_SHIFT 0
53 37 #elif defined(CONFIG_CPU_SUBTYPE_SH7724)
54 38 #define DMTE0_IRQ 48 /* DMAC0A*/
55 39 #define DMTE4_IRQ 76 /* DMAC0B */
... ... @@ -64,10 +48,6 @@
64 48 #define SH_DMAC_BASE1 0xFDC08020
65 49 #define SH_DMARS_BASE0 0xFE009000
66 50 #define SH_DMARS_BASE1 0xFDC09000
67   -#define CHCR_TS_LOW_MASK 0x00000018
68   -#define CHCR_TS_LOW_SHIFT 3
69   -#define CHCR_TS_HIGH_MASK 0x00600000
70   -#define CHCR_TS_HIGH_SHIFT 21
71 51 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
72 52 #define DMTE0_IRQ 34
73 53 #define DMTE4_IRQ 44
... ... @@ -80,10 +60,6 @@
80 60 #define SH_DMAC_BASE0 0xFC808020
81 61 #define SH_DMAC_BASE1 0xFC818020
82 62 #define SH_DMARS_BASE0 0xFC809000
83   -#define CHCR_TS_LOW_MASK 0x00000018
84   -#define CHCR_TS_LOW_SHIFT 3
85   -#define CHCR_TS_HIGH_MASK 0
86   -#define CHCR_TS_HIGH_SHIFT 0
87 63 #else /* SH7785 */
88 64 #define DMTE0_IRQ 33
89 65 #define DMTE4_IRQ 37
... ... @@ -97,50 +73,12 @@
97 73 #define SH_DMAC_BASE0 0xFC808020
98 74 #define SH_DMAC_BASE1 0xFCC08020
99 75 #define SH_DMARS_BASE0 0xFC809000
100   -#define CHCR_TS_LOW_MASK 0x00000018
101   -#define CHCR_TS_LOW_SHIFT 3
102   -#define CHCR_TS_HIGH_MASK 0
103   -#define CHCR_TS_HIGH_SHIFT 0
104 76 #endif
105 77  
106 78 #define REQ_HE 0x000000C0
107 79 #define REQ_H 0x00000080
108 80 #define REQ_LE 0x00000040
109 81 #define TM_BURST 0x00000020
110   -
111   -/*
112   - * The SuperH DMAC supports a number of transmit sizes, we list them here,
113   - * with their respective values as they appear in the CHCR registers.
114   - *
115   - * Defaults to a 64-bit transfer size.
116   - */
117   -enum {
118   - XMIT_SZ_8BIT = 0,
119   - XMIT_SZ_16BIT = 1,
120   - XMIT_SZ_32BIT = 2,
121   - XMIT_SZ_64BIT = 7,
122   - XMIT_SZ_128BIT = 3,
123   - XMIT_SZ_256BIT = 4,
124   - XMIT_SZ_128BIT_BLK = 0xb,
125   - XMIT_SZ_256BIT_BLK = 0xc,
126   -};
127   -
128   -/*
129   - * The DMA count is defined as the number of bytes to transfer.
130   - */
131   -#define TS_SHIFT { \
132   - [XMIT_SZ_8BIT] = 0, \
133   - [XMIT_SZ_16BIT] = 1, \
134   - [XMIT_SZ_32BIT] = 2, \
135   - [XMIT_SZ_64BIT] = 3, \
136   - [XMIT_SZ_128BIT] = 4, \
137   - [XMIT_SZ_256BIT] = 5, \
138   - [XMIT_SZ_128BIT_BLK] = 4, \
139   - [XMIT_SZ_256BIT_BLK] = 5, \
140   -}
141   -
142   -#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
143   - ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
144 82  
145 83 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
arch/sh/include/cpu-sh4/cpu/dma.h
... ... @@ -5,9 +5,8 @@
5 5  
6 6 #ifdef CONFIG_CPU_SH4A
7 7  
8   -#define DMAOR_INIT (DMAOR_DME)
9   -
10 8 #include <cpu/dma-sh4a.h>
  9 +
11 10 #else /* CONFIG_CPU_SH4A */
12 11 /*
13 12 * SH7750/SH7751/SH7760
... ... @@ -17,7 +16,6 @@
17 16 #define DMTE6_IRQ 46
18 17 #define DMAE0_IRQ 38
19 18  
20   -#define DMAOR_INIT (0x8000|DMAOR_DME)
21 19 #define SH_DMAC_BASE0 0xffa00000
22 20 #define SH_DMAC_BASE1 0xffa00070
23 21 /* Definitions for the SuperH DMAC */
24 22  
... ... @@ -27,39 +25,7 @@
27 25 #define TS_32 0x00000030
28 26 #define TS_64 0x00000000
29 27  
30   -#define CHCR_TS_LOW_MASK 0x70
31   -#define CHCR_TS_LOW_SHIFT 4
32   -#define CHCR_TS_HIGH_MASK 0
33   -#define CHCR_TS_HIGH_SHIFT 0
34   -
35 28 #define DMAOR_COD 0x00000008
36   -
37   -/*
38   - * The SuperH DMAC supports a number of transmit sizes, we list them here,
39   - * with their respective values as they appear in the CHCR registers.
40   - *
41   - * Defaults to a 64-bit transfer size.
42   - */
43   -enum {
44   - XMIT_SZ_8BIT = 1,
45   - XMIT_SZ_16BIT = 2,
46   - XMIT_SZ_32BIT = 3,
47   - XMIT_SZ_64BIT = 0,
48   - XMIT_SZ_256BIT = 4,
49   -};
50   -
51   -/*
52   - * The DMA count is defined as the number of bytes to transfer.
53   - */
54   -#define TS_SHIFT { \
55   - [XMIT_SZ_8BIT] = 0, \
56   - [XMIT_SZ_16BIT] = 1, \
57   - [XMIT_SZ_32BIT] = 2, \
58   - [XMIT_SZ_64BIT] = 3, \
59   - [XMIT_SZ_256BIT] = 5, \
60   -}
61   -
62   -#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)
63 29  
64 30 #endif
65 31  
arch/sh/include/mach-migor/mach/migor.h
1 1 #ifndef __ASM_SH_MIGOR_H
2 2 #define __ASM_SH_MIGOR_H
3 3  
  4 +#define PORT_MSELCRA 0xa4050180
4 5 #define PORT_MSELCRB 0xa4050182
5 6 #define BSC_CS4BCR 0xfec10010
6 7 #define BSC_CS6ABCR 0xfec1001c
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
... ... @@ -7,19 +7,167 @@
7 7 * License. See the file "COPYING" in the main directory of this archive
8 8 * for more details.
9 9 */
10   -#include <linux/platform_device.h>
11 10 #include <linux/init.h>
  11 +#include <linux/mm.h>
  12 +#include <linux/platform_device.h>
12 13 #include <linux/serial.h>
13 14 #include <linux/serial_sci.h>
14   -#include <linux/mm.h>
  15 +#include <linux/sh_timer.h>
15 16 #include <linux/uio_driver.h>
16 17 #include <linux/usb/m66592.h>
17   -#include <linux/sh_timer.h>
  18 +
18 19 #include <asm/clock.h>
  20 +#include <asm/dmaengine.h>
19 21 #include <asm/mmzone.h>
20   -#include <asm/dma-sh.h>
  22 +#include <asm/siu.h>
  23 +
  24 +#include <cpu/dma-register.h>
21 25 #include <cpu/sh7722.h>
22 26  
  27 +static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
  28 + {
  29 + .slave_id = SHDMA_SLAVE_SCIF0_TX,
  30 + .addr = 0xffe0000c,
  31 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  32 + .mid_rid = 0x21,
  33 + }, {
  34 + .slave_id = SHDMA_SLAVE_SCIF0_RX,
  35 + .addr = 0xffe00014,
  36 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  37 + .mid_rid = 0x22,
  38 + }, {
  39 + .slave_id = SHDMA_SLAVE_SCIF1_TX,
  40 + .addr = 0xffe1000c,
  41 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  42 + .mid_rid = 0x25,
  43 + }, {
  44 + .slave_id = SHDMA_SLAVE_SCIF1_RX,
  45 + .addr = 0xffe10014,
  46 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  47 + .mid_rid = 0x26,
  48 + }, {
  49 + .slave_id = SHDMA_SLAVE_SCIF2_TX,
  50 + .addr = 0xffe2000c,
  51 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  52 + .mid_rid = 0x29,
  53 + }, {
  54 + .slave_id = SHDMA_SLAVE_SCIF2_RX,
  55 + .addr = 0xffe20014,
  56 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
  57 + .mid_rid = 0x2a,
  58 + }, {
  59 + .slave_id = SHDMA_SLAVE_SIUA_TX,
  60 + .addr = 0xa454c098,
  61 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
  62 + .mid_rid = 0xb1,
  63 + }, {
  64 + .slave_id = SHDMA_SLAVE_SIUA_RX,
  65 + .addr = 0xa454c090,
  66 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
  67 + .mid_rid = 0xb2,
  68 + }, {
  69 + .slave_id = SHDMA_SLAVE_SIUB_TX,
  70 + .addr = 0xa454c09c,
  71 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
  72 + .mid_rid = 0xb5,
  73 + }, {
  74 + .slave_id = SHDMA_SLAVE_SIUB_RX,
  75 + .addr = 0xa454c094,
  76 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
  77 + .mid_rid = 0xb6,
  78 + },
  79 +};
  80 +
  81 +static struct sh_dmae_channel sh7722_dmae_channels[] = {
  82 + {
  83 + .offset = 0,
  84 + .dmars = 0,
  85 + .dmars_bit = 0,
  86 + }, {
  87 + .offset = 0x10,
  88 + .dmars = 0,
  89 + .dmars_bit = 8,
  90 + }, {
  91 + .offset = 0x20,
  92 + .dmars = 4,
  93 + .dmars_bit = 0,
  94 + }, {
  95 + .offset = 0x30,
  96 + .dmars = 4,
  97 + .dmars_bit = 8,
  98 + }, {
  99 + .offset = 0x50,
  100 + .dmars = 8,
  101 + .dmars_bit = 0,
  102 + }, {
  103 + .offset = 0x60,
  104 + .dmars = 8,
  105 + .dmars_bit = 8,
  106 + }
  107 +};
  108 +
  109 +static unsigned int ts_shift[] = TS_SHIFT;
  110 +
  111 +static struct sh_dmae_pdata dma_platform_data = {
  112 + .slave = sh7722_dmae_slaves,
  113 + .slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
  114 + .channel = sh7722_dmae_channels,
  115 + .channel_num = ARRAY_SIZE(sh7722_dmae_channels),
  116 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  117 + .ts_low_mask = CHCR_TS_LOW_MASK,
  118 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  119 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  120 + .ts_shift = ts_shift,
  121 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  122 + .dmaor_init = DMAOR_INIT,
  123 +};
  124 +
  125 +static struct resource sh7722_dmae_resources[] = {
  126 + [0] = {
  127 + /* Channel registers and DMAOR */
  128 + .start = 0xfe008020,
  129 + .end = 0xfe00808f,
  130 + .flags = IORESOURCE_MEM,
  131 + },
  132 + [1] = {
  133 + /* DMARSx */
  134 + .start = 0xfe009000,
  135 + .end = 0xfe00900b,
  136 + .flags = IORESOURCE_MEM,
  137 + },
  138 + {
  139 + /* DMA error IRQ */
  140 + .start = 78,
  141 + .end = 78,
  142 + .flags = IORESOURCE_IRQ,
  143 + },
  144 + {
  145 + /* IRQ for channels 0-3 */
  146 + .start = 48,
  147 + .end = 51,
  148 + .flags = IORESOURCE_IRQ,
  149 + },
  150 + {
  151 + /* IRQ for channels 4-5 */
  152 + .start = 76,
  153 + .end = 77,
  154 + .flags = IORESOURCE_IRQ,
  155 + },
  156 +};
  157 +
  158 +struct platform_device dma_device = {
  159 + .name = "sh-dma-engine",
  160 + .id = -1,
  161 + .resource = sh7722_dmae_resources,
  162 + .num_resources = ARRAY_SIZE(sh7722_dmae_resources),
  163 + .dev = {
  164 + .platform_data = &dma_platform_data,
  165 + },
  166 + .archdata = {
  167 + .hwblk_id = HWBLK_DMAC,
  168 + },
  169 +};
  170 +
23 171 /* Serial */
24 172 static struct plat_sci_port scif0_platform_data = {
25 173 .mapbase = 0xffe00000,
... ... @@ -388,16 +536,37 @@
388 536 },
389 537 };
390 538  
391   -static struct sh_dmae_pdata dma_platform_data = {
392   - .mode = 0,
  539 +static struct siu_platform siu_platform_data = {
  540 + .dma_dev = &dma_device.dev,
  541 + .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
  542 + .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
  543 + .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
  544 + .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
393 545 };
394 546  
395   -static struct platform_device dma_device = {
396   - .name = "sh-dma-engine",
  547 +static struct resource siu_resources[] = {
  548 + [0] = {
  549 + .start = 0xa4540000,
  550 + .end = 0xa454c10f,
  551 + .flags = IORESOURCE_MEM,
  552 + },
  553 + [1] = {
  554 + .start = 108,
  555 + .flags = IORESOURCE_IRQ,
  556 + },
  557 +};
  558 +
  559 +static struct platform_device siu_device = {
  560 + .name = "sh_siu",
397 561 .id = -1,
398   - .dev = {
399   - .platform_data = &dma_platform_data,
  562 + .dev = {
  563 + .platform_data = &siu_platform_data,
400 564 },
  565 + .resource = siu_resources,
  566 + .num_resources = ARRAY_SIZE(siu_resources),
  567 + .archdata = {
  568 + .hwblk_id = HWBLK_SIU,
  569 + },
401 570 };
402 571  
403 572 static struct platform_device *sh7722_devices[] __initdata = {
... ... @@ -414,6 +583,7 @@
414 583 &vpu_device,
415 584 &veu_device,
416 585 &jpu_device,
  586 + &siu_device,
417 587 &dma_device,
418 588 };
419 589  
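
This is the "convert to platform device resources" change from the series: the DMAC register windows and IRQs now travel as ordinary platform resources, and the mode field of the old sh_dmae_pdata is gone. A hedged sketch of the consuming side in the sh-dma-engine driver (details assumed, not part of this diff):

	static int __devinit sh_dmae_probe(struct platform_device *pdev)
	{
		struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
		struct resource *chan_res, *dmars_res;
		int errirq;

		chan_res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dmars_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);	/* absent on DMACs without DMARS */
		errirq    = platform_get_irq(pdev, 0);
		if (!pdata || !chan_res || errirq < 0)
			return -ENODEV;

		/* ... ioremap the windows, register pdata->channel_num channels ... */
		return 0;
	}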
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
... ... @@ -21,25 +21,192 @@
21 21 #include <linux/sh_timer.h>
22 22 #include <linux/io.h>
23 23 #include <linux/notifier.h>
  24 +
24 25 #include <asm/suspend.h>
25 26 #include <asm/clock.h>
26   -#include <asm/dma-sh.h>
  27 +#include <asm/dmaengine.h>
27 28 #include <asm/mmzone.h>
  29 +
  30 +#include <cpu/dma-register.h>
28 31 #include <cpu/sh7724.h>
29 32  
30 33 /* DMA */
31   -static struct sh_dmae_pdata dma_platform_data = {
32   - .mode = SHDMA_DMAOR1,
  34 +static struct sh_dmae_channel sh7724_dmae0_channels[] = {
  35 + {
  36 + .offset = 0,
  37 + .dmars = 0,
  38 + .dmars_bit = 0,
  39 + }, {
  40 + .offset = 0x10,
  41 + .dmars = 0,
  42 + .dmars_bit = 8,
  43 + }, {
  44 + .offset = 0x20,
  45 + .dmars = 4,
  46 + .dmars_bit = 0,
  47 + }, {
  48 + .offset = 0x30,
  49 + .dmars = 4,
  50 + .dmars_bit = 8,
  51 + }, {
  52 + .offset = 0x50,
  53 + .dmars = 8,
  54 + .dmars_bit = 0,
  55 + }, {
  56 + .offset = 0x60,
  57 + .dmars = 8,
  58 + .dmars_bit = 8,
  59 + }
33 60 };
34 61  
35   -static struct platform_device dma_device = {
36   - .name = "sh-dma-engine",
37   - .id = -1,
38   - .dev = {
39   - .platform_data = &dma_platform_data,
  62 +static struct sh_dmae_channel sh7724_dmae1_channels[] = {
  63 + {
  64 + .offset = 0,
  65 + .dmars = 0,
  66 + .dmars_bit = 0,
  67 + }, {
  68 + .offset = 0x10,
  69 + .dmars = 0,
  70 + .dmars_bit = 8,
  71 + }, {
  72 + .offset = 0x20,
  73 + .dmars = 4,
  74 + .dmars_bit = 0,
  75 + }, {
  76 + .offset = 0x30,
  77 + .dmars = 4,
  78 + .dmars_bit = 8,
  79 + }, {
  80 + .offset = 0x50,
  81 + .dmars = 8,
  82 + .dmars_bit = 0,
  83 + }, {
  84 + .offset = 0x60,
  85 + .dmars = 8,
  86 + .dmars_bit = 8,
  87 + }
  88 +};
  89 +
  90 +static unsigned int ts_shift[] = TS_SHIFT;
  91 +
  92 +static struct sh_dmae_pdata dma0_platform_data = {
  93 + .channel = sh7724_dmae0_channels,
  94 + .channel_num = ARRAY_SIZE(sh7724_dmae0_channels),
  95 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  96 + .ts_low_mask = CHCR_TS_LOW_MASK,
  97 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  98 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  99 + .ts_shift = ts_shift,
  100 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  101 + .dmaor_init = DMAOR_INIT,
  102 +};
  103 +
  104 +static struct sh_dmae_pdata dma1_platform_data = {
  105 + .channel = sh7724_dmae1_channels,
  106 + .channel_num = ARRAY_SIZE(sh7724_dmae1_channels),
  107 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  108 + .ts_low_mask = CHCR_TS_LOW_MASK,
  109 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  110 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  111 + .ts_shift = ts_shift,
  112 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  113 + .dmaor_init = DMAOR_INIT,
  114 +};
  115 +
  116 +/* Resource order important! */
  117 +static struct resource sh7724_dmae0_resources[] = {
  118 + {
  119 + /* Channel registers and DMAOR */
  120 + .start = 0xfe008020,
  121 + .end = 0xfe00808f,
  122 + .flags = IORESOURCE_MEM,
40 123 },
  124 + {
  125 + /* DMARSx */
  126 + .start = 0xfe009000,
  127 + .end = 0xfe00900b,
  128 + .flags = IORESOURCE_MEM,
  129 + },
  130 + {
  131 + /* DMA error IRQ */
  132 + .start = 78,
  133 + .end = 78,
  134 + .flags = IORESOURCE_IRQ,
  135 + },
  136 + {
  137 + /* IRQ for channels 0-3 */
  138 + .start = 48,
  139 + .end = 51,
  140 + .flags = IORESOURCE_IRQ,
  141 + },
  142 + {
  143 + /* IRQ for channels 4-5 */
  144 + .start = 76,
  145 + .end = 77,
  146 + .flags = IORESOURCE_IRQ,
  147 + },
41 148 };
42 149  
  150 +/* Resource order important! */
  151 +static struct resource sh7724_dmae1_resources[] = {
  152 + {
  153 + /* Channel registers and DMAOR */
  154 + .start = 0xfdc08020,
  155 + .end = 0xfdc0808f,
  156 + .flags = IORESOURCE_MEM,
  157 + },
  158 + {
  159 + /* DMARSx */
  160 + .start = 0xfdc09000,
  161 + .end = 0xfdc0900b,
  162 + .flags = IORESOURCE_MEM,
  163 + },
  164 + {
  165 + /* DMA error IRQ */
  166 + .start = 74,
  167 + .end = 74,
  168 + .flags = IORESOURCE_IRQ,
  169 + },
  170 + {
  171 + /* IRQ for channels 0-3 */
  172 + .start = 40,
  173 + .end = 43,
  174 + .flags = IORESOURCE_IRQ,
  175 + },
  176 + {
  177 + /* IRQ for channels 4-5 */
  178 + .start = 72,
  179 + .end = 73,
  180 + .flags = IORESOURCE_IRQ,
  181 + },
  182 +};
  183 +
  184 +static struct platform_device dma0_device = {
  185 + .name = "sh-dma-engine",
  186 + .id = 0,
  187 + .resource = sh7724_dmae0_resources,
  188 + .num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
  189 + .dev = {
  190 + .platform_data = &dma0_platform_data,
  191 + },
  192 + .archdata = {
  193 + .hwblk_id = HWBLK_DMAC0,
  194 + },
  195 +};
  196 +
  197 +static struct platform_device dma1_device = {
  198 + .name = "sh-dma-engine",
  199 + .id = 1,
  200 + .resource = sh7724_dmae1_resources,
  201 + .num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
  202 + .dev = {
  203 + .platform_data = &dma1_platform_data,
  204 + },
  205 + .archdata = {
  206 + .hwblk_id = HWBLK_DMAC1,
  207 + },
  208 +};
  209 +
43 210 /* Serial */
44 211 static struct plat_sci_port scif0_platform_data = {
45 212 .mapbase = 0xffe00000,
... ... @@ -663,7 +830,8 @@
663 830 &tmu3_device,
664 831 &tmu4_device,
665 832 &tmu5_device,
666   - &dma_device,
  833 + &dma0_device,
  834 + &dma1_device,
667 835 &rtc_device,
668 836 &iic0_device,
669 837 &iic1_device,
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
... ... @@ -13,8 +13,11 @@
13 13 #include <linux/io.h>
14 14 #include <linux/serial_sci.h>
15 15 #include <linux/sh_timer.h>
16   -#include <asm/dma-sh.h>
17 16  
  17 +#include <asm/dmaengine.h>
  18 +
  19 +#include <cpu/dma-register.h>
  20 +
18 21 static struct plat_sci_port scif0_platform_data = {
19 22 .mapbase = 0xffe00000,
20 23 .flags = UPF_BOOT_AUTOCONF,
... ... @@ -247,18 +250,134 @@
247 250 .resource = rtc_resources,
248 251 };
249 252  
250   -static struct sh_dmae_pdata dma_platform_data = {
251   - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
  253 +/* DMA */
  254 +static struct sh_dmae_channel sh7780_dmae0_channels[] = {
  255 + {
  256 + .offset = 0,
  257 + .dmars = 0,
  258 + .dmars_bit = 0,
  259 + }, {
  260 + .offset = 0x10,
  261 + .dmars = 0,
  262 + .dmars_bit = 8,
  263 + }, {
  264 + .offset = 0x20,
  265 + .dmars = 4,
  266 + .dmars_bit = 0,
  267 + }, {
  268 + .offset = 0x30,
  269 + .dmars = 4,
  270 + .dmars_bit = 8,
  271 + }, {
  272 + .offset = 0x50,
  273 + .dmars = 8,
  274 + .dmars_bit = 0,
  275 + }, {
  276 + .offset = 0x60,
  277 + .dmars = 8,
  278 + .dmars_bit = 8,
  279 + }
252 280 };
253 281  
254   -static struct platform_device dma_device = {
  282 +static struct sh_dmae_channel sh7780_dmae1_channels[] = {
  283 + {
  284 + .offset = 0,
  285 + }, {
  286 + .offset = 0x10,
  287 + }, {
  288 + .offset = 0x20,
  289 + }, {
  290 + .offset = 0x30,
  291 + }, {
  292 + .offset = 0x50,
  293 + }, {
  294 + .offset = 0x60,
  295 + }
  296 +};
  297 +
  298 +static unsigned int ts_shift[] = TS_SHIFT;
  299 +
  300 +static struct sh_dmae_pdata dma0_platform_data = {
  301 + .channel = sh7780_dmae0_channels,
  302 + .channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
  303 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  304 + .ts_low_mask = CHCR_TS_LOW_MASK,
  305 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  306 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  307 + .ts_shift = ts_shift,
  308 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  309 + .dmaor_init = DMAOR_INIT,
  310 +};
  311 +
  312 +static struct sh_dmae_pdata dma1_platform_data = {
  313 + .channel = sh7780_dmae1_channels,
  314 + .channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
  315 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  316 + .ts_low_mask = CHCR_TS_LOW_MASK,
  317 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  318 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  319 + .ts_shift = ts_shift,
  320 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  321 + .dmaor_init = DMAOR_INIT,
  322 +};
  323 +
  324 +static struct resource sh7780_dmae0_resources[] = {
  325 + [0] = {
  326 + /* Channel registers and DMAOR */
  327 + .start = 0xfc808020,
  328 + .end = 0xfc80808f,
  329 + .flags = IORESOURCE_MEM,
  330 + },
  331 + [1] = {
  332 + /* DMARSx */
  333 + .start = 0xfc809000,
  334 + .end = 0xfc80900b,
  335 + .flags = IORESOURCE_MEM,
  336 + },
  337 + {
  338 + /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */
  339 + .start = 34,
  340 + .end = 34,
  341 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
  342 + },
  343 +};
  344 +
  345 +static struct resource sh7780_dmae1_resources[] = {
  346 + [0] = {
  347 + /* Channel registers and DMAOR */
  348 + .start = 0xfc818020,
  349 + .end = 0xfc81808f,
  350 + .flags = IORESOURCE_MEM,
  351 + },
  352 + /* DMAC1 has no DMARS */
  353 + {
  354 + /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */
  355 + .start = 46,
  356 + .end = 46,
  357 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
  358 + },
  359 +};
  360 +
  361 +static struct platform_device dma0_device = {
255 362 .name = "sh-dma-engine",
256   - .id = -1,
  363 + .id = 0,
  364 + .resource = sh7780_dmae0_resources,
  365 + .num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
257 366 .dev = {
258   - .platform_data = &dma_platform_data,
  367 + .platform_data = &dma0_platform_data,
259 368 },
260 369 };
261 370  
  371 +static struct platform_device dma1_device = {
  372 + .name = "sh-dma-engine",
  373 + .id = 1,
  374 + .resource = sh7780_dmae1_resources,
  375 + .num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
  376 + .dev = {
  377 + .platform_data = &dma1_platform_data,
  378 + },
  379 +};
  380 +
262 381 static struct platform_device *sh7780_devices[] __initdata = {
263 382 &scif0_device,
264 383 &scif1_device,
... ... @@ -269,7 +388,8 @@
269 388 &tmu4_device,
270 389 &tmu5_device,
271 390 &rtc_device,
272   - &dma_device,
  391 + &dma0_device,
  392 + &dma1_device,
273 393 };
274 394  
275 395 static int __init sh7780_devices_setup(void)
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
... ... @@ -14,9 +14,12 @@
14 14 #include <linux/io.h>
15 15 #include <linux/mm.h>
16 16 #include <linux/sh_timer.h>
17   -#include <asm/dma-sh.h>
  17 +
  18 +#include <asm/dmaengine.h>
18 19 #include <asm/mmzone.h>
19 20  
  21 +#include <cpu/dma-register.h>
  22 +
20 23 static struct plat_sci_port scif0_platform_data = {
21 24 .mapbase = 0xffea0000,
22 25 .flags = UPF_BOOT_AUTOCONF,
... ... @@ -295,18 +298,134 @@
295 298 .num_resources = ARRAY_SIZE(tmu5_resources),
296 299 };
297 300  
298   -static struct sh_dmae_pdata dma_platform_data = {
299   - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1),
  301 +/* DMA */
  302 +static struct sh_dmae_channel sh7785_dmae0_channels[] = {
  303 + {
  304 + .offset = 0,
  305 + .dmars = 0,
  306 + .dmars_bit = 0,
  307 + }, {
  308 + .offset = 0x10,
  309 + .dmars = 0,
  310 + .dmars_bit = 8,
  311 + }, {
  312 + .offset = 0x20,
  313 + .dmars = 4,
  314 + .dmars_bit = 0,
  315 + }, {
  316 + .offset = 0x30,
  317 + .dmars = 4,
  318 + .dmars_bit = 8,
  319 + }, {
  320 + .offset = 0x50,
  321 + .dmars = 8,
  322 + .dmars_bit = 0,
  323 + }, {
  324 + .offset = 0x60,
  325 + .dmars = 8,
  326 + .dmars_bit = 8,
  327 + }
300 328 };
301 329  
302   -static struct platform_device dma_device = {
  330 +static struct sh_dmae_channel sh7785_dmae1_channels[] = {
  331 + {
  332 + .offset = 0,
  333 + }, {
  334 + .offset = 0x10,
  335 + }, {
  336 + .offset = 0x20,
  337 + }, {
  338 + .offset = 0x30,
  339 + }, {
  340 + .offset = 0x50,
  341 + }, {
  342 + .offset = 0x60,
  343 + }
  344 +};
  345 +
  346 +static unsigned int ts_shift[] = TS_SHIFT;
  347 +
  348 +static struct sh_dmae_pdata dma0_platform_data = {
  349 + .channel = sh7785_dmae0_channels,
  350 + .channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
  351 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  352 + .ts_low_mask = CHCR_TS_LOW_MASK,
  353 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  354 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  355 + .ts_shift = ts_shift,
  356 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  357 + .dmaor_init = DMAOR_INIT,
  358 +};
  359 +
  360 +static struct sh_dmae_pdata dma1_platform_data = {
  361 + .channel = sh7785_dmae1_channels,
  362 + .channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
  363 + .ts_low_shift = CHCR_TS_LOW_SHIFT,
  364 + .ts_low_mask = CHCR_TS_LOW_MASK,
  365 + .ts_high_shift = CHCR_TS_HIGH_SHIFT,
  366 + .ts_high_mask = CHCR_TS_HIGH_MASK,
  367 + .ts_shift = ts_shift,
  368 + .ts_shift_num = ARRAY_SIZE(ts_shift),
  369 + .dmaor_init = DMAOR_INIT,
  370 +};
  371 +
  372 +static struct resource sh7785_dmae0_resources[] = {
  373 + [0] = {
  374 + /* Channel registers and DMAOR */
  375 + .start = 0xfc808020,
  376 + .end = 0xfc80808f,
  377 + .flags = IORESOURCE_MEM,
  378 + },
  379 + [1] = {
  380 + /* DMARSx */
  381 + .start = 0xfc809000,
  382 + .end = 0xfc80900b,
  383 + .flags = IORESOURCE_MEM,
  384 + },
  385 + {
  386 + /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */
  387 + .start = 33,
  388 + .end = 33,
  389 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
  390 + },
  391 +};
  392 +
  393 +static struct resource sh7785_dmae1_resources[] = {
  394 + [0] = {
  395 + /* Channel registers and DMAOR */
  396 + .start = 0xfcc08020,
  397 + .end = 0xfcc0808f,
  398 + .flags = IORESOURCE_MEM,
  399 + },
  400 + /* DMAC1 has no DMARS */
  401 + {
  402 + /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */
  403 + .start = 52,
  404 + .end = 52,
  405 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
  406 + },
  407 +};
  408 +
  409 +static struct platform_device dma0_device = {
303 410 .name = "sh-dma-engine",
304   - .id = -1,
  411 + .id = 0,
  412 + .resource = sh7785_dmae0_resources,
  413 + .num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
305 414 .dev = {
306   - .platform_data = &dma_platform_data,
  415 + .platform_data = &dma0_platform_data,
307 416 },
308 417 };
309 418  
  419 +static struct platform_device dma1_device = {
  420 + .name = "sh-dma-engine",
  421 + .id = 1,
  422 + .resource = sh7785_dmae1_resources,
  423 + .num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
  424 + .dev = {
  425 + .platform_data = &dma1_platform_data,
  426 + },
  427 +};
  428 +
310 429 static struct platform_device *sh7785_devices[] __initdata = {
311 430 &scif0_device,
312 431 &scif1_device,
... ... @@ -320,7 +439,8 @@
320 439 &tmu3_device,
321 440 &tmu4_device,
322 441 &tmu5_device,
323   - &dma_device,
  442 + &dma0_device,
  443 + &dma1_device,
324 444 };
325 445  
326 446 static int __init sh7785_devices_setup(void)
arch/sh/kernel/hw_breakpoint.c
... ... @@ -143,26 +143,6 @@
143 143 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
144 144 }
145 145  
146   -/*
147   - * Store a breakpoint's encoded address, length, and type.
148   - */
149   -static int arch_store_info(struct perf_event *bp)
150   -{
151   - struct arch_hw_breakpoint *info = counter_arch_bp(bp);
152   -
153   - /*
154   - * User-space requests will always have the address field populated
155   - * For kernel-addresses, either the address or symbol name can be
156   - * specified.
157   - */
158   - if (info->name)
159   - info->address = (unsigned long)kallsyms_lookup_name(info->name);
160   - if (info->address)
161   - return 0;
162   -
163   - return -EINVAL;
164   -}
165   -
166 146 int arch_bp_generic_fields(int sh_len, int sh_type,
167 147 int *gen_len, int *gen_type)
168 148 {
... ... @@ -276,10 +256,12 @@
276 256 return ret;
277 257 }
278 258  
279   - ret = arch_store_info(bp);
280   -
281   - if (ret < 0)
282   - return ret;
  259 + /*
  260 + * For kernel-addresses, either the address or symbol name can be
  261 + * specified.
  262 + */
  263 + if (info->name)
  264 + info->address = (unsigned long)kallsyms_lookup_name(info->name);
283 265  
284 266 /*
285 267 * Check that the low-order bits of the address are appropriate
arch/sh/kernel/setup.c
... ... @@ -443,7 +443,7 @@
443 443  
444 444 nodes_clear(node_online_map);
445 445  
446   - /* Setup bootmem with available RAM */
  446 + pmb_init();
447 447 lmb_init();
448 448 setup_memory();
449 449 sparse_init();
... ... @@ -452,7 +452,6 @@
452 452 conswitchp = &dummy_con;
453 453 #endif
454 454 paging_init();
455   - pmb_init();
456 455  
457 456 ioremap_fixed_init();
458 457  
arch/sh/kernel/time.c
... ... @@ -39,12 +39,12 @@
39 39 void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
40 40 int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
41 41  
42   -#ifdef CONFIG_GENERIC_CMOS_UPDATE
43 42 void read_persistent_clock(struct timespec *ts)
44 43 {
45 44 rtc_sh_get_time(ts);
46 45 }
47 46  
  47 +#ifdef CONFIG_GENERIC_CMOS_UPDATE
48 48 int update_persistent_clock(struct timespec now)
49 49 {
50 50 return rtc_sh_set_time(now.tv_sec);
... ... @@ -112,10 +112,6 @@
112 112  
113 113 hwblk_init();
114 114 clk_init();
115   -
116   - rtc_sh_get_time(&xtime);
117   - set_normalized_timespec(&wall_to_monotonic,
118   - -xtime.tv_sec, -xtime.tv_nsec);
119 115  
120 116 late_time_init = sh_late_time_init;
121 117 }
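
Moving read_persistent_clock() out from under CONFIG_GENERIC_CMOS_UPDATE makes it available to the generic timekeeping core on every configuration, which is what lets the manual xtime/wall_to_monotonic seeding above be deleted. Roughly what the generic boot path does instead (simplified from kernel/time/timekeeping.c of this era):

	struct timespec now;

	read_persistent_clock(&now);	/* the arch hook, now always provided */
	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	set_normalized_timespec(&wall_to_monotonic, -now.tv_sec, -now.tv_nsec);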
arch/sh/lib/libgcc.h
... ... @@ -17,8 +17,7 @@
17 17 #error I feel sick.
18 18 #endif
19 19  
20   -typedef union
21   -{
  20 +typedef union {
22 21 struct DWstruct s;
23 22 long long ll;
24 23 } DWunion;
arch/sh/mm/ioremap.c
... ... @@ -34,11 +34,12 @@
34 34 * caller shouldn't need to know that small detail.
35 35 */
36 36 void __iomem * __init_refok
37   -__ioremap_caller(unsigned long phys_addr, unsigned long size,
  37 +__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
38 38 pgprot_t pgprot, void *caller)
39 39 {
40 40 struct vm_struct *area;
41 41 unsigned long offset, last_addr, addr, orig_addr;
  42 + void __iomem *mapped;
42 43  
43 44 /* Don't allow wraparound or zero size */
44 45 last_addr = phys_addr + size - 1;
... ... @@ -46,6 +47,20 @@
46 47 return NULL;
47 48  
48 49 /*
  50 + * If we can't yet use the regular approach, go the fixmap route.
  51 + */
  52 + if (!mem_init_done)
  53 + return ioremap_fixed(phys_addr, size, pgprot);
  54 +
  55 + /*
  56 + * First try to remap through the PMB.
  57 + * PMB entries are all pre-faulted.
  58 + */
  59 + mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
  60 + if (mapped && !IS_ERR(mapped))
  61 + return mapped;
  62 +
  63 + /*
49 64 * Mappings have to be page-aligned
50 65 */
51 66 offset = phys_addr & ~PAGE_MASK;
... ... @@ -53,12 +68,6 @@
53 68 size = PAGE_ALIGN(last_addr+1) - phys_addr;
54 69  
55 70 /*
56   - * If we can't yet use the regular approach, go the fixmap route.
57   - */
58   - if (!mem_init_done)
59   - return ioremap_fixed(phys_addr, offset, size, pgprot);
60   -
61   - /*
62 71 * Ok, go for it..
63 72 */
64 73 area = get_vm_area_caller(size, VM_IOREMAP, caller);
... ... @@ -67,34 +76,11 @@
67 76 area->phys_addr = phys_addr;
68 77 orig_addr = addr = (unsigned long)area->addr;
69 78  
70   -#ifdef CONFIG_PMB
71   - /*
72   - * First try to remap through the PMB once a valid VMA has been
73   - * established. Smaller allocations (or the rest of the size
74   - * remaining after a PMB mapping due to the size not being
75   - * perfectly aligned on a PMB size boundary) are then mapped
76   - * through the UTLB using conventional page tables.
77   - *
78   - * PMB entries are all pre-faulted.
79   - */
80   - if (unlikely(phys_addr >= P1SEG)) {
81   - unsigned long mapped;
82   -
83   - mapped = pmb_remap(addr, phys_addr, size, pgprot);
84   - if (likely(mapped)) {
85   - addr += mapped;
86   - phys_addr += mapped;
87   - size -= mapped;
88   - }
  79 + if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
  80 + vunmap((void *)orig_addr);
  81 + return NULL;
89 82 }
90   -#endif
91 83  
92   - if (likely(size))
93   - if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
94   - vunmap((void *)orig_addr);
95   - return NULL;
96   - }
97   -
98 84 return (void __iomem *)(offset + (char *)orig_addr);
99 85 }
100 86 EXPORT_SYMBOL(__ioremap_caller);
... ... @@ -133,23 +119,11 @@
133 119 if (iounmap_fixed(addr) == 0)
134 120 return;
135 121  
136   -#ifdef CONFIG_PMB
137 122 /*
138   - * Purge any PMB entries that may have been established for this
139   - * mapping, then proceed with conventional VMA teardown.
140   - *
141   - * XXX: Note that due to the way that remove_vm_area() does
142   - * matching of the resultant VMA, we aren't able to fast-forward
143   - * the address past the PMB space until the end of the VMA where
144   - * the page tables reside. As such, unmap_vm_area() will be
145   - * forced to linearly scan over the area until it finds the page
146   - * tables where PTEs that need to be unmapped actually reside,
147   - * which is far from optimal. Perhaps we need to use a separate
148   - * VMA for the PMB mappings?
149   - * -- PFM.
  123 + * If the PMB handled it, there's nothing else to do.
150 124 */
151   - pmb_unmap(vaddr);
152   -#endif
  125 + if (pmb_unmap(addr) == 0)
  126 + return;
153 127  
154 128 p = remove_vm_area((void *)(vaddr & PAGE_MASK));
155 129 if (!p) {
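
The net effect on the ioremap path is a fixed dispatch order in __ioremap_caller() in place of the old inline CONFIG_PMB special-casing. In outline:

	if (!mem_init_done)			/* too early for vmalloc: fixmap route */
		return ioremap_fixed(phys_addr, size, pgprot);

	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))		/* PMB hit: pre-faulted, done */
		return mapped;

	/* otherwise fall back to a conventional page-table mapping */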
arch/sh/mm/ioremap_fixed.c
... ... @@ -45,13 +45,20 @@
45 45 }
46 46  
47 47 void __init __iomem *
48   -ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
49   - unsigned long size, pgprot_t prot)
  48 +ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
50 49 {
51 50 enum fixed_addresses idx0, idx;
52 51 struct ioremap_map *map;
53 52 unsigned int nrpages;
  53 + unsigned long offset;
54 54 int i, slot;
  55 +
  56 + /*
  57 + * Mappings have to be page-aligned
  58 + */
  59 + offset = phys_addr & ~PAGE_MASK;
  60 + phys_addr &= PAGE_MASK;
  61 + size = PAGE_ALIGN(phys_addr + size) - phys_addr;
55 62  
56 63 slot = -1;
57 64 for (i = 0; i < FIX_N_IOREMAPS; i++) {
arch/sh/mm/numa.c
... ... @@ -74,6 +74,9 @@
74 74 start_pfn = start >> PAGE_SHIFT;
75 75 end_pfn = end >> PAGE_SHIFT;
76 76  
  77 + pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
  78 + PAGE_KERNEL);
  79 +
77 80 lmb_add(start, end - start);
78 81  
79 82 __add_active_range(nid, start_pfn, end_pfn);
arch/sh/mm/pmb.c
... ... @@ -23,7 +23,8 @@
23 23 #include <linux/err.h>
24 24 #include <linux/io.h>
25 25 #include <linux/spinlock.h>
26   -#include <linux/rwlock.h>
  26 +#include <linux/vmalloc.h>
  27 +#include <asm/cacheflush.h>
27 28 #include <asm/sizes.h>
28 29 #include <asm/system.h>
29 30 #include <asm/uaccess.h>
30 31  
... ... @@ -52,12 +53,24 @@
52 53 struct pmb_entry *link;
53 54 };
54 55  
  56 +static struct {
  57 + unsigned long size;
  58 + int flag;
  59 +} pmb_sizes[] = {
  60 + { .size = SZ_512M, .flag = PMB_SZ_512M, },
  61 + { .size = SZ_128M, .flag = PMB_SZ_128M, },
  62 + { .size = SZ_64M, .flag = PMB_SZ_64M, },
  63 + { .size = SZ_16M, .flag = PMB_SZ_16M, },
  64 +};
  65 +
55 66 static void pmb_unmap_entry(struct pmb_entry *, int depth);
56 67  
57 68 static DEFINE_RWLOCK(pmb_rwlock);
58 69 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
59 70 static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
60 71  
  72 +static unsigned int pmb_iomapping_enabled;
  73 +
61 74 static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
62 75 {
63 76 return (entry & PMB_E_MASK) << PMB_E_SHIFT;
... ... @@ -73,6 +86,142 @@
73 86 return mk_pmb_entry(entry) | PMB_DATA;
74 87 }
75 88  
  89 +static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
  90 +{
  91 + return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
  92 +}
  93 +
  94 +/*
  95 + * Ensure that the PMB entries match our cache configuration.
  96 + *
  97 + * When we are in 32-bit address extended mode, CCR.CB becomes
  98 + * invalid, so care must be taken to manually adjust cacheable
  99 + * translations.
  100 + */
  101 +static __always_inline unsigned long pmb_cache_flags(void)
  102 +{
  103 + unsigned long flags = 0;
  104 +
  105 +#if defined(CONFIG_CACHE_OFF)
  106 + flags |= PMB_WT | PMB_UB;
  107 +#elif defined(CONFIG_CACHE_WRITETHROUGH)
  108 + flags |= PMB_C | PMB_WT | PMB_UB;
  109 +#elif defined(CONFIG_CACHE_WRITEBACK)
  110 + flags |= PMB_C;
  111 +#endif
  112 +
  113 + return flags;
  114 +}
  115 +
  116 +/*
  117 + * Convert typical pgprot value to the PMB equivalent
  118 + */
  119 +static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
  120 +{
  121 + unsigned long pmb_flags = 0;
  122 + u64 flags = pgprot_val(prot);
  123 +
  124 + if (flags & _PAGE_CACHABLE)
  125 + pmb_flags |= PMB_C;
  126 + if (flags & _PAGE_WT)
  127 + pmb_flags |= PMB_WT | PMB_UB;
  128 +
  129 + return pmb_flags;
  130 +}
  131 +
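As a quick sketch of what the conversion yields (the pgprot names are assumptions about the usual sh pgtable definitions, not part of this patch):

	PAGE_KERNEL (cacheable, write-back)      -> PMB_C
	cacheable + _PAGE_WT (write-through)     -> PMB_C | PMB_WT | PMB_UB
	uncached pgprot (_PAGE_CACHABLE clear)   -> 0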
  132 +static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
  133 +{
  134 + return (b->vpn == (a->vpn + a->size)) &&
  135 + (b->ppn == (a->ppn + a->size)) &&
  136 + (b->flags == a->flags);
  137 +}
  138 +
  139 +static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
  140 + unsigned long size)
  141 +{
  142 + int i;
  143 +
  144 + read_lock(&pmb_rwlock);
  145 +
  146 + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
  147 + struct pmb_entry *pmbe, *iter;
  148 + unsigned long span;
  149 +
  150 + if (!test_bit(i, pmb_map))
  151 + continue;
  152 +
  153 + pmbe = &pmb_entry_list[i];
  154 +
  155 + /*
  156 + * See if VPN and PPN are bounded by an existing mapping.
  157 + */
  158 + if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
  159 + continue;
  160 + if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
  161 + continue;
  162 +
  163 + /*
  164 + * Now see if we're in range of a simple mapping.
  165 + */
  166 + if (size <= pmbe->size) {
  167 + read_unlock(&pmb_rwlock);
  168 + return true;
  169 + }
  170 +
  171 + span = pmbe->size;
  172 +
  173 + /*
  174 + * Finally for sizes that involve compound mappings, walk
  175 + * the chain.
  176 + */
  177 + for (iter = pmbe->link; iter; iter = iter->link)
  178 + span += iter->size;
  179 +
  180 + /*
  181 + * Nothing else to do if the range requirements are met.
  182 + */
  183 + if (size <= span) {
  184 + read_unlock(&pmb_rwlock);
  185 + return true;
  186 + }
  187 + }
  188 +
  189 + read_unlock(&pmb_rwlock);
  190 + return false;
  191 +}
  192 +
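To make the compound-mapping walk below concrete, assume a bolted chain of two linked 16 MB entries starting at vpn V / ppn P (values hypothetical). A request for (V, P, 24 MB) is bounded by the head entry, fails the simple size test (24 MB > 16 MB), and then walks the chain: span = 16 MB + 16 MB = 32 MB >= 24 MB, so the mapping already exists and the caller can return early.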
  193 +static bool pmb_size_valid(unsigned long size)
  194 +{
  195 + int i;
  196 +
  197 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
  198 + if (pmb_sizes[i].size == size)
  199 + return true;
  200 +
  201 + return false;
  202 +}
  203 +
  204 +static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
  205 +{
  206 + return (addr >= P1SEG && (addr + size - 1) < P3SEG);
  207 +}
  208 +
  209 +static inline bool pmb_prot_valid(pgprot_t prot)
  210 +{
  211 + return (pgprot_val(prot) & _PAGE_USER) == 0;
  212 +}
  213 +
  214 +static int pmb_size_to_flags(unsigned long size)
  215 +{
  216 + int i;
  217 +
  218 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
  219 + if (pmb_sizes[i].size == size)
  220 + return pmb_sizes[i].flag;
  221 +
  222 + return 0;
  223 +}
  224 +
76 225 static int pmb_alloc_entry(void)
77 226 {
78 227 int pos;
79 228  
... ... @@ -140,33 +289,22 @@
140 289 }
141 290  
142 291 /*
143   - * Ensure that the PMB entries match our cache configuration.
144   - *
145   - * When we are in 32-bit address extended mode, CCR.CB becomes
146   - * invalid, so care must be taken to manually adjust cacheable
147   - * translations.
  292 + * Must be run uncached.
148 293 */
149   -static __always_inline unsigned long pmb_cache_flags(void)
  294 +static void __set_pmb_entry(struct pmb_entry *pmbe)
150 295 {
151   - unsigned long flags = 0;
  296 + unsigned long addr, data;
152 297  
153   -#if defined(CONFIG_CACHE_WRITETHROUGH)
154   - flags |= PMB_C | PMB_WT | PMB_UB;
155   -#elif defined(CONFIG_CACHE_WRITEBACK)
156   - flags |= PMB_C;
157   -#endif
  298 + addr = mk_pmb_addr(pmbe->entry);
  299 + data = mk_pmb_data(pmbe->entry);
158 300  
159   - return flags;
160   -}
  301 + jump_to_uncached();
161 302  
162   -/*
163   - * Must be run uncached.
164   - */
165   -static void __set_pmb_entry(struct pmb_entry *pmbe)
166   -{
167   - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
168   - writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
169   - mk_pmb_data(pmbe->entry));
  303 + /* Set V-bit */
  304 + __raw_writel(pmbe->vpn | PMB_V, addr);
  305 + __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
  306 +
  307 + back_to_cached();
170 308 }
171 309  
172 310 static void __clear_pmb_entry(struct pmb_entry *pmbe)
173 311  
... ... @@ -194,146 +332,157 @@
194 332 spin_unlock_irqrestore(&pmbe->lock, flags);
195 333 }
196 334  
197   -static struct {
198   - unsigned long size;
199   - int flag;
200   -} pmb_sizes[] = {
201   - { .size = SZ_512M, .flag = PMB_SZ_512M, },
202   - { .size = SZ_128M, .flag = PMB_SZ_128M, },
203   - { .size = SZ_64M, .flag = PMB_SZ_64M, },
204   - { .size = SZ_16M, .flag = PMB_SZ_16M, },
205   -};
206   -
207   -long pmb_remap(unsigned long vaddr, unsigned long phys,
208   - unsigned long size, pgprot_t prot)
  335 +int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
  336 + unsigned long size, pgprot_t prot)
209 337 {
210 338 struct pmb_entry *pmbp, *pmbe;
211   - unsigned long wanted;
212   - int pmb_flags, i;
213   - long err;
214   - u64 flags;
  339 + unsigned long orig_addr, orig_size;
  340 + unsigned long flags, pmb_flags;
  341 + int i, mapped;
215 342  
216   - flags = pgprot_val(prot);
  343 + if (!pmb_addr_valid(vaddr, size))
  344 + return -EFAULT;
  345 + if (pmb_mapping_exists(vaddr, phys, size))
  346 + return 0;
217 347  
218   - pmb_flags = PMB_WT | PMB_UB;
  348 + orig_addr = vaddr;
  349 + orig_size = size;
219 350  
220   - /* Convert typical pgprot value to the PMB equivalent */
221   - if (flags & _PAGE_CACHABLE) {
222   - pmb_flags |= PMB_C;
  351 + flush_tlb_kernel_range(vaddr, vaddr + size);
223 352  
224   - if ((flags & _PAGE_WT) == 0)
225   - pmb_flags &= ~(PMB_WT | PMB_UB);
226   - }
227   -
  353 + pmb_flags = pgprot_to_pmb_flags(prot);
228 354 pmbp = NULL;
229   - wanted = size;
230 355  
231   -again:
232   - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
233   - unsigned long flags;
  356 + do {
  357 + for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
  358 + if (size < pmb_sizes[i].size)
  359 + continue;
234 360  
235   - if (size < pmb_sizes[i].size)
236   - continue;
  361 + pmbe = pmb_alloc(vaddr, phys, pmb_flags |
  362 + pmb_sizes[i].flag, PMB_NO_ENTRY);
  363 + if (IS_ERR(pmbe)) {
  364 + pmb_unmap_entry(pmbp, mapped);
  365 + return PTR_ERR(pmbe);
  366 + }
237 367  
238   - pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
239   - PMB_NO_ENTRY);
240   - if (IS_ERR(pmbe)) {
241   - err = PTR_ERR(pmbe);
242   - goto out;
243   - }
  368 + spin_lock_irqsave(&pmbe->lock, flags);
244 369  
245   - spin_lock_irqsave(&pmbe->lock, flags);
  370 + pmbe->size = pmb_sizes[i].size;
246 371  
247   - __set_pmb_entry(pmbe);
  372 + __set_pmb_entry(pmbe);
248 373  
249   - phys += pmb_sizes[i].size;
250   - vaddr += pmb_sizes[i].size;
251   - size -= pmb_sizes[i].size;
  374 + phys += pmbe->size;
  375 + vaddr += pmbe->size;
  376 + size -= pmbe->size;
252 377  
253   - pmbe->size = pmb_sizes[i].size;
  378 + /*
  379 + * Link adjacent entries that span multiple PMB
  380 + * entries for easier tear-down.
  381 + */
  382 + if (likely(pmbp)) {
  383 + spin_lock(&pmbp->lock);
  384 + pmbp->link = pmbe;
  385 + spin_unlock(&pmbp->lock);
  386 + }
254 387  
255   - /*
256   - * Link adjacent entries that span multiple PMB entries
257   - * for easier tear-down.
258   - */
259   - if (likely(pmbp)) {
260   - spin_lock(&pmbp->lock);
261   - pmbp->link = pmbe;
262   - spin_unlock(&pmbp->lock);
  388 + pmbp = pmbe;
  389 +
  390 + /*
  391 + * Instead of trying smaller sizes on every
  392 + * iteration (even if we succeed in allocating
  393 + * space), try using pmb_sizes[i].size again.
  394 + */
  395 + i--;
  396 + mapped++;
  397 +
  398 + spin_unlock_irqrestore(&pmbe->lock, flags);
263 399 }
  400 + } while (size >= SZ_16M);
264 401  
265   - pmbp = pmbe;
  402 + flush_cache_vmap(orig_addr, orig_addr + orig_size);
266 403  
267   - /*
268   - * Instead of trying smaller sizes on every iteration
269   - * (even if we succeed in allocating space), try using
270   - * pmb_sizes[i].size again.
271   - */
272   - i--;
  404 + return 0;
  405 +}
273 406  
274   - spin_unlock_irqrestore(&pmbe->lock, flags);
275   - }
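The reworked loop decomposes a request greedily, largest PMB size first, retrying the same size after every successful allocation. As a hypothetical example, bolting 96 MB yields one 64 MB entry followed by two 16 MB entries, chained through ->link so that a later pmb_unmap_entry() tears the whole compound mapping down in one pass.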
  407 +void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
  408 + pgprot_t prot, void *caller)
  409 +{
  410 + unsigned long vaddr;
  411 + phys_addr_t offset, last_addr;
  412 + phys_addr_t align_mask;
  413 + unsigned long aligned;
  414 + struct vm_struct *area;
  415 + int i, ret;
276 416  
277   - if (size >= SZ_16M)
278   - goto again;
  417 + if (!pmb_iomapping_enabled)
  418 + return NULL;
279 419  
280   - return wanted - size;
  420 + /*
  421 + * Small mappings need to go through the TLB.
  422 + */
  423 + if (size < SZ_16M)
  424 + return ERR_PTR(-EINVAL);
  425 + if (!pmb_prot_valid(prot))
  426 + return ERR_PTR(-EINVAL);
281 427  
282   -out:
283   - pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
  428 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
  429 + if (size >= pmb_sizes[i].size)
  430 + break;
284 431  
285   - return err;
  432 + last_addr = phys + size;
  433 + align_mask = ~(pmb_sizes[i].size - 1);
  434 + offset = phys & ~align_mask;
  435 + phys &= align_mask;
  436 + aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
  437 +
  438 + /*
  439 + * XXX: This should really start from uncached_end, but this
  440 + * causes the MMU to reset, so for now we restrict it to the
  441 + * 0xb000...0xc000 range.
  442 + */
  443 + area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
  444 + P3SEG, caller);
  445 + if (!area)
  446 + return NULL;
  447 +
  448 + area->phys_addr = phys;
  449 + vaddr = (unsigned long)area->addr;
  450 +
  451 + ret = pmb_bolt_mapping(vaddr, phys, size, prot);
  452 + if (unlikely(ret != 0))
  453 + return ERR_PTR(ret);
  454 +
  455 + return (void __iomem *)(offset + (char *)vaddr);
286 456 }
287 457  
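A worked example of the alignment logic above, with assumed request values phys = 0x04801000 and size = 20 MB: the largest PMB size not exceeding the request is 16 MB, so

	align_mask = ~(SZ_16M - 1);
	offset     = 0x04801000 & ~align_mask;    /* 0x00801000 */
	phys       = 0x04801000 &  align_mask;    /* 0x04000000 */
	aligned    = ALIGN(0x05c01000, SZ_16M)
	           - 0x04000000;                  /* 0x02000000 (32 MB) */

i.e. a naturally aligned 32 MB region is bolted (as two 16 MB entries) and the caller receives vaddr + 0x00801000.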
288   -void pmb_unmap(unsigned long addr)
  458 +int pmb_unmap(void __iomem *addr)
289 459 {
290 460 struct pmb_entry *pmbe = NULL;
291   - int i;
  461 + unsigned long vaddr = (unsigned long __force)addr;
  462 + int i, found = 0;
292 463  
293 464 read_lock(&pmb_rwlock);
294 465  
295 466 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
296 467 if (test_bit(i, pmb_map)) {
297 468 pmbe = &pmb_entry_list[i];
298   - if (pmbe->vpn == addr)
  469 + if (pmbe->vpn == vaddr) {
  470 + found = 1;
299 471 break;
  472 + }
300 473 }
301 474 }
302 475  
303 476 read_unlock(&pmb_rwlock);
304 477  
305   - pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
306   -}
  478 + if (found) {
  479 + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
  480 + return 0;
  481 + }
307 482  
308   -static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
309   -{
310   - return (b->vpn == (a->vpn + a->size)) &&
311   - (b->ppn == (a->ppn + a->size)) &&
312   - (b->flags == a->flags);
  483 + return -EINVAL;
313 484 }
314 485  
315   -static bool pmb_size_valid(unsigned long size)
316   -{
317   - int i;
318   -
319   - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
320   - if (pmb_sizes[i].size == size)
321   - return true;
322   -
323   - return false;
324   -}
325   -
326   -static int pmb_size_to_flags(unsigned long size)
327   -{
328   - int i;
329   -
330   - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
331   - if (pmb_sizes[i].size == size)
332   - return pmb_sizes[i].flag;
333   -
334   - return 0;
335   -}
336   -
337 486 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
338 487 {
339 488 do {
... ... @@ -351,6 +500,8 @@
351 500 */
352 501 __clear_pmb_entry(pmbe);
353 502  
  503 + flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
  504 +
354 505 pmbe = pmblink->link;
355 506  
356 507 pmb_free(pmblink);
... ... @@ -369,11 +520,6 @@
369 520 write_unlock_irqrestore(&pmb_rwlock, flags);
370 521 }
371 522  
372   -static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
373   -{
374   - return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
375   -}
376   -
377 523 static void __init pmb_notify(void)
378 524 {
379 525 int i;
... ... @@ -625,6 +771,18 @@
625 771 }
626 772 #endif
627 773  
  774 +static int __init early_pmb(char *p)
  775 +{
  776 + if (!p)
  777 + return 0;
  778 +
  779 + if (strstr(p, "iomap"))
  780 + pmb_iomapping_enabled = 1;
  781 +
  782 + return 0;
  783 +}
  784 +early_param("pmb", early_pmb);
  785 +
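With this parameter, PMB-backed ioremap() stays opt-in; it is only activated by something like

	pmb=iomap

on the kernel command line (any value containing "iomap" matches, per the strstr() test above).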
628 786 void __init pmb_init(void)
629 787 {
630 788 /* Synchronize software state */
... ... @@ -713,7 +871,7 @@
713 871  
714 872 return 0;
715 873 }
716   -postcore_initcall(pmb_debugfs_init);
  874 +subsys_initcall(pmb_debugfs_init);
717 875  
718 876 #ifdef CONFIG_PM
719 877 static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
... ... @@ -24,8 +24,10 @@
24 24 #include <linux/delay.h>
25 25 #include <linux/dma-mapping.h>
26 26 #include <linux/platform_device.h>
27   -#include <cpu/dma.h>
28   -#include <asm/dma-sh.h>
  27 +#include <linux/pm_runtime.h>
  28 +
  29 +#include <asm/dmaengine.h>
  30 +
29 31 #include "shdma.h"
30 32  
31 33 /* DMA descriptor control */
32 34  
... ... @@ -38,55 +40,56 @@
38 40 };
39 41  
40 42 #define NR_DESCS_PER_CHANNEL 32
41   -/*
42   - * Define the default configuration for dual address memory-memory transfer.
43   - * The 0x400 value represents auto-request, external->external.
44   - *
45   - * And this driver set 4byte burst mode.
46   - * If you want to change mode, you need to change RS_DEFAULT of value.
47   - * (ex 1byte burst mode -> (RS_DUAL & ~TS_32)
48   - */
49   -#define RS_DEFAULT (RS_DUAL)
  43 +/* Default MEMCPY transfer size = 2^2 = 4 bytes */
  44 +#define LOG2_DEFAULT_XFER_SIZE 2
50 45  
51 46 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
52 47 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
53 48  
54 49 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
55 50  
56   -#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
57 51 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
58 52 {
59   - ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
  53 + __raw_writel(data, sh_dc->base + reg / sizeof(u32));
60 54 }
61 55  
62 56 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
63 57 {
64   - return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg);
  58 + return __raw_readl(sh_dc->base + reg / sizeof(u32));
65 59 }
66 60  
  61 +static u16 dmaor_read(struct sh_dmae_device *shdev)
  62 +{
  63 + return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
  64 +}
  65 +
  66 +static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
  67 +{
  68 + __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
  69 +}
  70 +
67 71 /*
68 72 * Reset DMA controller
69 73 *
70 74 * SH7780 has two DMAOR register
71 75 */
72   -static void sh_dmae_ctl_stop(int id)
  76 +static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
73 77 {
74   - unsigned short dmaor = dmaor_read_reg(id);
  78 + unsigned short dmaor = dmaor_read(shdev);
75 79  
76   - dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
77   - dmaor_write_reg(id, dmaor);
  80 + dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
78 81 }
79 82  
80   -static int sh_dmae_rst(int id)
  83 +static int sh_dmae_rst(struct sh_dmae_device *shdev)
81 84 {
82 85 unsigned short dmaor;
83 86  
84   - sh_dmae_ctl_stop(id);
85   - dmaor = dmaor_read_reg(id) | DMAOR_INIT;
  87 + sh_dmae_ctl_stop(shdev);
  88 + dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
86 89  
87   - dmaor_write_reg(id, dmaor);
88   - if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
89   - pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
  90 + dmaor_write(shdev, dmaor);
  91 + if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
  92 + pr_warning("dma-sh: Can't initialize DMAOR.\n");
90 93 return -EINVAL;
91 94 }
92 95 return 0;
93 96  
... ... @@ -102,15 +105,38 @@
102 105 return false; /* waiting */
103 106 }
104 107  
105   -static unsigned int ts_shift[] = TS_SHIFT;
106   -static inline unsigned int calc_xmit_shift(u32 chcr)
  108 +static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
107 109 {
108   - int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
109   - ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
  110 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
  111 + struct sh_dmae_device, common);
  112 + struct sh_dmae_pdata *pdata = shdev->pdata;
  113 + int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
  114 + ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
110 115  
111   - return ts_shift[cnt];
  116 + if (cnt >= pdata->ts_shift_num)
  117 + cnt = 0;
  118 +
  119 + return pdata->ts_shift[cnt];
112 120 }
113 121  
  122 +static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
  123 +{
  124 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
  125 + struct sh_dmae_device, common);
  126 + struct sh_dmae_pdata *pdata = shdev->pdata;
  127 + int i;
  128 +
  129 + for (i = 0; i < pdata->ts_shift_num; i++)
  130 + if (pdata->ts_shift[i] == l2size)
  131 + break;
  132 +
  133 + if (i == pdata->ts_shift_num)
  134 + i = 0;
  135 +
  136 + return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
  137 + ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
  138 +}
  139 +
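A round-trip sketch of the two helpers above, assuming a hypothetical pdata->ts_shift[] = { 3, 4, 5 } (8-, 16- and 32-byte transfers): for a 16-byte transfer, l2size = 4 matches index 1; log2size_to_chcr() scatters that index across the TS_LOW/TS_HIGH fields of CHCR, and calc_xmit_shift() later recombines the two fields into index 1 and returns ts_shift[1] = 4.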
114 140 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
115 141 {
116 142 sh_dmae_writel(sh_chan, hw->sar, SAR);
... ... @@ -136,8 +162,13 @@
136 162  
137 163 static void dmae_init(struct sh_dmae_chan *sh_chan)
138 164 {
139   - u32 chcr = RS_DEFAULT; /* default is DUAL mode */
140   - sh_chan->xmit_shift = calc_xmit_shift(chcr);
  165 + /*
  166 + * Default configuration for dual address memory-memory transfer.
  167 + * 0x400 represents auto-request.
  168 + */
  169 + u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
  170 + LOG2_DEFAULT_XFER_SIZE);
  171 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
141 172 sh_dmae_writel(sh_chan, chcr, CHCR);
142 173 }
143 174  
... ... @@ -147,38 +178,27 @@
147 178 if (dmae_is_busy(sh_chan))
148 179 return -EBUSY;
149 180  
150   - sh_chan->xmit_shift = calc_xmit_shift(val);
  181 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
151 182 sh_dmae_writel(sh_chan, val, CHCR);
152 183  
153 184 return 0;
154 185 }
155 186  
156   -#define DMARS_SHIFT 8
157   -#define DMARS_CHAN_MSK 0x01
158 187 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
159 188 {
160   - u32 addr;
161   - int shift = 0;
  189 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
  190 + struct sh_dmae_device, common);
  191 + struct sh_dmae_pdata *pdata = shdev->pdata;
  192 + struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
  193 + u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
  194 + int shift = chan_pdata->dmars_bit;
162 195  
163 196 if (dmae_is_busy(sh_chan))
164 197 return -EBUSY;
165 198  
166   - if (sh_chan->id & DMARS_CHAN_MSK)
167   - shift = DMARS_SHIFT;
  199 + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
  200 + addr);
168 201  
169   - if (sh_chan->id < 6)
170   - /* DMA0RS0 - DMA0RS2 */
171   - addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
172   -#ifdef SH_DMARS_BASE1
173   - else if (sh_chan->id < 12)
174   - /* DMA1RS0 - DMA1RS2 */
175   - addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
176   -#endif
177   - else
178   - return -EINVAL;
179   -
180   - ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
181   -
182 202 return 0;
183 203 }
184 204  
185 205  
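The DMARS write above is an 8-bit field update inside a 16-bit register shared by two channels. With chan_pdata->dmars_bit = 8 (upper byte):

	old = __raw_readw(addr) & (0xff00 >> 8);    /* keep bits 7:0     */
	new = old | (val << 8);                     /* MID/RID into 15:8 */

and with dmars_bit = 0 the two bytes simply swap roles.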
... ... @@ -251,15 +271,15 @@
251 271 struct dma_device *dma_dev = sh_chan->common.device;
252 272 struct sh_dmae_device *shdev = container_of(dma_dev,
253 273 struct sh_dmae_device, common);
254   - struct sh_dmae_pdata *pdata = &shdev->pdata;
  274 + struct sh_dmae_pdata *pdata = shdev->pdata;
255 275 int i;
256 276  
257 277 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
258 278 return NULL;
259 279  
260   - for (i = 0; i < pdata->config_num; i++)
261   - if (pdata->config[i].slave_id == slave_id)
262   - return pdata->config + i;
  280 + for (i = 0; i < pdata->slave_num; i++)
  281 + if (pdata->slave[i].slave_id == slave_id)
  282 + return pdata->slave + i;
263 283  
264 284 return NULL;
265 285 }
... ... @@ -270,6 +290,8 @@
270 290 struct sh_desc *desc;
271 291 struct sh_dmae_slave *param = chan->private;
272 292  
  293 + pm_runtime_get_sync(sh_chan->dev);
  294 +
273 295 /*
274 296 * This relies on the guarantee from dmaengine that alloc_chan_resources
275 297 * never runs concurrently with itself or free_chan_resources.
... ... @@ -288,9 +310,8 @@
288 310  
289 311 dmae_set_dmars(sh_chan, cfg->mid_rid);
290 312 dmae_set_chcr(sh_chan, cfg->chcr);
291   - } else {
292   - if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400)
293   - dmae_set_chcr(sh_chan, RS_DEFAULT);
  313 + } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
  314 + dmae_init(sh_chan);
294 315 }
295 316  
296 317 spin_lock_bh(&sh_chan->desc_lock);
... ... @@ -312,6 +333,9 @@
312 333 }
313 334 spin_unlock_bh(&sh_chan->desc_lock);
314 335  
  336 + if (!sh_chan->descs_allocated)
  337 + pm_runtime_put(sh_chan->dev);
  338 +
315 339 return sh_chan->descs_allocated;
316 340 }
317 341  
... ... @@ -323,6 +347,7 @@
323 347 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
324 348 struct sh_desc *desc, *_desc;
325 349 LIST_HEAD(list);
  350 + int descs = sh_chan->descs_allocated;
326 351  
327 352 dmae_halt(sh_chan);
328 353  
... ... @@ -343,6 +368,9 @@
343 368  
344 369 spin_unlock_bh(&sh_chan->desc_lock);
345 370  
  371 + if (descs > 0)
  372 + pm_runtime_put(sh_chan->dev);
  373 +
346 374 list_for_each_entry_safe(desc, _desc, &list, node)
347 375 kfree(desc);
348 376 }
... ... @@ -559,6 +587,19 @@
559 587 if (!chan)
560 588 return;
561 589  
  590 + dmae_halt(sh_chan);
  591 +
  592 + spin_lock_bh(&sh_chan->desc_lock);
  593 + if (!list_empty(&sh_chan->ld_queue)) {
  594 + /* Record partial transfer */
  595 + struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
  596 + struct sh_desc, node);
  597 + desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
  598 + sh_chan->xmit_shift;
  600 + }
  601 + spin_unlock_bh(&sh_chan->desc_lock);
  602 +
562 603 sh_dmae_chan_ld_cleanup(sh_chan, true);
563 604 }
564 605  
... ... @@ -661,7 +702,7 @@
661 702  
662 703 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
663 704 {
664   - struct sh_desc *sd;
  705 + struct sh_desc *desc;
665 706  
666 707 spin_lock_bh(&sh_chan->desc_lock);
667 708 /* DMA work check */
668 709  
... ... @@ -671,10 +712,13 @@
671 712 }
672 713  
673 714 /* Find the first not-yet-transferred descriptor */
674   - list_for_each_entry(sd, &sh_chan->ld_queue, node)
675   - if (sd->mark == DESC_SUBMITTED) {
  715 + list_for_each_entry(desc, &sh_chan->ld_queue, node)
  716 + if (desc->mark == DESC_SUBMITTED) {
  717 + dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
  718 + desc->async_tx.cookie, sh_chan->id,
  719 + desc->hw.tcr, desc->hw.sar, desc->hw.dar);
676 720 /* Get the ld start address from ld_queue */
677   - dmae_set_reg(sh_chan, &sd->hw);
  721 + dmae_set_reg(sh_chan, &desc->hw);
678 722 dmae_start(sh_chan);
679 723 break;
680 724 }
... ... @@ -696,6 +740,7 @@
696 740 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
697 741 dma_cookie_t last_used;
698 742 dma_cookie_t last_complete;
  743 + enum dma_status status;
699 744  
700 745 sh_dmae_chan_ld_cleanup(sh_chan, false);
701 746  
... ... @@ -709,7 +754,27 @@
709 754 if (used)
710 755 *used = last_used;
711 756  
712   - return dma_async_is_complete(cookie, last_complete, last_used);
  757 + spin_lock_bh(&sh_chan->desc_lock);
  758 +
  759 + status = dma_async_is_complete(cookie, last_complete, last_used);
  760 +
  761 + /*
  762 + * If we don't find the cookie on the queue, it has been aborted and
  763 + * we have to report an error
  764 + */
  765 + if (status != DMA_SUCCESS) {
  766 + struct sh_desc *desc;
  767 + status = DMA_ERROR;
  768 + list_for_each_entry(desc, &sh_chan->ld_queue, node)
  769 + if (desc->cookie == cookie) {
  770 + status = DMA_IN_PROGRESS;
  771 + break;
  772 + }
  773 + }
  774 +
  775 + spin_unlock_bh(&sh_chan->desc_lock);
  776 +
  777 + return status;
713 778 }
714 779  
715 780 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
716 781  
... ... @@ -732,40 +797,32 @@
732 797 #if defined(CONFIG_CPU_SH4)
733 798 static irqreturn_t sh_dmae_err(int irq, void *data)
734 799 {
735   - int err = 0;
736 800 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
  801 + int i;
737 802  
738   - /* IRQ Multi */
739   - if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
740   - int __maybe_unused cnt = 0;
741   - switch (irq) {
742   -#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
743   - case DMTE6_IRQ:
744   - cnt++;
745   -#endif
746   - case DMTE0_IRQ:
747   - if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) {
748   - disable_irq(irq);
749   - return IRQ_HANDLED;
  803 + /* halt the dma controller */
  804 + sh_dmae_ctl_stop(shdev);
  805 +
  806 + /* We cannot detect which channel caused the error, so we have to reset all */
  807 + for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
  808 + struct sh_dmae_chan *sh_chan = shdev->chan[i];
  809 + if (sh_chan) {
  810 + struct sh_desc *desc;
  811 + /* Stop the channel */
  812 + dmae_halt(sh_chan);
  813 + /* Complete all */
  814 + list_for_each_entry(desc, &sh_chan->ld_queue, node) {
  815 + struct dma_async_tx_descriptor *tx = &desc->async_tx;
  816 + desc->mark = DESC_IDLE;
  817 + if (tx->callback)
  818 + tx->callback(tx->callback_param);
750 819 }
751   - default:
752   - return IRQ_NONE;
  820 + list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
753 821 }
754   - } else {
755   - /* reset dma controller */
756   - err = sh_dmae_rst(0);
757   - if (err)
758   - return err;
759   -#ifdef SH_DMAC_BASE1
760   - if (shdev->pdata.mode & SHDMA_DMAOR1) {
761   - err = sh_dmae_rst(1);
762   - if (err)
763   - return err;
764   - }
765   -#endif
766   - disable_irq(irq);
767   - return IRQ_HANDLED;
768 822 }
  823 + sh_dmae_rst(shdev);
  824 +
  825 + return IRQ_HANDLED;
769 826 }
770 827 #endif
771 828  
772 829  
773 830  
... ... @@ -796,19 +853,12 @@
796 853 sh_dmae_chan_ld_cleanup(sh_chan, false);
797 854 }
798 855  
799   -static unsigned int get_dmae_irq(unsigned int id)
  856 +static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
  857 + int irq, unsigned long flags)
800 858 {
801   - unsigned int irq = 0;
802   - if (id < ARRAY_SIZE(dmte_irq_map))
803   - irq = dmte_irq_map[id];
804   - return irq;
805   -}
806   -
807   -static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
808   -{
809 859 int err;
810   - unsigned int irq = get_dmae_irq(id);
811   - unsigned long irqflags = IRQF_DISABLED;
  860 + struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
  861 + struct platform_device *pdev = to_platform_device(shdev->common.dev);
812 862 struct sh_dmae_chan *new_sh_chan;
813 863  
814 864 /* alloc channel */
815 865  
... ... @@ -819,8 +869,13 @@
819 869 return -ENOMEM;
820 870 }
821 871  
  872 + /* copy struct dma_device */
  873 + new_sh_chan->common.device = &shdev->common;
  874 +
822 875 new_sh_chan->dev = shdev->common.dev;
823 876 new_sh_chan->id = id;
  877 + new_sh_chan->irq = irq;
  878 + new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
824 879  
825 880 /* Init DMA tasklet */
826 881 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
827 882  
828 883  
829 884  
... ... @@ -835,29 +890,20 @@
835 890 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
836 891 INIT_LIST_HEAD(&new_sh_chan->ld_free);
837 892  
838   - /* copy struct dma_device */
839   - new_sh_chan->common.device = &shdev->common;
840   -
841 893 /* Add the channel to DMA device channel list */
842 894 list_add_tail(&new_sh_chan->common.device_node,
843 895 &shdev->common.channels);
844 896 shdev->common.chancnt++;
845 897  
846   - if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
847   - irqflags = IRQF_SHARED;
848   -#if defined(DMTE6_IRQ)
849   - if (irq >= DMTE6_IRQ)
850   - irq = DMTE6_IRQ;
851   - else
852   -#endif
853   - irq = DMTE0_IRQ;
854   - }
  898 + if (pdev->id >= 0)
  899 + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
  900 + "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
  901 + else
  902 + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
  903 + "sh-dma%d", new_sh_chan->id);
855 904  
856   - snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
857   - "sh-dmae%d", new_sh_chan->id);
858   -
859 905 /* set up channel irq */
860   - err = request_irq(irq, &sh_dmae_interrupt, irqflags,
  906 + err = request_irq(irq, &sh_dmae_interrupt, flags,
861 907 new_sh_chan->dev_id, new_sh_chan);
862 908 if (err) {
863 909 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
864 910  
... ... @@ -881,12 +927,12 @@
881 927  
882 928 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
883 929 if (shdev->chan[i]) {
884   - struct sh_dmae_chan *shchan = shdev->chan[i];
885   - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
886   - free_irq(dmte_irq_map[i], shchan);
  930 + struct sh_dmae_chan *sh_chan = shdev->chan[i];
887 931  
888   - list_del(&shchan->common.device_node);
889   - kfree(shchan);
  932 + free_irq(sh_chan->irq, sh_chan);
  933 +
  934 + list_del(&sh_chan->common.device_node);
  935 + kfree(sh_chan);
890 936 shdev->chan[i] = NULL;
891 937 }
892 938 }
893 939  
895 941  
896 942 static int __init sh_dmae_probe(struct platform_device *pdev)
897 943 {
898   - int err = 0, cnt, ecnt;
899   - unsigned long irqflags = IRQF_DISABLED;
900   -#if defined(CONFIG_CPU_SH4)
901   - int eirq[] = { DMAE0_IRQ,
902   -#if defined(DMAE1_IRQ)
903   - DMAE1_IRQ
904   -#endif
905   - };
906   -#endif
  944 + struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
  945 + unsigned long irqflags = IRQF_DISABLED,
  946 + chan_flag[SH_DMAC_MAX_CHANNELS] = {};
  947 + int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
  948 + int err, i, irq_cnt = 0, irqres = 0;
907 949 struct sh_dmae_device *shdev;
  950 + struct resource *chan, *dmars, *errirq_res, *chanirq_res;
908 951  
909 952 /* get platform data */
910   - if (!pdev->dev.platform_data)
  953 + if (!pdata || !pdata->channel_num)
911 954 return -ENODEV;
912 955  
  956 + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  957 + /* The DMARS area is optional; if absent, this controller cannot do slave DMA */
  958 + dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  959 + /*
  960 + * IRQ resources:
  961 + * 1. there must always be at least one IRQ IO-resource. On SH4 it is
  962 + * the error IRQ, in which case it is the only IRQ in this resource:
  963 + * start == end. If it is the only IRQ resource, all channels also
  964 + * use the same IRQ.
  965 + * 2. DMA channel IRQ resources can be specified one per resource or in
  966 + * ranges (start != end)
  967 + * 3. iff all events (channels and, optionally, error) on this
  968 + * controller use the same IRQ, only one IRQ resource can be
  969 + * specified, otherwise there must be one IRQ per channel, even if
  970 + * some of them are equal
  971 + * 4. if all IRQs on this controller are equal or if some specific IRQs
  972 + * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
  973 + * requested with the IRQF_SHARED flag
  974 + */
  975 + errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  976 + if (!chan || !errirq_res)
  977 + return -ENODEV;
  978 +
  979 + if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
  980 + dev_err(&pdev->dev, "DMAC register region already claimed\n");
  981 + return -EBUSY;
  982 + }
  983 +
  984 + if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
  985 + dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
  986 + err = -EBUSY;
  987 + goto ermrdmars;
  988 + }
  989 +
  990 + err = -ENOMEM;
913 991 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
914 992 if (!shdev) {
915   - dev_err(&pdev->dev, "No enough memory\n");
916   - return -ENOMEM;
  993 + dev_err(&pdev->dev, "Not enough memory\n");
  994 + goto ealloc;
917 995 }
918 996  
  997 + shdev->chan_reg = ioremap(chan->start, resource_size(chan));
  998 + if (!shdev->chan_reg)
  999 + goto emapchan;
  1000 + if (dmars) {
  1001 + shdev->dmars = ioremap(dmars->start, resource_size(dmars));
  1002 + if (!shdev->dmars)
  1003 + goto emapdmars;
  1004 + }
  1005 +
919 1006 /* platform data */
920   - memcpy(&shdev->pdata, pdev->dev.platform_data,
921   - sizeof(struct sh_dmae_pdata));
  1007 + shdev->pdata = pdata;
922 1008  
  1009 + pm_runtime_enable(&pdev->dev);
  1010 + pm_runtime_get_sync(&pdev->dev);
  1011 +
923 1012 /* reset dma controller */
924   - err = sh_dmae_rst(0);
  1013 + err = sh_dmae_rst(shdev);
925 1014 if (err)
926 1015 goto rst_err;
927 1016  
928   - /* SH7780/85/23 has DMAOR1 */
929   - if (shdev->pdata.mode & SHDMA_DMAOR1) {
930   - err = sh_dmae_rst(1);
931   - if (err)
932   - goto rst_err;
933   - }
934   -
935 1017 INIT_LIST_HEAD(&shdev->common.channels);
936 1018  
937 1019 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
938   - dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
  1020 + if (dmars)
  1021 + dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
939 1022  
940 1023 shdev->common.device_alloc_chan_resources
941 1024 = sh_dmae_alloc_chan_resources;
942 1025  
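For illustration, a platform device following the IRQ conventions spelled out in the probe comment above might register its resources like this (all addresses and IRQ numbers are made up):

	static struct resource sh_dmae_resources[] = {
		{
			/* channel register block */
			.start	= 0xfe008020,
			.end	= 0xfe00808f,
			.flags	= IORESOURCE_MEM,
		}, {
			/* DMARS: optional, enables slave DMA */
			.start	= 0xfe009000,
			.end	= 0xfe009003,
			.flags	= IORESOURCE_MEM,
		}, {
			/* error IRQ: start == end */
			.start	= 78,
			.end	= 78,
			.flags	= IORESOURCE_IRQ,
		}, {
			/* six channel IRQs, specified as one range */
			.start	= 48,
			.end	= 53,
			.flags	= IORESOURCE_IRQ,
		},
	};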
... ... @@ -950,37 +1033,72 @@
950 1033  
951 1034 shdev->common.dev = &pdev->dev;
952 1035 /* Default transfer size of 4 bytes requires 4-byte alignment */
953   - shdev->common.copy_align = 5;
  1036 + shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
954 1037  
955 1038 #if defined(CONFIG_CPU_SH4)
956   - /* Non Mix IRQ mode SH7722/SH7730 etc... */
957   - if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
  1039 + chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
  1040 +
  1041 + if (!chanirq_res)
  1042 + chanirq_res = errirq_res;
  1043 + else
  1044 + irqres++;
  1045 +
  1046 + if (chanirq_res == errirq_res ||
  1047 + (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
958 1048 irqflags = IRQF_SHARED;
959   - eirq[0] = DMTE0_IRQ;
960   -#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ)
961   - eirq[1] = DMTE6_IRQ;
962   -#endif
  1049 +
  1050 + errirq = errirq_res->start;
  1051 +
  1052 + err = request_irq(errirq, sh_dmae_err, irqflags,
  1053 + "DMAC Address Error", shdev);
  1054 + if (err) {
  1055 + dev_err(&pdev->dev,
  1056 + "DMA failed requesting irq #%d, error %d\n",
  1057 + errirq, err);
  1058 + goto eirq_err;
963 1059 }
964 1060  
965   - for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
966   - err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
967   - "DMAC Address Error", shdev);
968   - if (err) {
969   - dev_err(&pdev->dev, "DMA device request_irq"
970   - "error (irq %d) with return %d\n",
971   - eirq[ecnt], err);
972   - goto eirq_err;
  1061 +#else
  1062 + chanirq_res = errirq_res;
  1063 +#endif /* CONFIG_CPU_SH4 */
  1064 +
  1065 + if (chanirq_res->start == chanirq_res->end &&
  1066 + !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
  1067 + /* Special case - all multiplexed */
  1068 + for (; irq_cnt < pdata->channel_num; irq_cnt++) {
  1069 + chan_irq[irq_cnt] = chanirq_res->start;
  1070 + chan_flag[irq_cnt] = IRQF_SHARED;
973 1071 }
  1072 + } else {
  1073 + do {
  1074 + for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
  1075 + if ((errirq_res->flags & IORESOURCE_BITS) ==
  1076 + IORESOURCE_IRQ_SHAREABLE)
  1077 + chan_flag[irq_cnt] = IRQF_SHARED;
  1078 + else
  1079 + chan_flag[irq_cnt] = IRQF_DISABLED;
  1080 + dev_dbg(&pdev->dev,
  1081 + "Found IRQ %d for channel %d\n",
  1082 + i, irq_cnt);
  1083 + chan_irq[irq_cnt++] = i;
  1084 + }
  1085 + chanirq_res = platform_get_resource(pdev,
  1086 + IORESOURCE_IRQ, ++irqres);
  1087 + } while (irq_cnt < pdata->channel_num && chanirq_res);
974 1088 }
975   -#endif /* CONFIG_CPU_SH4 */
976 1089  
  1090 + if (irq_cnt < pdata->channel_num)
  1091 + goto eirqres;
  1092 +
977 1093 /* Create DMA Channel */
978   - for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) {
979   - err = sh_dmae_chan_probe(shdev, cnt);
  1094 + for (i = 0; i < pdata->channel_num; i++) {
  1095 + err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
980 1096 if (err)
981 1097 goto chan_probe_err;
982 1098 }
983 1099  
  1100 + pm_runtime_put(&pdev->dev);
  1101 +
984 1102 platform_set_drvdata(pdev, shdev);
985 1103 dma_async_device_register(&shdev->common);
986 1104  
... ... @@ -988,13 +1106,24 @@
988 1106  
989 1107 chan_probe_err:
990 1108 sh_dmae_chan_remove(shdev);
991   -
  1109 +eirqres:
  1110 +#if defined(CONFIG_CPU_SH4)
  1111 + free_irq(errirq, shdev);
992 1112 eirq_err:
993   - for (ecnt-- ; ecnt >= 0; ecnt--)
994   - free_irq(eirq[ecnt], shdev);
995   -
  1113 +#endif
996 1114 rst_err:
  1115 + pm_runtime_put(&pdev->dev);
  1116 + if (dmars)
  1117 + iounmap(shdev->dmars);
  1118 +emapdmars:
  1119 + iounmap(shdev->chan_reg);
  1120 +emapchan:
997 1121 kfree(shdev);
  1122 +ealloc:
  1123 + if (dmars)
  1124 + release_mem_region(dmars->start, resource_size(dmars));
  1125 +ermrdmars:
  1126 + release_mem_region(chan->start, resource_size(chan));
998 1127  
999 1128 return err;
1000 1129 }
1001 1130  
1002 1131  
1003 1132  
1004 1133  
... ... @@ -1002,36 +1131,39 @@
1002 1131 static int __exit sh_dmae_remove(struct platform_device *pdev)
1003 1132 {
1004 1133 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
  1134 + struct resource *res;
  1135 + int errirq = platform_get_irq(pdev, 0);
1005 1136  
1006 1137 dma_async_device_unregister(&shdev->common);
1007 1138  
1008   - if (shdev->pdata.mode & SHDMA_MIX_IRQ) {
1009   - free_irq(DMTE0_IRQ, shdev);
1010   -#if defined(DMTE6_IRQ)
1011   - free_irq(DMTE6_IRQ, shdev);
1012   -#endif
1013   - }
  1139 + if (errirq > 0)
  1140 + free_irq(errirq, shdev);
1014 1141  
1015 1142 /* channel data remove */
1016 1143 sh_dmae_chan_remove(shdev);
1017 1144  
1018   - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) {
1019   - free_irq(DMAE0_IRQ, shdev);
1020   -#if defined(DMAE1_IRQ)
1021   - free_irq(DMAE1_IRQ, shdev);
1022   -#endif
1023   - }
  1145 + pm_runtime_disable(&pdev->dev);
  1146 +
  1147 + if (shdev->dmars)
  1148 + iounmap(shdev->dmars);
  1149 + iounmap(shdev->chan_reg);
  1150 +
1024 1151 kfree(shdev);
1025 1152  
  1153 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1154 + if (res)
  1155 + release_mem_region(res->start, resource_size(res));
  1156 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  1157 + if (res)
  1158 + release_mem_region(res->start, resource_size(res));
  1159 +
1026 1160 return 0;
1027 1161 }
1028 1162  
1029 1163 static void sh_dmae_shutdown(struct platform_device *pdev)
1030 1164 {
1031 1165 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1032   - sh_dmae_ctl_stop(0);
1033   - if (shdev->pdata.mode & SHDMA_DMAOR1)
1034   - sh_dmae_ctl_stop(1);
  1166 + sh_dmae_ctl_stop(shdev);
1035 1167 }
1036 1168  
1037 1169 static struct platform_driver sh_dmae_driver = {
... ... @@ -17,24 +17,10 @@
17 17 #include <linux/interrupt.h>
18 18 #include <linux/list.h>
19 19  
  20 +#include <asm/dmaengine.h>
  21 +
20 22 #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
21 23  
22   -struct sh_dmae_regs {
23   - u32 sar; /* SAR / source address */
24   - u32 dar; /* DAR / destination address */
25   - u32 tcr; /* TCR / transfer count */
26   -};
27   -
28   -struct sh_desc {
29   - struct sh_dmae_regs hw;
30   - struct list_head node;
31   - struct dma_async_tx_descriptor async_tx;
32   - enum dma_data_direction direction;
33   - dma_cookie_t cookie;
34   - int chunks;
35   - int mark;
36   -};
37   -
38 24 struct device;
39 25  
40 26 struct sh_dmae_chan {
41 27  
42 28  
... ... @@ -47,14 +33,18 @@
47 33 struct tasklet_struct tasklet; /* Tasklet */
48 34 int descs_allocated; /* desc count */
49 35 int xmit_shift; /* log_2(bytes_per_xfer) */
  36 + int irq;
50 37 int id; /* Raw id of this channel */
  38 + u32 __iomem *base;
51 39 char dev_id[16]; /* unique name per DMAC of channel */
52 40 };
53 41  
54 42 struct sh_dmae_device {
55 43 struct dma_device common;
56   - struct sh_dmae_chan *chan[MAX_DMA_CHANNELS];
57   - struct sh_dmae_pdata pdata;
  44 + struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
  45 + struct sh_dmae_pdata *pdata;
  46 + u32 __iomem *chan_reg;
  47 + u16 __iomem *dmars;
58 48 };
59 49  
60 50 #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
drivers/serial/Kconfig
... ... @@ -1009,6 +1009,10 @@
1009 1009 depends on SERIAL_SH_SCI=y
1010 1010 select SERIAL_CORE_CONSOLE
1011 1011  
  1012 +config SERIAL_SH_SCI_DMA
  1013 + bool "DMA support"
  1014 + depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL
  1015 +
1012 1016 config SERIAL_PNX8XXX
1013 1017 bool "Enable PNX8XXX SoCs' UART Support"
1014 1018 depends on MIPS && (SOC_PNX8550 || SOC_PNX833X)
drivers/serial/sh-sci.c
... ... @@ -48,6 +48,9 @@
48 48 #include <linux/ctype.h>
49 49 #include <linux/err.h>
50 50 #include <linux/list.h>
  51 +#include <linux/dmaengine.h>
  52 +#include <linux/scatterlist.h>
  53 +#include <linux/timer.h>
51 54  
52 55 #ifdef CONFIG_SUPERH
53 56 #include <asm/sh_bios.h>
... ... @@ -84,6 +87,27 @@
84 87 struct clk *dclk;
85 88  
86 89 struct list_head node;
  90 + struct dma_chan *chan_tx;
  91 + struct dma_chan *chan_rx;
  92 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  93 + struct device *dma_dev;
  94 + enum sh_dmae_slave_chan_id slave_tx;
  95 + enum sh_dmae_slave_chan_id slave_rx;
  96 + struct dma_async_tx_descriptor *desc_tx;
  97 + struct dma_async_tx_descriptor *desc_rx[2];
  98 + dma_cookie_t cookie_tx;
  99 + dma_cookie_t cookie_rx[2];
  100 + dma_cookie_t active_rx;
  101 + struct scatterlist sg_tx;
  102 + unsigned int sg_len_tx;
  103 + struct scatterlist sg_rx[2];
  104 + size_t buf_len_rx;
  105 + struct sh_dmae_slave param_tx;
  106 + struct sh_dmae_slave param_rx;
  107 + struct work_struct work_tx;
  108 + struct work_struct work_rx;
  109 + struct timer_list rx_timer;
  110 +#endif
87 111 };
88 112  
89 113 struct sh_sci_priv {
90 114  
... ... @@ -269,30 +293,45 @@
269 293 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
270 294 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
271 295 defined(CONFIG_CPU_SUBTYPE_SH7786)
272   -static inline int scif_txroom(struct uart_port *port)
  296 +static int scif_txfill(struct uart_port *port)
273 297 {
274   - return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff);
  298 + return sci_in(port, SCTFDR) & 0xff;
275 299 }
276 300  
277   -static inline int scif_rxroom(struct uart_port *port)
  301 +static int scif_txroom(struct uart_port *port)
278 302 {
  303 + return SCIF_TXROOM_MAX - scif_txfill(port);
  304 +}
  305 +
  306 +static int scif_rxfill(struct uart_port *port)
  307 +{
279 308 return sci_in(port, SCRFDR) & 0xff;
280 309 }
281 310 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
282   -static inline int scif_txroom(struct uart_port *port)
  311 +static int scif_txfill(struct uart_port *port)
283 312 {
284   - if ((port->mapbase == 0xffe00000) ||
285   - (port->mapbase == 0xffe08000)) {
  313 + if (port->mapbase == 0xffe00000 ||
  314 + port->mapbase == 0xffe08000)
286 315 /* SCIF0/1*/
287   - return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff);
288   - } else {
  316 + return sci_in(port, SCTFDR) & 0xff;
  317 + else
289 318 /* SCIF2 */
290   - return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8);
291   - }
  319 + return sci_in(port, SCFDR) >> 8;
292 320 }
293 321  
294   -static inline int scif_rxroom(struct uart_port *port)
  322 +static int scif_txroom(struct uart_port *port)
295 323 {
  324 + if (port->mapbase == 0xffe00000 ||
  325 + port->mapbase == 0xffe08000)
  326 + /* SCIF0/1*/
  327 + return SCIF_TXROOM_MAX - scif_txfill(port);
  328 + else
  329 + /* SCIF2 */
  330 + return SCIF2_TXROOM_MAX - scif_txfill(port);
  331 +}
  332 +
  333 +static int scif_rxfill(struct uart_port *port)
  334 +{
296 335 if ((port->mapbase == 0xffe00000) ||
297 336 (port->mapbase == 0xffe08000)) {
298 337 /* SCIF0/1 */
299 338  
... ... @@ -303,24 +342,34 @@
303 342 }
304 343 }
305 344 #else
306   -static inline int scif_txroom(struct uart_port *port)
  345 +static int scif_txfill(struct uart_port *port)
307 346 {
308   - return SCIF_TXROOM_MAX - (sci_in(port, SCFDR) >> 8);
  347 + return sci_in(port, SCFDR) >> 8;
309 348 }
310 349  
311   -static inline int scif_rxroom(struct uart_port *port)
  350 +static int scif_txroom(struct uart_port *port)
312 351 {
  352 + return SCIF_TXROOM_MAX - scif_txfill(port);
  353 +}
  354 +
  355 +static int scif_rxfill(struct uart_port *port)
  356 +{
313 357 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
314 358 }
315 359 #endif
316 360  
317   -static inline int sci_txroom(struct uart_port *port)
  361 +static int sci_txfill(struct uart_port *port)
318 362 {
319   - return (sci_in(port, SCxSR) & SCI_TDRE) != 0;
  363 + return !(sci_in(port, SCxSR) & SCI_TDRE);
320 364 }
321 365  
322   -static inline int sci_rxroom(struct uart_port *port)
  366 +static int sci_txroom(struct uart_port *port)
323 367 {
  368 + return !sci_txfill(port);
  369 +}
  370 +
  371 +static int sci_rxfill(struct uart_port *port)
  372 +{
324 373 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
325 374 }
326 375  
327 376  
... ... @@ -406,9 +455,9 @@
406 455  
407 456 while (1) {
408 457 if (port->type == PORT_SCI)
409   - count = sci_rxroom(port);
  458 + count = sci_rxfill(port);
410 459 else
411   - count = scif_rxroom(port);
  460 + count = scif_rxfill(port);
412 461  
413 462 /* Don't copy more bytes than there is room for in the buffer */
414 463 count = tty_buffer_request_room(tty, count);
415 464  
... ... @@ -453,10 +502,10 @@
453 502 }
454 503  
455 504 /* Store data and status */
456   - if (status&SCxSR_FER(port)) {
  505 + if (status & SCxSR_FER(port)) {
457 506 flag = TTY_FRAME;
458 507 dev_notice(port->dev, "frame error\n");
459   - } else if (status&SCxSR_PER(port)) {
  508 + } else if (status & SCxSR_PER(port)) {
460 509 flag = TTY_PARITY;
461 510 dev_notice(port->dev, "parity error\n");
462 511 } else
463 512  
464 513  
... ... @@ -618,13 +667,39 @@
618 667 return copied;
619 668 }
620 669  
621   -static irqreturn_t sci_rx_interrupt(int irq, void *port)
  670 +static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
622 671 {
  672 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  673 + struct uart_port *port = ptr;
  674 + struct sci_port *s = to_sci_port(port);
  675 +
  676 + if (s->chan_rx) {
  677 + unsigned long tout;
  678 + u16 scr = sci_in(port, SCSCR);
  679 + u16 ssr = sci_in(port, SCxSR);
  680 +
  681 + /* Disable future Rx interrupts */
  682 + sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
  683 + /* Clear current interrupt */
  684 + sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
  685 + /* Calculate delay for 1.5 DMA buffers */
  686 + tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
  687 + port->fifosize / 2;
  688 + dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
  689 + tout * 1000 / HZ);
  690 + if (tout < 2)
  691 + tout = 2;
  692 + mod_timer(&s->rx_timer, jiffies + tout);
  693 +
  694 + return IRQ_HANDLED;
  695 + }
  696 +#endif
  697 +
623 698 /* I think sci_receive_chars has to be called irrespective
624 699 * of whether the I_IXOFF is set, otherwise, how is the interrupt
625 700 * to be disabled?
626 701 */
627   - sci_receive_chars(port);
  702 + sci_receive_chars(ptr);
628 703  
629 704 return IRQ_HANDLED;
630 705 }
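A worked example of the timeout above, with all numbers assumed: for HZ = 100, port->timeout = 12 jiffies, buf_len_rx = 64 and a 16-byte FIFO,

	tout = (12 - 2) * 64 * 3 / 16 / 2 = 60 jiffies (600 ms)

i.e. roughly the time needed to receive one and a half DMA buffers' worth of characters at the current line speed.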
... ... @@ -680,6 +755,7 @@
680 755 {
681 756 unsigned short ssr_status, scr_status, err_enabled;
682 757 struct uart_port *port = ptr;
  758 + struct sci_port *s = to_sci_port(port);
683 759 irqreturn_t ret = IRQ_NONE;
684 760  
685 761 ssr_status = sci_in(port, SCxSR);
686 762  
... ... @@ -687,10 +763,15 @@
687 763 err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
688 764  
689 765 /* Tx Interrupt */
690   - if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE))
  766 + if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) &&
  767 + !s->chan_tx)
691 768 ret = sci_tx_interrupt(irq, ptr);
692   - /* Rx Interrupt */
693   - if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE))
  769 + /*
  770 + * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
  771 + * DR flags
  772 + */
  773 + if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
  774 + (scr_status & SCI_CTRL_FLAGS_RIE))
694 775 ret = sci_rx_interrupt(irq, ptr);
695 776 /* Error Interrupt */
696 777 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
... ... @@ -699,6 +780,10 @@
699 780 if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
700 781 ret = sci_br_interrupt(irq, ptr);
701 782  
  783 + WARN_ONCE(ret == IRQ_NONE,
  784 + "%s: %d IRQ %d, status %x, control %x\n", __func__,
  785 + irq, port->line, ssr_status, scr_status);
  786 +
702 787 return ret;
703 788 }
704 789  
... ... @@ -800,7 +885,9 @@
800 885 static unsigned int sci_tx_empty(struct uart_port *port)
801 886 {
802 887 unsigned short status = sci_in(port, SCxSR);
803   - return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0;
  888 + unsigned short in_tx_fifo = scif_txfill(port);
  889 +
  890 + return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
804 891 }
805 892  
806 893 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
807 894  
808 895  
... ... @@ -812,16 +899,297 @@
812 899  
813 900 static unsigned int sci_get_mctrl(struct uart_port *port)
814 901 {
815   - /* This routine is used for geting signals of: DTR, DCD, DSR, RI,
  902 + /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
816 903 and CTS/RTS */
817 904  
818 905 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
819 906 }
820 907  
  908 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  909 +static void sci_dma_tx_complete(void *arg)
  910 +{
  911 + struct sci_port *s = arg;
  912 + struct uart_port *port = &s->port;
  913 + struct circ_buf *xmit = &port->state->xmit;
  914 + unsigned long flags;
  915 +
  916 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
  917 +
  918 + spin_lock_irqsave(&port->lock, flags);
  919 +
  920 + xmit->tail += s->sg_tx.length;
  921 + xmit->tail &= UART_XMIT_SIZE - 1;
  922 +
  923 + port->icount.tx += s->sg_tx.length;
  924 +
  925 + async_tx_ack(s->desc_tx);
  926 + s->cookie_tx = -EINVAL;
  927 + s->desc_tx = NULL;
  928 +
  929 + spin_unlock_irqrestore(&port->lock, flags);
  930 +
  931 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
  932 + uart_write_wakeup(port);
  933 +
  934 + if (uart_circ_chars_pending(xmit))
  935 + schedule_work(&s->work_tx);
  936 +}
  937 +
  938 +/* Locking: called with port lock held */
  939 +static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
  940 + size_t count)
  941 +{
  942 + struct uart_port *port = &s->port;
  943 + int i, active, room;
  944 +
  945 + room = tty_buffer_request_room(tty, count);
  946 +
  947 + if (s->active_rx == s->cookie_rx[0]) {
  948 + active = 0;
  949 + } else if (s->active_rx == s->cookie_rx[1]) {
  950 + active = 1;
  951 + } else {
  952 + dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
  953 + return 0;
  954 + }
  955 +
  956 + if (room < count)
  957 + dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
  958 + count - room);
  959 + if (!room)
  960 + return room;
  961 +
  962 + for (i = 0; i < room; i++)
  963 + tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
  964 + TTY_NORMAL);
  965 +
  966 + port->icount.rx += room;
  967 +
  968 + return room;
  969 +}
  970 +
  971 +static void sci_dma_rx_complete(void *arg)
  972 +{
  973 + struct sci_port *s = arg;
  974 + struct uart_port *port = &s->port;
  975 + struct tty_struct *tty = port->state->port.tty;
  976 + unsigned long flags;
  977 + int count;
  978 +
  979 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
  980 +
  981 + spin_lock_irqsave(&port->lock, flags);
  982 +
  983 + count = sci_dma_rx_push(s, tty, s->buf_len_rx);
  984 +
  985 + mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
  986 +
  987 + spin_unlock_irqrestore(&port->lock, flags);
  988 +
  989 + if (count)
  990 + tty_flip_buffer_push(tty);
  991 +
  992 + schedule_work(&s->work_rx);
  993 +}
  994 +
  995 +static void sci_start_rx(struct uart_port *port);
  996 +static void sci_start_tx(struct uart_port *port);
  997 +
  998 +static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
  999 +{
  1000 + struct dma_chan *chan = s->chan_rx;
  1001 + struct uart_port *port = &s->port;
  1002 +
  1003 + s->chan_rx = NULL;
  1004 + s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
  1005 + dma_release_channel(chan);
  1006 + dma_free_coherent(port->dev, s->buf_len_rx * 2,
  1007 + sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
  1008 + if (enable_pio)
  1009 + sci_start_rx(port);
  1010 +}
  1011 +
  1012 +static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
  1013 +{
  1014 + struct dma_chan *chan = s->chan_tx;
  1015 + struct uart_port *port = &s->port;
  1016 +
  1017 + s->chan_tx = NULL;
  1018 + s->cookie_tx = -EINVAL;
  1019 + dma_release_channel(chan);
  1020 + if (enable_pio)
  1021 + sci_start_tx(port);
  1022 +}
  1023 +
  1024 +static void sci_submit_rx(struct sci_port *s)
  1025 +{
  1026 + struct dma_chan *chan = s->chan_rx;
  1027 + int i;
  1028 +
  1029 + for (i = 0; i < 2; i++) {
  1030 + struct scatterlist *sg = &s->sg_rx[i];
  1031 + struct dma_async_tx_descriptor *desc;
  1032 +
  1033 + desc = chan->device->device_prep_slave_sg(chan,
  1034 + sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
  1035 +
  1036 + if (desc) {
  1037 + s->desc_rx[i] = desc;
  1038 + desc->callback = sci_dma_rx_complete;
  1039 + desc->callback_param = s;
  1040 + s->cookie_rx[i] = desc->tx_submit(desc);
  1041 + }
  1042 +
  1043 + if (!desc || s->cookie_rx[i] < 0) {
  1044 + if (i) {
  1045 + async_tx_ack(s->desc_rx[0]);
  1046 + s->cookie_rx[0] = -EINVAL;
  1047 + }
  1048 + if (desc) {
  1049 + async_tx_ack(desc);
  1050 + s->cookie_rx[i] = -EINVAL;
  1051 + }
  1052 + dev_warn(s->port.dev,
  1053 + "failed to re-start DMA, using PIO\n");
  1054 + sci_rx_dma_release(s, true);
  1055 + return;
  1056 + }
  1057 + }
  1058 +
  1059 + s->active_rx = s->cookie_rx[0];
  1060 +
  1061 + dma_async_issue_pending(chan);
  1062 +}
  1063 +
  1064 +static void work_fn_rx(struct work_struct *work)
  1065 +{
  1066 + struct sci_port *s = container_of(work, struct sci_port, work_rx);
  1067 + struct uart_port *port = &s->port;
  1068 + struct dma_async_tx_descriptor *desc;
  1069 + int new;
  1070 +
  1071 + if (s->active_rx == s->cookie_rx[0]) {
  1072 + new = 0;
  1073 + } else if (s->active_rx == s->cookie_rx[1]) {
  1074 + new = 1;
  1075 + } else {
  1076 + dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
  1077 + return;
  1078 + }
  1079 + desc = s->desc_rx[new];
  1080 +
  1081 + if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
  1082 + DMA_SUCCESS) {
  1083 + /* Handle incomplete DMA receive */
  1084 + struct tty_struct *tty = port->state->port.tty;
  1085 + struct dma_chan *chan = s->chan_rx;
  1086 + struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
  1087 + async_tx);
  1088 + unsigned long flags;
  1089 + int count;
  1090 +
  1091 + chan->device->device_terminate_all(chan);
  1092 + dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
  1093 + sh_desc->partial, sh_desc->cookie);
  1094 +
  1095 + spin_lock_irqsave(&port->lock, flags);
  1096 + count = sci_dma_rx_push(s, tty, sh_desc->partial);
  1097 + spin_unlock_irqrestore(&port->lock, flags);
  1098 +
  1099 + if (count)
  1100 + tty_flip_buffer_push(tty);
  1101 +
  1102 + sci_submit_rx(s);
  1103 +
  1104 + return;
  1105 + }
  1106 +
  1107 + s->cookie_rx[new] = desc->tx_submit(desc);
  1108 + if (s->cookie_rx[new] < 0) {
  1109 + dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
  1110 + sci_rx_dma_release(s, true);
  1111 + return;
  1112 + }
  1113 +
  1114 + dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
  1115 + s->cookie_rx[new], new);
  1116 +
  1117 + s->active_rx = s->cookie_rx[!new];
  1118 +}
  1119 +
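When a receive is cut short, work_fn_rx() above recovers the number of bytes actually transferred by walking from the generic descriptor back to the controller-private one: struct sh_desc embeds the dma_async_tx_descriptor as its async_tx member, so container_of() does the lookup. A generic sketch of that embed-and-recover pattern, with a simplified container_of and illustrative struct names:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the kernel macro (no type checking). */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct generic_desc { int cookie; };    /* dma_async_tx_descriptor role */

    struct private_desc {                   /* struct sh_desc role */
            size_t partial;                 /* bytes actually transferred */
            struct generic_desc async_tx;   /* embedded generic part */
    };

    int main(void)
    {
            struct private_desc d = { .partial = 42 };
            struct generic_desc *g = &d.async_tx;   /* what callbacks see */
            struct private_desc *p =
                    container_of(g, struct private_desc, async_tx);

            printf("partial = %zu\n", p->partial);  /* -> 42 */
            return 0;
    }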
  1120 +static void work_fn_tx(struct work_struct *work)
  1121 +{
  1122 + struct sci_port *s = container_of(work, struct sci_port, work_tx);
  1123 + struct dma_async_tx_descriptor *desc;
  1124 + struct dma_chan *chan = s->chan_tx;
  1125 + struct uart_port *port = &s->port;
  1126 + struct circ_buf *xmit = &port->state->xmit;
  1127 + struct scatterlist *sg = &s->sg_tx;
  1128 +
  1129 + /*
  1130 + * DMA is idle now.
  1131 + * Port xmit buffer is already mapped, and it is one page... Just adjust
  1132 + * offsets and lengths. Since it is a circular buffer, we have to
  1133 + * transmit till the end, and then the rest. Take the port lock to get a
  1134 + * consistent xmit buffer state.
  1135 + */
  1136 + spin_lock_irq(&port->lock);
  1137 + sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
  1138 + sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
  1139 + sg->offset;
  1140 + sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
  1141 + CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
  1142 + sg->dma_length = sg->length;
  1143 + spin_unlock_irq(&port->lock);
  1144 +
  1145 + BUG_ON(!sg->length);
  1146 +
  1147 + desc = chan->device->device_prep_slave_sg(chan,
  1148 + sg, s->sg_len_tx, DMA_TO_DEVICE,
  1149 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  1150 + if (!desc) {
  1151 + /* switch to PIO */
  1152 + sci_tx_dma_release(s, true);
  1153 + return;
  1154 + }
  1155 +
  1156 + dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
  1157 +
  1158 + spin_lock_irq(&port->lock);
  1159 + s->desc_tx = desc;
  1160 + desc->callback = sci_dma_tx_complete;
  1161 + desc->callback_param = s;
  1162 + spin_unlock_irq(&port->lock);
  1163 + s->cookie_tx = desc->tx_submit(desc);
  1164 + if (s->cookie_tx < 0) {
  1165 + dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
  1166 + /* switch to PIO */
  1167 + sci_tx_dma_release(s, true);
  1168 + return;
  1169 + }
  1170 +
  1171 + dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
  1172 + xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
  1173 +
  1174 + dma_async_issue_pending(chan);
  1175 +}
  1176 +#endif
  1177 +
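work_fn_tx() above caps each transfer at min(CIRC_CNT, CIRC_CNT_TO_END) so that a single DMA descriptor never wraps around the circular xmit buffer; whatever remains past the wrap is picked up on the next pass. A worked example of that arithmetic, with the macros mirroring <linux/circ_buf.h> (GNU statement expression, as in the kernel header); SIZE stands in for UART_XMIT_SIZE and must be a power of two, and the head/tail values are illustrative:

    #include <stdio.h>

    #define SIZE 16         /* stands in for UART_XMIT_SIZE */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
    #define CIRC_CNT_TO_END(head, tail, size) \
            ({ int end = (size) - (tail); \
               int n = ((head) + end) & ((size) - 1); \
               n < end ? n : end; })

    int main(void)
    {
            int head = 4, tail = 12;        /* producer has wrapped */
            int cnt = CIRC_CNT(head, tail, SIZE);           /* 8 pending */
            int to_end = CIRC_CNT_TO_END(head, tail, SIZE); /* 4 before wrap */
            int len = cnt < to_end ? cnt : to_end;          /* one DMA: 4 */

            printf("pending %d, contiguous %d, DMA length %d\n",
                   cnt, to_end, len);
            return 0;
    }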
821 1178 static void sci_start_tx(struct uart_port *port)
822 1179 {
823 1180 unsigned short ctrl;
824 1181  
  1182 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  1183 + struct sci_port *s = to_sci_port(port);
  1184 +
  1185 + if (s->chan_tx) {
  1186 + if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
  1187 + schedule_work(&s->work_tx);
  1188 +
  1189 + return;
  1190 + }
  1191 +#endif
  1192 +
825 1193 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
826 1194 ctrl = sci_in(port, SCSCR);
827 1195 ctrl |= SCI_CTRL_FLAGS_TIE;
828 1196  
829 1197  
... ... @@ -838,13 +1206,12 @@
838 1206 sci_out(port, SCSCR, ctrl);
839 1207 }
840 1208  
841   -static void sci_start_rx(struct uart_port *port, unsigned int tty_start)
  1209 +static void sci_start_rx(struct uart_port *port)
842 1210 {
843   - unsigned short ctrl;
  1211 + unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
844 1212  
845 1213 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
846   - ctrl = sci_in(port, SCSCR);
847   - ctrl |= SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
  1214 + ctrl |= sci_in(port, SCSCR);
848 1215 sci_out(port, SCSCR, ctrl);
849 1216 }
850 1217  
... ... @@ -868,16 +1235,154 @@
868 1235 /* Nothing here yet .. */
869 1236 }
870 1237  
  1238 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  1239 +static bool filter(struct dma_chan *chan, void *slave)
  1240 +{
  1241 + struct sh_dmae_slave *param = slave;
  1242 +
  1243 + dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
  1244 + param->slave_id);
  1245 +
  1246 + if (param->dma_dev == chan->device->dev) {
  1247 + chan->private = param;
  1248 + return true;
  1249 + } else {
  1250 + return false;
  1251 + }
  1252 +}
  1253 +
  1254 +static void rx_timer_fn(unsigned long arg)
  1255 +{
  1256 + struct sci_port *s = (struct sci_port *)arg;
  1257 + struct uart_port *port = &s->port;
  1258 +
  1259 + u16 scr = sci_in(port, SCSCR);
  1260 + sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
  1261 + dev_dbg(port->dev, "DMA Rx timed out\n");
  1262 + schedule_work(&s->work_rx);
  1263 +}
  1264 +
  1265 +static void sci_request_dma(struct uart_port *port)
  1266 +{
  1267 + struct sci_port *s = to_sci_port(port);
  1268 + struct sh_dmae_slave *param;
  1269 + struct dma_chan *chan;
  1270 + dma_cap_mask_t mask;
  1271 + int nent;
  1272 +
  1273 + dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
  1274 + port->line, s->dma_dev);
  1275 +
  1276 + if (!s->dma_dev)
  1277 + return;
  1278 +
  1279 + dma_cap_zero(mask);
  1280 + dma_cap_set(DMA_SLAVE, mask);
  1281 +
  1282 + param = &s->param_tx;
  1283 +
  1284 + /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
  1285 + param->slave_id = s->slave_tx;
  1286 + param->dma_dev = s->dma_dev;
  1287 +
  1288 + s->cookie_tx = -EINVAL;
  1289 + chan = dma_request_channel(mask, filter, param);
  1290 + dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
  1291 + if (chan) {
  1292 + s->chan_tx = chan;
  1293 + sg_init_table(&s->sg_tx, 1);
  1294 + /* UART circular tx buffer is an aligned page. */
  1295 + BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
  1296 + sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
  1297 + UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
  1298 + nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
  1299 + if (!nent)
  1300 + sci_tx_dma_release(s, false);
  1301 + else
  1302 + dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
  1303 + sg_dma_len(&s->sg_tx),
  1304 + port->state->xmit.buf, sg_dma_address(&s->sg_tx));
  1305 +
  1306 + s->sg_len_tx = nent;
  1307 +
  1308 + INIT_WORK(&s->work_tx, work_fn_tx);
  1309 + }
  1310 +
  1311 + param = &s->param_rx;
  1312 +
  1313 + /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
  1314 + param->slave_id = s->slave_rx;
  1315 + param->dma_dev = s->dma_dev;
  1316 +
  1317 + chan = dma_request_channel(mask, filter, param);
  1318 + dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
  1319 + if (chan) {
  1320 + dma_addr_t dma[2];
  1321 + void *buf[2];
  1322 + int i;
  1323 +
  1324 + s->chan_rx = chan;
  1325 +
  1326 + s->buf_len_rx = 2 * max(16, (int)port->fifosize);
  1327 + buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
  1328 + &dma[0], GFP_KERNEL);
  1329 +
  1330 + if (!buf[0]) {
  1331 + dev_warn(port->dev,
  1332 + "failed to allocate dma buffer, using PIO\n");
  1333 + sci_rx_dma_release(s, true);
  1334 + return;
  1335 + }
  1336 +
  1337 + buf[1] = buf[0] + s->buf_len_rx;
  1338 + dma[1] = dma[0] + s->buf_len_rx;
  1339 +
  1340 + for (i = 0; i < 2; i++) {
  1341 + struct scatterlist *sg = &s->sg_rx[i];
  1342 +
  1343 + sg_init_table(sg, 1);
  1344 + sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
  1345 + (int)buf[i] & ~PAGE_MASK);
  1346 + sg->dma_address = dma[i];
  1347 + sg->dma_length = sg->length;
  1348 + }
  1349 +
  1350 + INIT_WORK(&s->work_rx, work_fn_rx);
  1351 + setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
  1352 +
  1353 + sci_submit_rx(s);
  1354 + }
  1355 +}
  1356 +
  1357 +static void sci_free_dma(struct uart_port *port)
  1358 +{
  1359 + struct sci_port *s = to_sci_port(port);
  1360 +
  1361 + if (!s->dma_dev)
  1362 + return;
  1363 +
  1364 + if (s->chan_tx)
  1365 + sci_tx_dma_release(s, false);
  1366 + if (s->chan_rx)
  1367 + sci_rx_dma_release(s, false);
  1368 +}
  1369 +#endif
  1370 +
871 1371 static int sci_startup(struct uart_port *port)
872 1372 {
873 1373 struct sci_port *s = to_sci_port(port);
874 1374  
  1375 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
  1376 +
875 1377 if (s->enable)
876 1378 s->enable(port);
877 1379  
878 1380 sci_request_irq(s);
  1381 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  1382 + sci_request_dma(port);
  1383 +#endif
879 1384 sci_start_tx(port);
880   - sci_start_rx(port, 1);
  1385 + sci_start_rx(port);
881 1386  
882 1387 return 0;
883 1388 }
... ... @@ -886,8 +1391,13 @@
886 1391 {
887 1392 struct sci_port *s = to_sci_port(port);
888 1393  
  1394 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
  1395 +
889 1396 sci_stop_rx(port);
890 1397 sci_stop_tx(port);
  1398 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  1399 + sci_free_dma(port);
  1400 +#endif
891 1401 sci_free_irq(s);
892 1402  
893 1403 if (s->disable)
... ... @@ -937,6 +1447,9 @@
937 1447  
938 1448 sci_out(port, SCSMR, smr_val);
939 1449  
  1450 + dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
  1451 + SCSCR_INIT(port));
  1452 +
940 1453 if (t > 0) {
941 1454 if (t >= 256) {
942 1455 sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
... ... @@ -954,7 +1467,7 @@
954 1467 sci_out(port, SCSCR, SCSCR_INIT(port));
955 1468  
956 1469 if ((termios->c_cflag & CREAD) != 0)
957   - sci_start_rx(port, 0);
  1470 + sci_start_rx(port);
958 1471 }
959 1472  
960 1473 static const char *sci_type(struct uart_port *port)
... ... @@ -1049,19 +1562,21 @@
1049 1562 unsigned int index,
1050 1563 struct plat_sci_port *p)
1051 1564 {
1052   - sci_port->port.ops = &sci_uart_ops;
1053   - sci_port->port.iotype = UPIO_MEM;
1054   - sci_port->port.line = index;
  1565 + struct uart_port *port = &sci_port->port;
1055 1566  
  1567 + port->ops = &sci_uart_ops;
  1568 + port->iotype = UPIO_MEM;
  1569 + port->line = index;
  1570 +
1056 1571 switch (p->type) {
1057 1572 case PORT_SCIFA:
1058   - sci_port->port.fifosize = 64;
  1573 + port->fifosize = 64;
1059 1574 break;
1060 1575 case PORT_SCIF:
1061   - sci_port->port.fifosize = 16;
  1576 + port->fifosize = 16;
1062 1577 break;
1063 1578 default:
1064   - sci_port->port.fifosize = 1;
  1579 + port->fifosize = 1;
1065 1580 break;
1066 1581 }
1067 1582  
... ... @@ -1070,19 +1585,28 @@
1070 1585 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
1071 1586 sci_port->enable = sci_clk_enable;
1072 1587 sci_port->disable = sci_clk_disable;
1073   - sci_port->port.dev = &dev->dev;
  1588 + port->dev = &dev->dev;
1074 1589 }
1075 1590  
1076 1591 sci_port->break_timer.data = (unsigned long)sci_port;
1077 1592 sci_port->break_timer.function = sci_break_timer;
1078 1593 init_timer(&sci_port->break_timer);
1079 1594  
1080   - sci_port->port.mapbase = p->mapbase;
1081   - sci_port->port.membase = p->membase;
  1595 + port->mapbase = p->mapbase;
  1596 + port->membase = p->membase;
1082 1597  
1083   - sci_port->port.irq = p->irqs[SCIx_TXI_IRQ];
1084   - sci_port->port.flags = p->flags;
1085   - sci_port->type = sci_port->port.type = p->type;
  1598 + port->irq = p->irqs[SCIx_TXI_IRQ];
  1599 + port->flags = p->flags;
  1600 + sci_port->type = port->type = p->type;
  1601 +
  1602 +#ifdef CONFIG_SERIAL_SH_SCI_DMA
  1603 + sci_port->dma_dev = p->dma_dev;
  1604 + sci_port->slave_tx = p->dma_slave_tx;
  1605 + sci_port->slave_rx = p->dma_slave_rx;
  1606 +
  1607 + dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
  1608 + p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
  1609 +#endif
1086 1610  
1087 1611 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1088 1612 }
include/linux/serial_sci.h
... ... @@ -2,6 +2,7 @@
2 2 #define __LINUX_SERIAL_SCI_H
3 3  
4 4 #include <linux/serial_core.h>
  5 +#include <asm/dmaengine.h>
5 6  
6 7 /*
7 8 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
... ... @@ -16,6 +17,8 @@
16 17 SCIx_NR_IRQS,
17 18 };
18 19  
  20 +struct device;
  21 +
19 22 /*
20 23 * Platform device specific platform_data struct
21 24 */
... ... @@ -26,6 +29,9 @@
26 29 unsigned int type; /* SCI / SCIF / IRDA */
27 30 upf_t flags; /* UPF_* flags */
28 31 char *clk; /* clock string */
  32 + struct device *dma_dev;
  33 + enum sh_dmae_slave_chan_id dma_slave_tx;
  34 + enum sh_dmae_slave_chan_id dma_slave_rx;
29 35 };
30 36  
31 37 #endif /* __LINUX_SERIAL_SCI_H */
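With the fields added above, board code opts a port into DMA by naming the DMAC platform device and the two slave channels; sci_request_dma() returns early when dma_dev is left NULL, keeping that port on the PIO path. A hypothetical wiring fragment; the dma_device pointer, mapbase and IRQ values are illustrative, while the SHDMA_SLAVE_SCIF0_* IDs are the kind referenced in the driver comments above:

    static struct plat_sci_port scif0_platform_data = {
            .mapbase        = 0xffe00000,           /* illustrative */
            .irqs           = { 80, 80, 80, 80 },   /* illustrative */
            .flags          = UPF_BOOT_AUTOCONF,
            .type           = PORT_SCIF,
            .dma_dev        = &dma_device.dev,      /* SoC DMAC platform device */
            .dma_slave_tx   = SHDMA_SLAVE_SCIF0_TX,
            .dma_slave_rx   = SHDMA_SLAVE_SCIF0_RX,
    };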
sound/soc/sh/siu.h
... ... @@ -72,7 +72,7 @@
72 72 #include <linux/interrupt.h>
73 73 #include <linux/io.h>
74 74  
75   -#include <asm/dma-sh.h>
  75 +#include <asm/dmaengine.h>
76 76  
77 77 #include <sound/core.h>
78 78 #include <sound/pcm.h>
sound/soc/sh/siu_pcm.c
... ... @@ -32,7 +32,7 @@
32 32 #include <sound/pcm_params.h>
33 33 #include <sound/soc-dai.h>
34 34  
35   -#include <asm/dma-sh.h>
  35 +#include <asm/dmaengine.h>
36 36 #include <asm/siu.h>
37 37  
38 38 #include "siu.h"