Commit ed30933e6f3dbeaaab1de91e1bec25f42d5d32df
Committed by Vinod Koul
1 parent 661f7cb55c
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
dma: remove unnecessary null pointer check in mmp_pdma.c
The pointer cfg is dereferenced at line 594, so there is no reason to check it for NULL again at line 620.

Signed-off-by: Cong Ding <dinggnu@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
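For context, a minimal standalone C sketch (hypothetical struct and function names, not the driver's actual code) of the pattern this commit removes: once a pointer has been dereferenced, a later NULL check on the same pointer can never fail, so it is dead code that only misleads readers about whether the pointer may be NULL.

/*
 * Minimal sketch (hypothetical names) of the pattern removed by this commit:
 * a NULL check placed after the pointer has already been dereferenced.
 */
struct slave_config {
	int direction;
	unsigned int slave_id;
};

static int apply_config(struct slave_config *cfg)
{
	/* Dereference: a NULL cfg would already have crashed here, just as
	 * cfg->direction does at line 594 of mmp_pdma.c. */
	int dir = cfg->direction;

	/* ... program registers based on dir ... */

	if (cfg) {	/* always true at this point; mirrors the check
			 * dropped at line 620 */
		return dir;
	}

	return -1;	/* unreachable */
}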
Showing 1 changed file with 2 additions and 4 deletions (inline diff)
drivers/dma/mmp_pdma.c
1 | /* | 1 | /* |
2 | * Copyright 2012 Marvell International Ltd. | 2 | * Copyright 2012 Marvell International Ltd. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/dmaengine.h> | 14 | #include <linux/dmaengine.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/platform_data/mmp_dma.h> | 17 | #include <linux/platform_data/mmp_dma.h> |
18 | #include <linux/dmapool.h> | 18 | #include <linux/dmapool.h> |
19 | #include <linux/of_device.h> | 19 | #include <linux/of_device.h> |
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | 21 | ||
22 | #include "dmaengine.h" | 22 | #include "dmaengine.h" |
23 | 23 | ||
24 | #define DCSR 0x0000 | 24 | #define DCSR 0x0000 |
25 | #define DALGN 0x00a0 | 25 | #define DALGN 0x00a0 |
26 | #define DINT 0x00f0 | 26 | #define DINT 0x00f0 |
27 | #define DDADR 0x0200 | 27 | #define DDADR 0x0200 |
28 | #define DSADR 0x0204 | 28 | #define DSADR 0x0204 |
29 | #define DTADR 0x0208 | 29 | #define DTADR 0x0208 |
30 | #define DCMD 0x020c | 30 | #define DCMD 0x020c |
31 | 31 | ||
32 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ | 32 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ |
33 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ | 33 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ |
34 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ | 34 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ |
35 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ | 35 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ |
36 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ | 36 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ |
37 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ | 37 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ |
38 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ | 38 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ |
39 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ | 39 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ |
40 | 40 | ||
41 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ | 41 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ |
42 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ | 42 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ |
43 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ | 43 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ |
44 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ | 44 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ |
45 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ | 45 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ |
46 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | 46 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ |
47 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | 47 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ |
48 | 48 | ||
49 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | 49 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ |
50 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | 50 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ |
51 | 51 | ||
52 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | 52 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ |
53 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ | 53 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ |
54 | 54 | ||
55 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ | 55 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ |
56 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ | 56 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ |
57 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ | 57 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ |
58 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ | 58 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ |
59 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ | 59 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ |
60 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ | 60 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ |
61 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ | 61 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ |
62 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ | 62 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ |
63 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ | 63 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ |
64 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ | 64 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ |
65 | #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ | 65 | #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ |
66 | #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ | 66 | #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ |
67 | #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ | 67 | #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ |
68 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | 68 | #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ |
69 | 69 | ||
70 | #define PDMA_ALIGNMENT 3 | 70 | #define PDMA_ALIGNMENT 3 |
71 | #define PDMA_MAX_DESC_BYTES 0x1000 | 71 | #define PDMA_MAX_DESC_BYTES 0x1000 |
72 | 72 | ||
73 | struct mmp_pdma_desc_hw { | 73 | struct mmp_pdma_desc_hw { |
74 | u32 ddadr; /* Points to the next descriptor + flags */ | 74 | u32 ddadr; /* Points to the next descriptor + flags */ |
75 | u32 dsadr; /* DSADR value for the current transfer */ | 75 | u32 dsadr; /* DSADR value for the current transfer */ |
76 | u32 dtadr; /* DTADR value for the current transfer */ | 76 | u32 dtadr; /* DTADR value for the current transfer */ |
77 | u32 dcmd; /* DCMD value for the current transfer */ | 77 | u32 dcmd; /* DCMD value for the current transfer */ |
78 | } __aligned(32); | 78 | } __aligned(32); |
79 | 79 | ||
80 | struct mmp_pdma_desc_sw { | 80 | struct mmp_pdma_desc_sw { |
81 | struct mmp_pdma_desc_hw desc; | 81 | struct mmp_pdma_desc_hw desc; |
82 | struct list_head node; | 82 | struct list_head node; |
83 | struct list_head tx_list; | 83 | struct list_head tx_list; |
84 | struct dma_async_tx_descriptor async_tx; | 84 | struct dma_async_tx_descriptor async_tx; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct mmp_pdma_phy; | 87 | struct mmp_pdma_phy; |
88 | 88 | ||
89 | struct mmp_pdma_chan { | 89 | struct mmp_pdma_chan { |
90 | struct device *dev; | 90 | struct device *dev; |
91 | struct dma_chan chan; | 91 | struct dma_chan chan; |
92 | struct dma_async_tx_descriptor desc; | 92 | struct dma_async_tx_descriptor desc; |
93 | struct mmp_pdma_phy *phy; | 93 | struct mmp_pdma_phy *phy; |
94 | enum dma_transfer_direction dir; | 94 | enum dma_transfer_direction dir; |
95 | 95 | ||
96 | /* channel's basic info */ | 96 | /* channel's basic info */ |
97 | struct tasklet_struct tasklet; | 97 | struct tasklet_struct tasklet; |
98 | u32 dcmd; | 98 | u32 dcmd; |
99 | u32 drcmr; | 99 | u32 drcmr; |
100 | u32 dev_addr; | 100 | u32 dev_addr; |
101 | 101 | ||
102 | /* list for desc */ | 102 | /* list for desc */ |
103 | spinlock_t desc_lock; /* Descriptor list lock */ | 103 | spinlock_t desc_lock; /* Descriptor list lock */ |
104 | struct list_head chain_pending; /* Link descriptors queue for pending */ | 104 | struct list_head chain_pending; /* Link descriptors queue for pending */ |
105 | struct list_head chain_running; /* Link descriptors queue for running */ | 105 | struct list_head chain_running; /* Link descriptors queue for running */ |
106 | bool idle; /* channel statue machine */ | 106 | bool idle; /* channel statue machine */ |
107 | 107 | ||
108 | struct dma_pool *desc_pool; /* Descriptors pool */ | 108 | struct dma_pool *desc_pool; /* Descriptors pool */ |
109 | }; | 109 | }; |
110 | 110 | ||
111 | struct mmp_pdma_phy { | 111 | struct mmp_pdma_phy { |
112 | int idx; | 112 | int idx; |
113 | void __iomem *base; | 113 | void __iomem *base; |
114 | struct mmp_pdma_chan *vchan; | 114 | struct mmp_pdma_chan *vchan; |
115 | }; | 115 | }; |
116 | 116 | ||
117 | struct mmp_pdma_device { | 117 | struct mmp_pdma_device { |
118 | int dma_channels; | 118 | int dma_channels; |
119 | void __iomem *base; | 119 | void __iomem *base; |
120 | struct device *dev; | 120 | struct device *dev; |
121 | struct dma_device device; | 121 | struct dma_device device; |
122 | struct mmp_pdma_phy *phy; | 122 | struct mmp_pdma_phy *phy; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | 125 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) |
126 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) | 126 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) |
127 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) | 127 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) |
128 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) | 128 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) |
129 | 129 | ||
130 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | 130 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) |
131 | { | 131 | { |
132 | u32 reg = (phy->idx << 4) + DDADR; | 132 | u32 reg = (phy->idx << 4) + DDADR; |
133 | 133 | ||
134 | writel(addr, phy->base + reg); | 134 | writel(addr, phy->base + reg); |
135 | } | 135 | } |
136 | 136 | ||
137 | static void enable_chan(struct mmp_pdma_phy *phy) | 137 | static void enable_chan(struct mmp_pdma_phy *phy) |
138 | { | 138 | { |
139 | u32 reg; | 139 | u32 reg; |
140 | 140 | ||
141 | if (!phy->vchan) | 141 | if (!phy->vchan) |
142 | return; | 142 | return; |
143 | 143 | ||
144 | reg = phy->vchan->drcmr; | 144 | reg = phy->vchan->drcmr; |
145 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); | 145 | reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2); |
146 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); | 146 | writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); |
147 | 147 | ||
148 | reg = (phy->idx << 2) + DCSR; | 148 | reg = (phy->idx << 2) + DCSR; |
149 | writel(readl(phy->base + reg) | DCSR_RUN, | 149 | writel(readl(phy->base + reg) | DCSR_RUN, |
150 | phy->base + reg); | 150 | phy->base + reg); |
151 | } | 151 | } |
152 | 152 | ||
153 | static void disable_chan(struct mmp_pdma_phy *phy) | 153 | static void disable_chan(struct mmp_pdma_phy *phy) |
154 | { | 154 | { |
155 | u32 reg; | 155 | u32 reg; |
156 | 156 | ||
157 | if (phy) { | 157 | if (phy) { |
158 | reg = (phy->idx << 2) + DCSR; | 158 | reg = (phy->idx << 2) + DCSR; |
159 | writel(readl(phy->base + reg) & ~DCSR_RUN, | 159 | writel(readl(phy->base + reg) & ~DCSR_RUN, |
160 | phy->base + reg); | 160 | phy->base + reg); |
161 | } | 161 | } |
162 | } | 162 | } |
163 | 163 | ||
164 | static int clear_chan_irq(struct mmp_pdma_phy *phy) | 164 | static int clear_chan_irq(struct mmp_pdma_phy *phy) |
165 | { | 165 | { |
166 | u32 dcsr; | 166 | u32 dcsr; |
167 | u32 dint = readl(phy->base + DINT); | 167 | u32 dint = readl(phy->base + DINT); |
168 | u32 reg = (phy->idx << 2) + DCSR; | 168 | u32 reg = (phy->idx << 2) + DCSR; |
169 | 169 | ||
170 | if (dint & BIT(phy->idx)) { | 170 | if (dint & BIT(phy->idx)) { |
171 | /* clear irq */ | 171 | /* clear irq */ |
172 | dcsr = readl(phy->base + reg); | 172 | dcsr = readl(phy->base + reg); |
173 | writel(dcsr, phy->base + reg); | 173 | writel(dcsr, phy->base + reg); |
174 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) | 174 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) |
175 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); | 175 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); |
176 | return 0; | 176 | return 0; |
177 | } | 177 | } |
178 | return -EAGAIN; | 178 | return -EAGAIN; |
179 | } | 179 | } |
180 | 180 | ||
181 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) | 181 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) |
182 | { | 182 | { |
183 | struct mmp_pdma_phy *phy = dev_id; | 183 | struct mmp_pdma_phy *phy = dev_id; |
184 | 184 | ||
185 | if (clear_chan_irq(phy) == 0) { | 185 | if (clear_chan_irq(phy) == 0) { |
186 | tasklet_schedule(&phy->vchan->tasklet); | 186 | tasklet_schedule(&phy->vchan->tasklet); |
187 | return IRQ_HANDLED; | 187 | return IRQ_HANDLED; |
188 | } else | 188 | } else |
189 | return IRQ_NONE; | 189 | return IRQ_NONE; |
190 | } | 190 | } |
191 | 191 | ||
192 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | 192 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) |
193 | { | 193 | { |
194 | struct mmp_pdma_device *pdev = dev_id; | 194 | struct mmp_pdma_device *pdev = dev_id; |
195 | struct mmp_pdma_phy *phy; | 195 | struct mmp_pdma_phy *phy; |
196 | u32 dint = readl(pdev->base + DINT); | 196 | u32 dint = readl(pdev->base + DINT); |
197 | int i, ret; | 197 | int i, ret; |
198 | int irq_num = 0; | 198 | int irq_num = 0; |
199 | 199 | ||
200 | while (dint) { | 200 | while (dint) { |
201 | i = __ffs(dint); | 201 | i = __ffs(dint); |
202 | dint &= (dint - 1); | 202 | dint &= (dint - 1); |
203 | phy = &pdev->phy[i]; | 203 | phy = &pdev->phy[i]; |
204 | ret = mmp_pdma_chan_handler(irq, phy); | 204 | ret = mmp_pdma_chan_handler(irq, phy); |
205 | if (ret == IRQ_HANDLED) | 205 | if (ret == IRQ_HANDLED) |
206 | irq_num++; | 206 | irq_num++; |
207 | } | 207 | } |
208 | 208 | ||
209 | if (irq_num) | 209 | if (irq_num) |
210 | return IRQ_HANDLED; | 210 | return IRQ_HANDLED; |
211 | else | 211 | else |
212 | return IRQ_NONE; | 212 | return IRQ_NONE; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* lookup free phy channel as descending priority */ | 215 | /* lookup free phy channel as descending priority */ |
216 | static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | 216 | static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) |
217 | { | 217 | { |
218 | int prio, i; | 218 | int prio, i; |
219 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); | 219 | struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); |
220 | struct mmp_pdma_phy *phy; | 220 | struct mmp_pdma_phy *phy; |
221 | 221 | ||
222 | /* | 222 | /* |
223 | * dma channel priorities | 223 | * dma channel priorities |
224 | * ch 0 - 3, 16 - 19 <--> (0) | 224 | * ch 0 - 3, 16 - 19 <--> (0) |
225 | * ch 4 - 7, 20 - 23 <--> (1) | 225 | * ch 4 - 7, 20 - 23 <--> (1) |
226 | * ch 8 - 11, 24 - 27 <--> (2) | 226 | * ch 8 - 11, 24 - 27 <--> (2) |
227 | * ch 12 - 15, 28 - 31 <--> (3) | 227 | * ch 12 - 15, 28 - 31 <--> (3) |
228 | */ | 228 | */ |
229 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | 229 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { |
230 | for (i = 0; i < pdev->dma_channels; i++) { | 230 | for (i = 0; i < pdev->dma_channels; i++) { |
231 | if (prio != ((i & 0xf) >> 2)) | 231 | if (prio != ((i & 0xf) >> 2)) |
232 | continue; | 232 | continue; |
233 | phy = &pdev->phy[i]; | 233 | phy = &pdev->phy[i]; |
234 | if (!phy->vchan) { | 234 | if (!phy->vchan) { |
235 | phy->vchan = pchan; | 235 | phy->vchan = pchan; |
236 | return phy; | 236 | return phy; |
237 | } | 237 | } |
238 | } | 238 | } |
239 | } | 239 | } |
240 | 240 | ||
241 | return NULL; | 241 | return NULL; |
242 | } | 242 | } |
243 | 243 | ||
244 | /* desc->tx_list ==> pending list */ | 244 | /* desc->tx_list ==> pending list */ |
245 | static void append_pending_queue(struct mmp_pdma_chan *chan, | 245 | static void append_pending_queue(struct mmp_pdma_chan *chan, |
246 | struct mmp_pdma_desc_sw *desc) | 246 | struct mmp_pdma_desc_sw *desc) |
247 | { | 247 | { |
248 | struct mmp_pdma_desc_sw *tail = | 248 | struct mmp_pdma_desc_sw *tail = |
249 | to_mmp_pdma_desc(chan->chain_pending.prev); | 249 | to_mmp_pdma_desc(chan->chain_pending.prev); |
250 | 250 | ||
251 | if (list_empty(&chan->chain_pending)) | 251 | if (list_empty(&chan->chain_pending)) |
252 | goto out_splice; | 252 | goto out_splice; |
253 | 253 | ||
254 | /* one irq per queue, even appended */ | 254 | /* one irq per queue, even appended */ |
255 | tail->desc.ddadr = desc->async_tx.phys; | 255 | tail->desc.ddadr = desc->async_tx.phys; |
256 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; | 256 | tail->desc.dcmd &= ~DCMD_ENDIRQEN; |
257 | 257 | ||
258 | /* softly link to pending list */ | 258 | /* softly link to pending list */ |
259 | out_splice: | 259 | out_splice: |
260 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); | 260 | list_splice_tail_init(&desc->tx_list, &chan->chain_pending); |
261 | } | 261 | } |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * start_pending_queue - transfer any pending transactions | 264 | * start_pending_queue - transfer any pending transactions |
265 | * pending list ==> running list | 265 | * pending list ==> running list |
266 | */ | 266 | */ |
267 | static void start_pending_queue(struct mmp_pdma_chan *chan) | 267 | static void start_pending_queue(struct mmp_pdma_chan *chan) |
268 | { | 268 | { |
269 | struct mmp_pdma_desc_sw *desc; | 269 | struct mmp_pdma_desc_sw *desc; |
270 | 270 | ||
271 | /* still in running, irq will start the pending list */ | 271 | /* still in running, irq will start the pending list */ |
272 | if (!chan->idle) { | 272 | if (!chan->idle) { |
273 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 273 | dev_dbg(chan->dev, "DMA controller still busy\n"); |
274 | return; | 274 | return; |
275 | } | 275 | } |
276 | 276 | ||
277 | if (list_empty(&chan->chain_pending)) { | 277 | if (list_empty(&chan->chain_pending)) { |
278 | /* chance to re-fetch phy channel with higher prio */ | 278 | /* chance to re-fetch phy channel with higher prio */ |
279 | if (chan->phy) { | 279 | if (chan->phy) { |
280 | chan->phy->vchan = NULL; | 280 | chan->phy->vchan = NULL; |
281 | chan->phy = NULL; | 281 | chan->phy = NULL; |
282 | } | 282 | } |
283 | dev_dbg(chan->dev, "no pending list\n"); | 283 | dev_dbg(chan->dev, "no pending list\n"); |
284 | return; | 284 | return; |
285 | } | 285 | } |
286 | 286 | ||
287 | if (!chan->phy) { | 287 | if (!chan->phy) { |
288 | chan->phy = lookup_phy(chan); | 288 | chan->phy = lookup_phy(chan); |
289 | if (!chan->phy) { | 289 | if (!chan->phy) { |
290 | dev_dbg(chan->dev, "no free dma channel\n"); | 290 | dev_dbg(chan->dev, "no free dma channel\n"); |
291 | return; | 291 | return; |
292 | } | 292 | } |
293 | } | 293 | } |
294 | 294 | ||
295 | /* | 295 | /* |
296 | * pending -> running | 296 | * pending -> running |
297 | * reintilize pending list | 297 | * reintilize pending list |
298 | */ | 298 | */ |
299 | desc = list_first_entry(&chan->chain_pending, | 299 | desc = list_first_entry(&chan->chain_pending, |
300 | struct mmp_pdma_desc_sw, node); | 300 | struct mmp_pdma_desc_sw, node); |
301 | list_splice_tail_init(&chan->chain_pending, &chan->chain_running); | 301 | list_splice_tail_init(&chan->chain_pending, &chan->chain_running); |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * Program the descriptor's address into the DMA controller, | 304 | * Program the descriptor's address into the DMA controller, |
305 | * then start the DMA transaction | 305 | * then start the DMA transaction |
306 | */ | 306 | */ |
307 | set_desc(chan->phy, desc->async_tx.phys); | 307 | set_desc(chan->phy, desc->async_tx.phys); |
308 | enable_chan(chan->phy); | 308 | enable_chan(chan->phy); |
309 | chan->idle = false; | 309 | chan->idle = false; |
310 | } | 310 | } |
311 | 311 | ||
312 | 312 | ||
313 | /* desc->tx_list ==> pending list */ | 313 | /* desc->tx_list ==> pending list */ |
314 | static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) | 314 | static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) |
315 | { | 315 | { |
316 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); | 316 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); |
317 | struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); | 317 | struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); |
318 | struct mmp_pdma_desc_sw *child; | 318 | struct mmp_pdma_desc_sw *child; |
319 | unsigned long flags; | 319 | unsigned long flags; |
320 | dma_cookie_t cookie = -EBUSY; | 320 | dma_cookie_t cookie = -EBUSY; |
321 | 321 | ||
322 | spin_lock_irqsave(&chan->desc_lock, flags); | 322 | spin_lock_irqsave(&chan->desc_lock, flags); |
323 | 323 | ||
324 | list_for_each_entry(child, &desc->tx_list, node) { | 324 | list_for_each_entry(child, &desc->tx_list, node) { |
325 | cookie = dma_cookie_assign(&child->async_tx); | 325 | cookie = dma_cookie_assign(&child->async_tx); |
326 | } | 326 | } |
327 | 327 | ||
328 | append_pending_queue(chan, desc); | 328 | append_pending_queue(chan, desc); |
329 | 329 | ||
330 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 330 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
331 | 331 | ||
332 | return cookie; | 332 | return cookie; |
333 | } | 333 | } |
334 | 334 | ||
335 | struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) | 335 | struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) |
336 | { | 336 | { |
337 | struct mmp_pdma_desc_sw *desc; | 337 | struct mmp_pdma_desc_sw *desc; |
338 | dma_addr_t pdesc; | 338 | dma_addr_t pdesc; |
339 | 339 | ||
340 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 340 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
341 | if (!desc) { | 341 | if (!desc) { |
342 | dev_err(chan->dev, "out of memory for link descriptor\n"); | 342 | dev_err(chan->dev, "out of memory for link descriptor\n"); |
343 | return NULL; | 343 | return NULL; |
344 | } | 344 | } |
345 | 345 | ||
346 | memset(desc, 0, sizeof(*desc)); | 346 | memset(desc, 0, sizeof(*desc)); |
347 | INIT_LIST_HEAD(&desc->tx_list); | 347 | INIT_LIST_HEAD(&desc->tx_list); |
348 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); | 348 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); |
349 | /* each desc has submit */ | 349 | /* each desc has submit */ |
350 | desc->async_tx.tx_submit = mmp_pdma_tx_submit; | 350 | desc->async_tx.tx_submit = mmp_pdma_tx_submit; |
351 | desc->async_tx.phys = pdesc; | 351 | desc->async_tx.phys = pdesc; |
352 | 352 | ||
353 | return desc; | 353 | return desc; |
354 | } | 354 | } |
355 | 355 | ||
356 | /** | 356 | /** |
357 | * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. | 357 | * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. |
358 | * | 358 | * |
359 | * This function will create a dma pool for descriptor allocation. | 359 | * This function will create a dma pool for descriptor allocation. |
360 | * Request irq only when channel is requested | 360 | * Request irq only when channel is requested |
361 | * Return - The number of allocated descriptors. | 361 | * Return - The number of allocated descriptors. |
362 | */ | 362 | */ |
363 | 363 | ||
364 | static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | 364 | static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) |
365 | { | 365 | { |
366 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 366 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
367 | 367 | ||
368 | if (chan->desc_pool) | 368 | if (chan->desc_pool) |
369 | return 1; | 369 | return 1; |
370 | 370 | ||
371 | chan->desc_pool = | 371 | chan->desc_pool = |
372 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, | 372 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, |
373 | sizeof(struct mmp_pdma_desc_sw), | 373 | sizeof(struct mmp_pdma_desc_sw), |
374 | __alignof__(struct mmp_pdma_desc_sw), 0); | 374 | __alignof__(struct mmp_pdma_desc_sw), 0); |
375 | if (!chan->desc_pool) { | 375 | if (!chan->desc_pool) { |
376 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | 376 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); |
377 | return -ENOMEM; | 377 | return -ENOMEM; |
378 | } | 378 | } |
379 | if (chan->phy) { | 379 | if (chan->phy) { |
380 | chan->phy->vchan = NULL; | 380 | chan->phy->vchan = NULL; |
381 | chan->phy = NULL; | 381 | chan->phy = NULL; |
382 | } | 382 | } |
383 | chan->idle = true; | 383 | chan->idle = true; |
384 | chan->dev_addr = 0; | 384 | chan->dev_addr = 0; |
385 | return 1; | 385 | return 1; |
386 | } | 386 | } |
387 | 387 | ||
388 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, | 388 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, |
389 | struct list_head *list) | 389 | struct list_head *list) |
390 | { | 390 | { |
391 | struct mmp_pdma_desc_sw *desc, *_desc; | 391 | struct mmp_pdma_desc_sw *desc, *_desc; |
392 | 392 | ||
393 | list_for_each_entry_safe(desc, _desc, list, node) { | 393 | list_for_each_entry_safe(desc, _desc, list, node) { |
394 | list_del(&desc->node); | 394 | list_del(&desc->node); |
395 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 395 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
396 | } | 396 | } |
397 | } | 397 | } |
398 | 398 | ||
399 | static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | 399 | static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) |
400 | { | 400 | { |
401 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 401 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
402 | unsigned long flags; | 402 | unsigned long flags; |
403 | 403 | ||
404 | spin_lock_irqsave(&chan->desc_lock, flags); | 404 | spin_lock_irqsave(&chan->desc_lock, flags); |
405 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | 405 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); |
406 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | 406 | mmp_pdma_free_desc_list(chan, &chan->chain_running); |
407 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 407 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
408 | 408 | ||
409 | dma_pool_destroy(chan->desc_pool); | 409 | dma_pool_destroy(chan->desc_pool); |
410 | chan->desc_pool = NULL; | 410 | chan->desc_pool = NULL; |
411 | chan->idle = true; | 411 | chan->idle = true; |
412 | chan->dev_addr = 0; | 412 | chan->dev_addr = 0; |
413 | if (chan->phy) { | 413 | if (chan->phy) { |
414 | chan->phy->vchan = NULL; | 414 | chan->phy->vchan = NULL; |
415 | chan->phy = NULL; | 415 | chan->phy = NULL; |
416 | } | 416 | } |
417 | return; | 417 | return; |
418 | } | 418 | } |
419 | 419 | ||
420 | static struct dma_async_tx_descriptor * | 420 | static struct dma_async_tx_descriptor * |
421 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, | 421 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, |
422 | dma_addr_t dma_dst, dma_addr_t dma_src, | 422 | dma_addr_t dma_dst, dma_addr_t dma_src, |
423 | size_t len, unsigned long flags) | 423 | size_t len, unsigned long flags) |
424 | { | 424 | { |
425 | struct mmp_pdma_chan *chan; | 425 | struct mmp_pdma_chan *chan; |
426 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | 426 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; |
427 | size_t copy = 0; | 427 | size_t copy = 0; |
428 | 428 | ||
429 | if (!dchan) | 429 | if (!dchan) |
430 | return NULL; | 430 | return NULL; |
431 | 431 | ||
432 | if (!len) | 432 | if (!len) |
433 | return NULL; | 433 | return NULL; |
434 | 434 | ||
435 | chan = to_mmp_pdma_chan(dchan); | 435 | chan = to_mmp_pdma_chan(dchan); |
436 | 436 | ||
437 | if (!chan->dir) { | 437 | if (!chan->dir) { |
438 | chan->dir = DMA_MEM_TO_MEM; | 438 | chan->dir = DMA_MEM_TO_MEM; |
439 | chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; | 439 | chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; |
440 | chan->dcmd |= DCMD_BURST32; | 440 | chan->dcmd |= DCMD_BURST32; |
441 | } | 441 | } |
442 | 442 | ||
443 | do { | 443 | do { |
444 | /* Allocate the link descriptor from DMA pool */ | 444 | /* Allocate the link descriptor from DMA pool */ |
445 | new = mmp_pdma_alloc_descriptor(chan); | 445 | new = mmp_pdma_alloc_descriptor(chan); |
446 | if (!new) { | 446 | if (!new) { |
447 | dev_err(chan->dev, "no memory for desc\n"); | 447 | dev_err(chan->dev, "no memory for desc\n"); |
448 | goto fail; | 448 | goto fail; |
449 | } | 449 | } |
450 | 450 | ||
451 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | 451 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); |
452 | 452 | ||
453 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); | 453 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); |
454 | new->desc.dsadr = dma_src; | 454 | new->desc.dsadr = dma_src; |
455 | new->desc.dtadr = dma_dst; | 455 | new->desc.dtadr = dma_dst; |
456 | 456 | ||
457 | if (!first) | 457 | if (!first) |
458 | first = new; | 458 | first = new; |
459 | else | 459 | else |
460 | prev->desc.ddadr = new->async_tx.phys; | 460 | prev->desc.ddadr = new->async_tx.phys; |
461 | 461 | ||
462 | new->async_tx.cookie = 0; | 462 | new->async_tx.cookie = 0; |
463 | async_tx_ack(&new->async_tx); | 463 | async_tx_ack(&new->async_tx); |
464 | 464 | ||
465 | prev = new; | 465 | prev = new; |
466 | len -= copy; | 466 | len -= copy; |
467 | 467 | ||
468 | if (chan->dir == DMA_MEM_TO_DEV) { | 468 | if (chan->dir == DMA_MEM_TO_DEV) { |
469 | dma_src += copy; | 469 | dma_src += copy; |
470 | } else if (chan->dir == DMA_DEV_TO_MEM) { | 470 | } else if (chan->dir == DMA_DEV_TO_MEM) { |
471 | dma_dst += copy; | 471 | dma_dst += copy; |
472 | } else if (chan->dir == DMA_MEM_TO_MEM) { | 472 | } else if (chan->dir == DMA_MEM_TO_MEM) { |
473 | dma_src += copy; | 473 | dma_src += copy; |
474 | dma_dst += copy; | 474 | dma_dst += copy; |
475 | } | 475 | } |
476 | 476 | ||
477 | /* Insert the link descriptor to the LD ring */ | 477 | /* Insert the link descriptor to the LD ring */ |
478 | list_add_tail(&new->node, &first->tx_list); | 478 | list_add_tail(&new->node, &first->tx_list); |
479 | } while (len); | 479 | } while (len); |
480 | 480 | ||
481 | first->async_tx.flags = flags; /* client is in control of this ack */ | 481 | first->async_tx.flags = flags; /* client is in control of this ack */ |
482 | first->async_tx.cookie = -EBUSY; | 482 | first->async_tx.cookie = -EBUSY; |
483 | 483 | ||
484 | /* last desc and fire IRQ */ | 484 | /* last desc and fire IRQ */ |
485 | new->desc.ddadr = DDADR_STOP; | 485 | new->desc.ddadr = DDADR_STOP; |
486 | new->desc.dcmd |= DCMD_ENDIRQEN; | 486 | new->desc.dcmd |= DCMD_ENDIRQEN; |
487 | 487 | ||
488 | return &first->async_tx; | 488 | return &first->async_tx; |
489 | 489 | ||
490 | fail: | 490 | fail: |
491 | if (first) | 491 | if (first) |
492 | mmp_pdma_free_desc_list(chan, &first->tx_list); | 492 | mmp_pdma_free_desc_list(chan, &first->tx_list); |
493 | return NULL; | 493 | return NULL; |
494 | } | 494 | } |
495 | 495 | ||
496 | static struct dma_async_tx_descriptor * | 496 | static struct dma_async_tx_descriptor * |
497 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | 497 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, |
498 | unsigned int sg_len, enum dma_transfer_direction dir, | 498 | unsigned int sg_len, enum dma_transfer_direction dir, |
499 | unsigned long flags, void *context) | 499 | unsigned long flags, void *context) |
500 | { | 500 | { |
501 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 501 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
502 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 502 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
503 | size_t len, avail; | 503 | size_t len, avail; |
504 | struct scatterlist *sg; | 504 | struct scatterlist *sg; |
505 | dma_addr_t addr; | 505 | dma_addr_t addr; |
506 | int i; | 506 | int i; |
507 | 507 | ||
508 | if ((sgl == NULL) || (sg_len == 0)) | 508 | if ((sgl == NULL) || (sg_len == 0)) |
509 | return NULL; | 509 | return NULL; |
510 | 510 | ||
511 | for_each_sg(sgl, sg, sg_len, i) { | 511 | for_each_sg(sgl, sg, sg_len, i) { |
512 | addr = sg_dma_address(sg); | 512 | addr = sg_dma_address(sg); |
513 | avail = sg_dma_len(sgl); | 513 | avail = sg_dma_len(sgl); |
514 | 514 | ||
515 | do { | 515 | do { |
516 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | 516 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); |
517 | 517 | ||
518 | /* allocate and populate the descriptor */ | 518 | /* allocate and populate the descriptor */ |
519 | new = mmp_pdma_alloc_descriptor(chan); | 519 | new = mmp_pdma_alloc_descriptor(chan); |
520 | if (!new) { | 520 | if (!new) { |
521 | dev_err(chan->dev, "no memory for desc\n"); | 521 | dev_err(chan->dev, "no memory for desc\n"); |
522 | goto fail; | 522 | goto fail; |
523 | } | 523 | } |
524 | 524 | ||
525 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); | 525 | new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); |
526 | if (dir == DMA_MEM_TO_DEV) { | 526 | if (dir == DMA_MEM_TO_DEV) { |
527 | new->desc.dsadr = addr; | 527 | new->desc.dsadr = addr; |
528 | new->desc.dtadr = chan->dev_addr; | 528 | new->desc.dtadr = chan->dev_addr; |
529 | } else { | 529 | } else { |
530 | new->desc.dsadr = chan->dev_addr; | 530 | new->desc.dsadr = chan->dev_addr; |
531 | new->desc.dtadr = addr; | 531 | new->desc.dtadr = addr; |
532 | } | 532 | } |
533 | 533 | ||
534 | if (!first) | 534 | if (!first) |
535 | first = new; | 535 | first = new; |
536 | else | 536 | else |
537 | prev->desc.ddadr = new->async_tx.phys; | 537 | prev->desc.ddadr = new->async_tx.phys; |
538 | 538 | ||
539 | new->async_tx.cookie = 0; | 539 | new->async_tx.cookie = 0; |
540 | async_tx_ack(&new->async_tx); | 540 | async_tx_ack(&new->async_tx); |
541 | prev = new; | 541 | prev = new; |
542 | 542 | ||
543 | /* Insert the link descriptor to the LD ring */ | 543 | /* Insert the link descriptor to the LD ring */ |
544 | list_add_tail(&new->node, &first->tx_list); | 544 | list_add_tail(&new->node, &first->tx_list); |
545 | 545 | ||
546 | /* update metadata */ | 546 | /* update metadata */ |
547 | addr += len; | 547 | addr += len; |
548 | avail -= len; | 548 | avail -= len; |
549 | } while (avail); | 549 | } while (avail); |
550 | } | 550 | } |
551 | 551 | ||
552 | first->async_tx.cookie = -EBUSY; | 552 | first->async_tx.cookie = -EBUSY; |
553 | first->async_tx.flags = flags; | 553 | first->async_tx.flags = flags; |
554 | 554 | ||
555 | /* last desc and fire IRQ */ | 555 | /* last desc and fire IRQ */ |
556 | new->desc.ddadr = DDADR_STOP; | 556 | new->desc.ddadr = DDADR_STOP; |
557 | new->desc.dcmd |= DCMD_ENDIRQEN; | 557 | new->desc.dcmd |= DCMD_ENDIRQEN; |
558 | 558 | ||
559 | return &first->async_tx; | 559 | return &first->async_tx; |
560 | 560 | ||
561 | fail: | 561 | fail: |
562 | if (first) | 562 | if (first) |
563 | mmp_pdma_free_desc_list(chan, &first->tx_list); | 563 | mmp_pdma_free_desc_list(chan, &first->tx_list); |
564 | return NULL; | 564 | return NULL; |
565 | } | 565 | } |
566 | 566 | ||
567 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | 567 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, |
568 | unsigned long arg) | 568 | unsigned long arg) |
569 | { | 569 | { |
570 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 570 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
571 | struct dma_slave_config *cfg = (void *)arg; | 571 | struct dma_slave_config *cfg = (void *)arg; |
572 | unsigned long flags; | 572 | unsigned long flags; |
573 | int ret = 0; | 573 | int ret = 0; |
574 | u32 maxburst = 0, addr = 0; | 574 | u32 maxburst = 0, addr = 0; |
575 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | 575 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; |
576 | 576 | ||
577 | if (!dchan) | 577 | if (!dchan) |
578 | return -EINVAL; | 578 | return -EINVAL; |
579 | 579 | ||
580 | switch (cmd) { | 580 | switch (cmd) { |
581 | case DMA_TERMINATE_ALL: | 581 | case DMA_TERMINATE_ALL: |
582 | disable_chan(chan->phy); | 582 | disable_chan(chan->phy); |
583 | if (chan->phy) { | 583 | if (chan->phy) { |
584 | chan->phy->vchan = NULL; | 584 | chan->phy->vchan = NULL; |
585 | chan->phy = NULL; | 585 | chan->phy = NULL; |
586 | } | 586 | } |
587 | spin_lock_irqsave(&chan->desc_lock, flags); | 587 | spin_lock_irqsave(&chan->desc_lock, flags); |
588 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); | 588 | mmp_pdma_free_desc_list(chan, &chan->chain_pending); |
589 | mmp_pdma_free_desc_list(chan, &chan->chain_running); | 589 | mmp_pdma_free_desc_list(chan, &chan->chain_running); |
590 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 590 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
591 | chan->idle = true; | 591 | chan->idle = true; |
592 | break; | 592 | break; |
593 | case DMA_SLAVE_CONFIG: | 593 | case DMA_SLAVE_CONFIG: |
594 | if (cfg->direction == DMA_DEV_TO_MEM) { | 594 | if (cfg->direction == DMA_DEV_TO_MEM) { |
595 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | 595 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; |
596 | maxburst = cfg->src_maxburst; | 596 | maxburst = cfg->src_maxburst; |
597 | width = cfg->src_addr_width; | 597 | width = cfg->src_addr_width; |
598 | addr = cfg->src_addr; | 598 | addr = cfg->src_addr; |
599 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | 599 | } else if (cfg->direction == DMA_MEM_TO_DEV) { |
600 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | 600 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; |
601 | maxburst = cfg->dst_maxburst; | 601 | maxburst = cfg->dst_maxburst; |
602 | width = cfg->dst_addr_width; | 602 | width = cfg->dst_addr_width; |
603 | addr = cfg->dst_addr; | 603 | addr = cfg->dst_addr; |
604 | } | 604 | } |
605 | 605 | ||
606 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | 606 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) |
607 | chan->dcmd |= DCMD_WIDTH1; | 607 | chan->dcmd |= DCMD_WIDTH1; |
608 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | 608 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) |
609 | chan->dcmd |= DCMD_WIDTH2; | 609 | chan->dcmd |= DCMD_WIDTH2; |
610 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | 610 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) |
611 | chan->dcmd |= DCMD_WIDTH4; | 611 | chan->dcmd |= DCMD_WIDTH4; |
612 | 612 | ||
613 | if (maxburst == 8) | 613 | if (maxburst == 8) |
614 | chan->dcmd |= DCMD_BURST8; | 614 | chan->dcmd |= DCMD_BURST8; |
615 | else if (maxburst == 16) | 615 | else if (maxburst == 16) |
616 | chan->dcmd |= DCMD_BURST16; | 616 | chan->dcmd |= DCMD_BURST16; |
617 | else if (maxburst == 32) | 617 | else if (maxburst == 32) |
618 | chan->dcmd |= DCMD_BURST32; | 618 | chan->dcmd |= DCMD_BURST32; |
619 | 619 | ||
620 | if (cfg) { | 620 | chan->dir = cfg->direction; |
621 | chan->dir = cfg->direction; | 621 | chan->drcmr = cfg->slave_id; |
622 | chan->drcmr = cfg->slave_id; | ||
623 | } | ||
624 | chan->dev_addr = addr; | 622 | chan->dev_addr = addr; |
625 | break; | 623 | break; |
626 | default: | 624 | default: |
627 | return -ENOSYS; | 625 | return -ENOSYS; |
628 | } | 626 | } |
629 | 627 | ||
630 | return ret; | 628 | return ret; |
631 | } | 629 | } |
632 | 630 | ||
633 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | 631 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, |
634 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 632 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
635 | { | 633 | { |
636 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 634 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
637 | enum dma_status ret; | 635 | enum dma_status ret; |
638 | unsigned long flags; | 636 | unsigned long flags; |
639 | 637 | ||
640 | spin_lock_irqsave(&chan->desc_lock, flags); | 638 | spin_lock_irqsave(&chan->desc_lock, flags); |
641 | ret = dma_cookie_status(dchan, cookie, txstate); | 639 | ret = dma_cookie_status(dchan, cookie, txstate); |
642 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 640 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
643 | 641 | ||
644 | return ret; | 642 | return ret; |
645 | } | 643 | } |
646 | 644 | ||
647 | /** | 645 | /** |
648 | * mmp_pdma_issue_pending - Issue the DMA start command | 646 | * mmp_pdma_issue_pending - Issue the DMA start command |
649 | * pending list ==> running list | 647 | * pending list ==> running list |
650 | */ | 648 | */ |
651 | static void mmp_pdma_issue_pending(struct dma_chan *dchan) | 649 | static void mmp_pdma_issue_pending(struct dma_chan *dchan) |
652 | { | 650 | { |
653 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 651 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
654 | unsigned long flags; | 652 | unsigned long flags; |
655 | 653 | ||
656 | spin_lock_irqsave(&chan->desc_lock, flags); | 654 | spin_lock_irqsave(&chan->desc_lock, flags); |
657 | start_pending_queue(chan); | 655 | start_pending_queue(chan); |
658 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 656 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
659 | } | 657 | } |
660 | 658 | ||
661 | /* | 659 | /* |
662 | * dma_do_tasklet | 660 | * dma_do_tasklet |
663 | * Do call back | 661 | * Do call back |
664 | * Start pending list | 662 | * Start pending list |
665 | */ | 663 | */ |
666 | static void dma_do_tasklet(unsigned long data) | 664 | static void dma_do_tasklet(unsigned long data) |
667 | { | 665 | { |
668 | struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; | 666 | struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; |
669 | struct mmp_pdma_desc_sw *desc, *_desc; | 667 | struct mmp_pdma_desc_sw *desc, *_desc; |
670 | LIST_HEAD(chain_cleanup); | 668 | LIST_HEAD(chain_cleanup); |
671 | unsigned long flags; | 669 | unsigned long flags; |
672 | 670 | ||
673 | /* submit pending list; callback for each desc; free desc */ | 671 | /* submit pending list; callback for each desc; free desc */ |
674 | 672 | ||
675 | spin_lock_irqsave(&chan->desc_lock, flags); | 673 | spin_lock_irqsave(&chan->desc_lock, flags); |
676 | 674 | ||
677 | /* update the cookie if we have some descriptors to cleanup */ | 675 | /* update the cookie if we have some descriptors to cleanup */ |
678 | if (!list_empty(&chan->chain_running)) { | 676 | if (!list_empty(&chan->chain_running)) { |
679 | dma_cookie_t cookie; | 677 | dma_cookie_t cookie; |
680 | 678 | ||
681 | desc = to_mmp_pdma_desc(chan->chain_running.prev); | 679 | desc = to_mmp_pdma_desc(chan->chain_running.prev); |
682 | cookie = desc->async_tx.cookie; | 680 | cookie = desc->async_tx.cookie; |
683 | dma_cookie_complete(&desc->async_tx); | 681 | dma_cookie_complete(&desc->async_tx); |
684 | 682 | ||
685 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); | 683 | dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); |
686 | } | 684 | } |
687 | 685 | ||
688 | /* | 686 | /* |
689 | * move the descriptors to a temporary list so we can drop the lock | 687 | * move the descriptors to a temporary list so we can drop the lock |
690 | * during the entire cleanup operation | 688 | * during the entire cleanup operation |
691 | */ | 689 | */ |
692 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); | 690 | list_splice_tail_init(&chan->chain_running, &chain_cleanup); |
693 | 691 | ||
694 | /* the hardware is now idle and ready for more */ | 692 | /* the hardware is now idle and ready for more */ |
695 | chan->idle = true; | 693 | chan->idle = true; |
696 | 694 | ||
697 | /* Start any pending transactions automatically */ | 695 | /* Start any pending transactions automatically */ |
698 | start_pending_queue(chan); | 696 | start_pending_queue(chan); |
699 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 697 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
700 | 698 | ||
701 | /* Run the callback for each descriptor, in order */ | 699 | /* Run the callback for each descriptor, in order */ |
702 | list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { | 700 | list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { |
703 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | 701 | struct dma_async_tx_descriptor *txd = &desc->async_tx; |
704 | 702 | ||
705 | /* Remove from the list of transactions */ | 703 | /* Remove from the list of transactions */ |
706 | list_del(&desc->node); | 704 | list_del(&desc->node); |
707 | /* Run the link descriptor callback function */ | 705 | /* Run the link descriptor callback function */ |
708 | if (txd->callback) | 706 | if (txd->callback) |
709 | txd->callback(txd->callback_param); | 707 | txd->callback(txd->callback_param); |
710 | 708 | ||
711 | dma_pool_free(chan->desc_pool, desc, txd->phys); | 709 | dma_pool_free(chan->desc_pool, desc, txd->phys); |
712 | } | 710 | } |
713 | } | 711 | } |
714 | 712 | ||
715 | static int __devexit mmp_pdma_remove(struct platform_device *op) | 713 | static int __devexit mmp_pdma_remove(struct platform_device *op) |
716 | { | 714 | { |
717 | struct mmp_pdma_device *pdev = platform_get_drvdata(op); | 715 | struct mmp_pdma_device *pdev = platform_get_drvdata(op); |
718 | 716 | ||
719 | dma_async_device_unregister(&pdev->device); | 717 | dma_async_device_unregister(&pdev->device); |
720 | return 0; | 718 | return 0; |
721 | } | 719 | } |
722 | 720 | ||
723 | static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | 721 | static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, |
724 | int idx, int irq) | 722 | int idx, int irq) |
725 | { | 723 | { |
726 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; | 724 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; |
727 | struct mmp_pdma_chan *chan; | 725 | struct mmp_pdma_chan *chan; |
728 | int ret; | 726 | int ret; |
729 | 727 | ||
730 | chan = devm_kzalloc(pdev->dev, | 728 | chan = devm_kzalloc(pdev->dev, |
731 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); | 729 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); |
732 | if (chan == NULL) | 730 | if (chan == NULL) |
733 | return -ENOMEM; | 731 | return -ENOMEM; |
734 | 732 | ||
735 | phy->idx = idx; | 733 | phy->idx = idx; |
736 | phy->base = pdev->base; | 734 | phy->base = pdev->base; |
737 | 735 | ||
738 | if (irq) { | 736 | if (irq) { |
739 | ret = devm_request_irq(pdev->dev, irq, | 737 | ret = devm_request_irq(pdev->dev, irq, |
740 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); | 738 | mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); |
741 | if (ret) { | 739 | if (ret) { |
742 | dev_err(pdev->dev, "channel request irq fail!\n"); | 740 | dev_err(pdev->dev, "channel request irq fail!\n"); |
743 | return ret; | 741 | return ret; |
744 | } | 742 | } |
745 | } | 743 | } |
746 | 744 | ||
747 | spin_lock_init(&chan->desc_lock); | 745 | spin_lock_init(&chan->desc_lock); |
748 | chan->dev = pdev->dev; | 746 | chan->dev = pdev->dev; |
749 | chan->chan.device = &pdev->device; | 747 | chan->chan.device = &pdev->device; |
750 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | 748 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
751 | INIT_LIST_HEAD(&chan->chain_pending); | 749 | INIT_LIST_HEAD(&chan->chain_pending); |
752 | INIT_LIST_HEAD(&chan->chain_running); | 750 | INIT_LIST_HEAD(&chan->chain_running); |
753 | 751 | ||
754 | /* register virt channel to dma engine */ | 752 | /* register virt channel to dma engine */ |
755 | list_add_tail(&chan->chan.device_node, | 753 | list_add_tail(&chan->chan.device_node, |
756 | &pdev->device.channels); | 754 | &pdev->device.channels); |
757 | 755 | ||
758 | return 0; | 756 | return 0; |
759 | } | 757 | } |
760 | 758 | ||
761 | static struct of_device_id mmp_pdma_dt_ids[] = { | 759 | static struct of_device_id mmp_pdma_dt_ids[] = { |
762 | { .compatible = "marvell,pdma-1.0", }, | 760 | { .compatible = "marvell,pdma-1.0", }, |
763 | {} | 761 | {} |
764 | }; | 762 | }; |
765 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); | 763 | MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); |
766 | 764 | ||
767 | static int mmp_pdma_probe(struct platform_device *op) | 765 | static int mmp_pdma_probe(struct platform_device *op) |
768 | { | 766 | { |
769 | struct mmp_pdma_device *pdev; | 767 | struct mmp_pdma_device *pdev; |
770 | const struct of_device_id *of_id; | 768 | const struct of_device_id *of_id; |
771 | struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); | 769 | struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); |
772 | struct resource *iores; | 770 | struct resource *iores; |
773 | int i, ret, irq = 0; | 771 | int i, ret, irq = 0; |
774 | int dma_channels = 0, irq_num = 0; | 772 | int dma_channels = 0, irq_num = 0; |
775 | 773 | ||
776 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | 774 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); |
777 | if (!pdev) | 775 | if (!pdev) |
778 | return -ENOMEM; | 776 | return -ENOMEM; |
779 | pdev->dev = &op->dev; | 777 | pdev->dev = &op->dev; |
780 | 778 | ||
781 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); | 779 | iores = platform_get_resource(op, IORESOURCE_MEM, 0); |
782 | if (!iores) | 780 | if (!iores) |
783 | return -EINVAL; | 781 | return -EINVAL; |
784 | 782 | ||
785 | pdev->base = devm_request_and_ioremap(pdev->dev, iores); | 783 | pdev->base = devm_request_and_ioremap(pdev->dev, iores); |
786 | if (!pdev->base) | 784 | if (!pdev->base) |
787 | return -EADDRNOTAVAIL; | 785 | return -EADDRNOTAVAIL; |
788 | 786 | ||
789 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); | 787 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); |
790 | if (of_id) | 788 | if (of_id) |
791 | of_property_read_u32(pdev->dev->of_node, | 789 | of_property_read_u32(pdev->dev->of_node, |
792 | "#dma-channels", &dma_channels); | 790 | "#dma-channels", &dma_channels); |
793 | else if (pdata && pdata->dma_channels) | 791 | else if (pdata && pdata->dma_channels) |
794 | dma_channels = pdata->dma_channels; | 792 | dma_channels = pdata->dma_channels; |
795 | else | 793 | else |
796 | dma_channels = 32; /* default 32 channel */ | 794 | dma_channels = 32; /* default 32 channel */ |
797 | pdev->dma_channels = dma_channels; | 795 | pdev->dma_channels = dma_channels; |
798 | 796 | ||
799 | for (i = 0; i < dma_channels; i++) { | 797 | for (i = 0; i < dma_channels; i++) { |
800 | if (platform_get_irq(op, i) > 0) | 798 | if (platform_get_irq(op, i) > 0) |
801 | irq_num++; | 799 | irq_num++; |
802 | } | 800 | } |
803 | 801 | ||
804 | pdev->phy = devm_kzalloc(pdev->dev, | 802 | pdev->phy = devm_kzalloc(pdev->dev, |
805 | dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); | 803 | dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); |
806 | if (pdev->phy == NULL) | 804 | if (pdev->phy == NULL) |
807 | return -ENOMEM; | 805 | return -ENOMEM; |
808 | 806 | ||
809 | INIT_LIST_HEAD(&pdev->device.channels); | 807 | INIT_LIST_HEAD(&pdev->device.channels); |
810 | 808 | ||
811 | if (irq_num != dma_channels) { | 809 | if (irq_num != dma_channels) { |
812 | /* all chan share one irq, demux inside */ | 810 | /* all chan share one irq, demux inside */ |
813 | irq = platform_get_irq(op, 0); | 811 | irq = platform_get_irq(op, 0); |
814 | ret = devm_request_irq(pdev->dev, irq, | 812 | ret = devm_request_irq(pdev->dev, irq, |
815 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); | 813 | mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); |
816 | if (ret) | 814 | if (ret) |
817 | return ret; | 815 | return ret; |
818 | } | 816 | } |
819 | 817 | ||
820 | for (i = 0; i < dma_channels; i++) { | 818 | for (i = 0; i < dma_channels; i++) { |
821 | irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i); | 819 | irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i); |
822 | ret = mmp_pdma_chan_init(pdev, i, irq); | 820 | ret = mmp_pdma_chan_init(pdev, i, irq); |
823 | if (ret) | 821 | if (ret) |
824 | return ret; | 822 | return ret; |
825 | } | 823 | } |
826 | 824 | ||
827 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | 825 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); |
828 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); | 826 | dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); |
829 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); | 827 | dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); |
830 | pdev->device.dev = &op->dev; | 828 | pdev->device.dev = &op->dev; |
831 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; | 829 | pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; |
832 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; | 830 | pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; |
833 | pdev->device.device_tx_status = mmp_pdma_tx_status; | 831 | pdev->device.device_tx_status = mmp_pdma_tx_status; |
834 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; | 832 | pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; |
835 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; | 833 | pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; |
836 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; | 834 | pdev->device.device_issue_pending = mmp_pdma_issue_pending; |
837 | pdev->device.device_control = mmp_pdma_control; | 835 | pdev->device.device_control = mmp_pdma_control; |
838 | pdev->device.copy_align = PDMA_ALIGNMENT; | 836 | pdev->device.copy_align = PDMA_ALIGNMENT; |
839 | 837 | ||
840 | if (pdev->dev->coherent_dma_mask) | 838 | if (pdev->dev->coherent_dma_mask) |
841 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); | 839 | dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); |
842 | else | 840 | else |
843 | dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); | 841 | dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); |
844 | 842 | ||
845 | ret = dma_async_device_register(&pdev->device); | 843 | ret = dma_async_device_register(&pdev->device); |
846 | if (ret) { | 844 | if (ret) { |
847 | dev_err(pdev->device.dev, "unable to register\n"); | 845 | dev_err(pdev->device.dev, "unable to register\n"); |
848 | return ret; | 846 | return ret; |
849 | } | 847 | } |
850 | 848 | ||
851 | dev_info(pdev->device.dev, "initialized\n"); | 849 | dev_info(pdev->device.dev, "initialized\n"); |
852 | return 0; | 850 | return 0; |
853 | } | 851 | } |
854 | 852 | ||
855 | static const struct platform_device_id mmp_pdma_id_table[] = { | 853 | static const struct platform_device_id mmp_pdma_id_table[] = { |
856 | { "mmp-pdma", }, | 854 | { "mmp-pdma", }, |
857 | { }, | 855 | { }, |
858 | }; | 856 | }; |
859 | 857 | ||
860 | static struct platform_driver mmp_pdma_driver = { | 858 | static struct platform_driver mmp_pdma_driver = { |
861 | .driver = { | 859 | .driver = { |
862 | .name = "mmp-pdma", | 860 | .name = "mmp-pdma", |
863 | .owner = THIS_MODULE, | 861 | .owner = THIS_MODULE, |
864 | .of_match_table = mmp_pdma_dt_ids, | 862 | .of_match_table = mmp_pdma_dt_ids, |
865 | }, | 863 | }, |
866 | .id_table = mmp_pdma_id_table, | 864 | .id_table = mmp_pdma_id_table, |
867 | .probe = mmp_pdma_probe, | 865 | .probe = mmp_pdma_probe, |
868 | .remove = mmp_pdma_remove, | 866 | .remove = mmp_pdma_remove, |
869 | }; | 867 | }; |
870 | 868 | ||
871 | module_platform_driver(mmp_pdma_driver); | 869 | module_platform_driver(mmp_pdma_driver); |
872 | 870 | ||
873 | MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); | 871 | MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); |
874 | MODULE_AUTHOR("Marvell International Ltd."); | 872 | MODULE_AUTHOR("Marvell International Ltd."); |
875 | MODULE_LICENSE("GPL v2"); | 873 | MODULE_LICENSE("GPL v2"); |
876 | 874 |