Commit ae891a1b93bf62e9aaa116a7a71312375047fc9f

Authored by Maxin B. John
Committed by Linus Torvalds
1 parent 5190f0c030

devres: fix possible use after free

devres uses the pointer value as key after it's freed, which is safe but
triggers spurious use-after-free warnings on some static analysis tools.
Rearrange code to avoid such warnings.

Signed-off-by: Maxin B. John <maxin.john@gmail.com>
Reviewed-by: Rolf Eike Beer <eike-kernel@sf-tec.de>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 3 additions and 3 deletions
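The reasoning behind the reordering can be shown with a small stand-alone sketch (illustration only, not part of the commit; every name below is invented). The devres match callbacks compare only the stored key value against the cookie they are given, so calling them with an already-freed pointer is functionally safe, but a static analyzer sees a freed pointer still being passed around. Destroying the bookkeeping entry first, then freeing the resource, keeps the pointer live for its last use:

/* Minimal userspace analogy of the devres pattern: a registry that, like
 * devres, matches entries purely by the pointer value it was handed. */
#include <stdlib.h>

struct entry { void *key; struct entry *next; };
static struct entry *registry;

static void reg_add(void *key)
{
	struct entry *e = malloc(sizeof(*e));

	e->key = key;		/* the pointer value is the lookup key */
	e->next = registry;
	registry = e;
}

static int reg_del(void *key)	/* compares only the pointer value */
{
	struct entry **p;

	for (p = &registry; *p; p = &(*p)->next)
		if ((*p)->key == key) {
			struct entry *victim = *p;

			*p = victim->next;
			free(victim);
			return 1;
		}
	return 0;
}

int main(void)
{
	void *obj = malloc(16);

	reg_add(obj);
	/* Old order was: free(obj); reg_del(obj); -- correct, but analyzers
	 * flag the freed pointer.  New order mirrors the patch: unregister
	 * first, then free. */
	reg_del(obj);
	free(obj);
	return 0;
}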

kernel/irq/devres.c

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/gfp.h>

/*
 * Device resource management aware IRQ request/free implementation.
 */
struct irq_devres {
	unsigned int irq;
	void *dev_id;
};

static void devm_irq_release(struct device *dev, void *res)
{
	struct irq_devres *this = res;

	free_irq(this->irq, this->dev_id);
}

static int devm_irq_match(struct device *dev, void *res, void *data)
{
	struct irq_devres *this = res, *match = data;

	return this->irq == match->irq && this->dev_id == match->dev_id;
}

/**
 * devm_request_threaded_irq - allocate an interrupt line for a managed device
 * @dev: device to request interrupt for
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @thread_fn: function to be called in a threaded interrupt context. NULL
 *	       for devices which handle everything in @handler
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * Except for the extra @dev argument, this function takes the
 * same arguments and performs the same function as
 * request_irq().  IRQs requested with this function will be
 * automatically freed on driver detach.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, devm_free_irq() must be used.
 */
int devm_request_threaded_irq(struct device *dev, unsigned int irq,
			      irq_handler_t handler, irq_handler_t thread_fn,
			      unsigned long irqflags, const char *devname,
			      void *dev_id)
{
	struct irq_devres *dr;
	int rc;

	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
			  GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
				  dev_id);
	if (rc) {
		devres_free(dr);
		return rc;
	}

	dr->irq = irq;
	dr->dev_id = dev_id;
	devres_add(dev, dr);

	return 0;
}
EXPORT_SYMBOL(devm_request_threaded_irq);

/**
 * devm_free_irq - free an interrupt
 * @dev: device to free interrupt for
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Except for the extra @dev argument, this function takes the
 * same arguments and performs the same function as free_irq().
 * This function instead of free_irq() should be used to manually
 * free IRQs allocated with devm_request_irq().
 */
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
	struct irq_devres match_data = { irq, dev_id };

-	free_irq(irq, dev_id);
	WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
			       &match_data));
+	free_irq(irq, dev_id);
}
EXPORT_SYMBOL(devm_free_irq);
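For context on how the function above is normally consumed, here is a minimal, hypothetical platform-driver probe (the foo_* names are invented; this is a sketch, not code from the commit):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;		/* defer the real work to the thread */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Freed automatically on driver detach; devm_free_irq() is only
	 * needed if the IRQ must be released earlier than that. */
	return devm_request_threaded_irq(&pdev->dev, irq, foo_irq_handler,
					 foo_thread_fn, IRQF_ONESHOT,
					 dev_name(&pdev->dev), pdev);
}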
lib/devres.c

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/module.h>

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
-	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
+	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
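A hedged sketch of typical devm_ioremap() usage in a probe routine (bar_probe and the register offset are invented; this is not part of the patch):

#include <linux/io.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Unmapped automatically on detach; devm_iounmap() is only needed
	 * for an early, explicit release. */
	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x00);	/* 0x00 is a made-up register offset */
	return 0;
}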

#ifdef CONFIG_HAS_IOPORT
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @dev.  If iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pcim_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
				   const char *name)
{
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif
#endif
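As a rough illustration of the PCI helpers above, a hypothetical PCI probe might request and map BAR 0 like this (the baz_* names are invented; pcim_enable_device() is the managed counterpart of pci_enable_device()):

#include <linux/pci.h>

static int baz_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mmio;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Request and map BAR 0; both are released automatically on detach. */
	rc = pcim_iomap_regions(pdev, 1 << 0, "baz");
	if (rc)
		return rc;

	mmio = pcim_iomap_table(pdev)[0];
	dev_info(&pdev->dev, "BAR0 mapped at %p\n", mmio);
	return 0;
}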
mm/dmapool.c

/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
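A short usage sketch of the unmanaged pool API documented above, assuming a hypothetical 64-byte descriptor (qux_setup and the pool name are invented; not part of the patch):

#include <linux/device.h>
#include <linux/dmapool.h>

static int qux_setup(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	/* 64-byte blocks, 64-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("qux_desc", dev, 64, 64, 4096);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, touch 'desc' from the CPU ... */

	dma_pool_free(pool, desc, dma);
	dma_pool_destroy(pool);
	return 0;
}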

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

-	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
+	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);
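For comparison with the managed variant patched above, a hypothetical probe using dmam_pool_create() needs no explicit teardown at all (quux_probe and the pool name are invented; this is a sketch, not code from the commit):

#include <linux/device.h>
#include <linux/dmapool.h>

static int quux_probe(struct device *dev)
{
	struct dma_pool *pool;

	pool = dmam_pool_create("quux_desc", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* No dmam_pool_destroy()/dma_pool_destroy() in the error or remove
	 * paths: the devres entry added by dmam_pool_create() tears the
	 * pool down automatically on driver detach. */
	return 0;
}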