Commit 81bf58eb3c16302f19676e73dfe4d1832199c0ef
Merge branches 'regmap/irq' and 'regmap/cache' into regmap-next

Exists in master and in 6 other branches.

8 changed files:
drivers/base/regmap/Kconfig
drivers/base/regmap/Makefile
drivers/base/regmap/internal.h
drivers/base/regmap/regcache-lzo.c
drivers/base/regmap/regcache.c
@@ -241,6 +241,8 @@
 		map->cache_ops->name);
 	name = map->cache_ops->name;
 	trace_regcache_sync(map->dev, name, "start");
+	if (!map->cache_dirty)
+		goto out;
 	if (map->cache_ops->sync) {
 		ret = map->cache_ops->sync(map);
 	} else {
@@ -289,6 +291,23 @@
 	mutex_unlock(&map->lock);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_mark_dirty: Mark the register cache as dirty
+ *
+ * @map: map to mark
+ *
+ * Mark the register cache as dirty, for example due to the device
+ * having been powered down for suspend. If the cache is not marked
+ * as dirty then the cache sync will be suppressed.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+	mutex_lock(&map->lock);
+	map->cache_dirty = true;
+	mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
 
 /**
  * regcache_cache_bypass: Put a register map into cache bypass mode
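The new regcache_mark_dirty() call pairs with regcache_sync(): the sync is now suppressed unless something has flagged the cache as dirty. A minimal sketch of the intended usage in a driver's suspend/resume path (the wm_foo names and the regmap field are hypothetical, not part of this commit):

#include <linux/device.h>
#include <linux/regmap.h>

struct wm_foo {
	struct regmap *regmap;		/* hypothetical driver state */
};

static int wm_foo_suspend(struct device *dev)
{
	struct wm_foo *foo = dev_get_drvdata(dev);

	/* Queue writes in the cache while the part is powered down. */
	regcache_cache_only(foo->regmap, true);
	/* The part will lose its register state, so force a later sync. */
	regcache_mark_dirty(foo->regmap);
	return 0;
}

static int wm_foo_resume(struct device *dev)
{
	struct wm_foo *foo = dev_get_drvdata(dev);

	regcache_cache_only(foo->regmap, false);
	/* Writes back cached values; now a no-op if the cache is clean. */
	return regcache_sync(foo->regmap);
}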
drivers/base/regmap/regmap-irq.c
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+	struct mutex lock;
+
+	struct regmap *map;
+	struct regmap_irq_chip *chip;
+
+	int irq_base;
+
+	void *status_reg_buf;
+	unsigned int *status_buf;
+	unsigned int *mask_buf;
+	unsigned int *mask_buf_def;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+				     int irq)
+{
+	return &data->chip->irqs[irq - data->irq_base];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+	mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	int i, ret;
+
+	/*
+	 * If there's been a change in the mask write it back to the
+	 * hardware. We rely on the use of the regmap core cache to
+	 * suppress pointless writes.
+	 */
+	for (i = 0; i < d->chip->num_regs; i++) {
+		ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+					 d->mask_buf_def[i], d->mask_buf[i]);
+		if (ret != 0)
+			dev_err(d->map->dev, "Failed to sync masks in %x\n",
+				d->chip->mask_base + i);
+	}
+
+	mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+	d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+	d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+}
+
+static struct irq_chip regmap_irq_chip = {
+	.name = "regmap",
+	.irq_bus_lock = regmap_irq_lock,
+	.irq_bus_sync_unlock = regmap_irq_sync_unlock,
+	.irq_disable = regmap_irq_disable,
+	.irq_enable = regmap_irq_enable,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+	struct regmap_irq_chip_data *data = d;
+	struct regmap_irq_chip *chip = data->chip;
+	struct regmap *map = data->map;
+	int ret, i;
+	u8 *buf8 = data->status_reg_buf;
+	u16 *buf16 = data->status_reg_buf;
+	u32 *buf32 = data->status_reg_buf;
+
+	ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
+			       chip->num_regs);
+	if (ret != 0) {
+		dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+		return IRQ_NONE;
+	}
+
+	/*
+	 * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowledging the
+	 * interrupt. We assume that typically few of the interrupts
+	 * will fire simultaneously so don't worry about overhead from
+	 * doing a write per register.
+	 */
+	for (i = 0; i < data->chip->num_regs; i++) {
+		switch (map->format.val_bytes) {
+		case 1:
+			data->status_buf[i] = buf8[i];
+			break;
+		case 2:
+			data->status_buf[i] = buf16[i];
+			break;
+		case 4:
+			data->status_buf[i] = buf32[i];
+			break;
+		default:
+			BUG();
+			return IRQ_NONE;
+		}
+
+		data->status_buf[i] &= ~data->mask_buf[i];
+
+		if (data->status_buf[i] && chip->ack_base) {
+			ret = regmap_write(map, chip->ack_base + i,
+					   data->status_buf[i]);
+			if (ret != 0)
+				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+					chip->ack_base + i, ret);
+		}
+	}
+
+	for (i = 0; i < chip->num_irqs; i++) {
+		if (data->status_buf[chip->irqs[i].reg_offset] &
+		    chip->irqs[i].mask) {
+			handle_nested_irq(data->irq_base + i);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Base IRQ number for the range to allocate, or a negative
+ *            number to allocate the range dynamically.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache. The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data *d;
+	int cur_irq, i;
+	int ret = -ENOMEM;
+
+	irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+	if (irq_base < 0) {
+		dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+			 irq_base);
+		return irq_base;
+	}
+
+	d = kzalloc(sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+				GFP_KERNEL);
+	if (!d->status_buf)
+		goto err_alloc;
+
+	d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
+				    GFP_KERNEL);
+	if (!d->status_reg_buf)
+		goto err_alloc;
+
+	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+			      GFP_KERNEL);
+	if (!d->mask_buf)
+		goto err_alloc;
+
+	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+				  GFP_KERNEL);
+	if (!d->mask_buf_def)
+		goto err_alloc;
+
+	d->map = map;
+	d->chip = chip;
+	d->irq_base = irq_base;
+	mutex_init(&d->lock);
+
+	for (i = 0; i < chip->num_irqs; i++)
+		d->mask_buf_def[chip->irqs[i].reg_offset]
+			|= chip->irqs[i].mask;
+
+	/* Mask all the interrupts by default */
+	for (i = 0; i < chip->num_regs; i++) {
+		d->mask_buf[i] = d->mask_buf_def[i];
+		ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+				chip->mask_base + i, ret);
+			goto err_alloc;
+		}
+	}
+
+	/* Register them with genirq */
+	for (cur_irq = irq_base;
+	     cur_irq < chip->num_irqs + irq_base;
+	     cur_irq++) {
+		irq_set_chip_data(cur_irq, d);
+		irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
+					 handle_edge_irq);
+		irq_set_nested_thread(cur_irq, 1);
+
+		/* ARM needs us to explicitly flag the IRQ as valid
+		 * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+		set_irq_flags(cur_irq, IRQF_VALID);
+#else
+		irq_set_noprobe(cur_irq);
+#endif
+	}
+
+	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+				   chip->name, d);
+	if (ret != 0) {
+		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+		goto err_alloc;
+	}
+
+	return 0;
+
+err_alloc:
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
+/**
+ * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+	if (!d)
+		return;
+
+	free_irq(irq, d);
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
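To make use of the new helper, a chip driver describes each interrupt with a struct regmap_irq, wraps the array in a struct regmap_irq_chip and registers it with regmap_add_irq_chip(). A sketch under assumed register addresses and IRQ names (wm_foo and its register layout are hypothetical, not taken from this commit):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical part: two status/mask/ack register banks. */
enum { WM_FOO_IRQ_GPIO = 0, WM_FOO_IRQ_TEMP_WARN };

static const struct regmap_irq wm_foo_irqs[] = {
	[WM_FOO_IRQ_GPIO]      = { .reg_offset = 0, .mask = 0x01 },
	[WM_FOO_IRQ_TEMP_WARN] = { .reg_offset = 1, .mask = 0x80 },
};

static struct regmap_irq_chip wm_foo_irq_chip = {
	.name = "wm_foo",
	.status_base = 0x10,	/* hypothetical register addresses */
	.mask_base = 0x18,
	.ack_base = 0x20,
	.num_regs = 2,
	.irqs = wm_foo_irqs,
	.num_irqs = ARRAY_SIZE(wm_foo_irqs),
};

static struct regmap_irq_chip_data *wm_foo_irq_data;

static int wm_foo_init_irq(struct regmap *map, int irq, int irq_base)
{
	/*
	 * irq_base is typically supplied via platform data so client
	 * drivers know where the allocated range starts; the primary
	 * interrupt is serviced by the threaded handler in regmap-irq.
	 */
	return regmap_add_irq_chip(map, irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   irq_base, &wm_foo_irq_chip,
				   &wm_foo_irq_data);
}

The matching teardown call is regmap_del_irq_chip(irq, wm_foo_irq_data).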
drivers/base/regmap/regmap.c
include/linux/regmap.h
@@ -25,7 +25,7 @@
 	REGCACHE_NONE,
 	REGCACHE_INDEXED,
 	REGCACHE_RBTREE,
-	REGCACHE_LZO
+	REGCACHE_COMPRESSED
 };
 
 /**
@@ -143,6 +143,54 @@
 int regcache_sync(struct regmap *map);
 void regcache_cache_only(struct regmap *map, bool enable);
 void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+
+/**
+ * Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask: Mask used to flag/control the register.
+ */
+struct regmap_irq {
+	unsigned int reg_offset;
+	unsigned int mask;
+};
+
+/**
+ * Description of a generic regmap irq_chip. This is not intended to
+ * handle every possible interrupt controller, but it should handle a
+ * substantial proportion of those that are found in the wild.
+ *
+ * @name: Descriptive name for IRQ controller.
+ *
+ * @status_base: Base status register address.
+ * @mask_base: Base mask register address.
+ * @ack_base: Base ack address. If zero then the chip is clear on read.
+ *
+ * @num_regs: Number of registers in each control bank.
+ * @irqs: Descriptors for individual IRQs. Interrupt numbers are
+ *        assigned based on the index of each interrupt in the array.
+ * @num_irqs: Number of descriptors.
+ */
+struct regmap_irq_chip {
+	const char *name;
+
+	unsigned int status_base;
+	unsigned int mask_base;
+	unsigned int ack_base;
+
+	int num_regs;
+
+	const struct regmap_irq *irqs;
+	int num_irqs;
+};
+
+struct regmap_irq_chip_data;
+
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
 
 #endif
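With the controller registered, client code requests the demultiplexed interrupts as ordinary nested-threaded IRQs at irq_base plus the index of the corresponding entry in the irqs[] array (the same index handle_nested_irq() is called with above). A sketch reusing the hypothetical wm_foo names from the previous example:

#include <linux/interrupt.h>

static irqreturn_t wm_foo_temp_warn(int irq, void *data)
{
	/* Runs in thread context; the parent demux is nested-threaded. */
	return IRQ_HANDLED;
}

/* irq_base is the base handed to regmap_add_irq_chip() above. */
static int wm_foo_request_irqs(int irq_base)
{
	return request_threaded_irq(irq_base + WM_FOO_IRQ_TEMP_WARN,
				    NULL, wm_foo_temp_warn, IRQF_ONESHOT,
				    "wm_foo temperature", NULL);
}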