Commit 8243b7f5dc1dced123145566291015704f2b4ba7
Exists in
smarc-imx_3.14.28_1.0.0_ga
and in
1 other branch
Merge tag 'regmap-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap
Pull regmap updates from Mark Brown:
 "A quiet release for regmap, some cleanups, fixes and:
   - Improved node coalescing for rbtree, reducing memory usage and
     improving performance during syncs.
   - Support for registering multiple register patches.
   - A quirk for handling interrupts that need to be clear when masked
     in regmap-irq"

* tag 'regmap-v3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap:
  regmap: rbtree: Make cache_present bitmap per node
  regmap: rbtree: Reduce number of nodes, take 2
  regmap: rbtree: Simplify adjacent node look-up
  regmap: debugfs: Fix continued read from registers file
  regcache-rbtree: Fix reg_stride != 1
  regmap: Allow multiple patches to be registered
  regmap: regcache: allow read-only regs to be cached
  regmap: fix regcache_reg_present() for empty cache
  regmap: core: allow a virtual range to cover its own data window
  regmap: irq: document mask/wake_invert flags
  regmap: irq: make flags bool and put them in a bitfield
  regmap: irq: Allow to acknowledge masked interrupts during initialization
  regmap: Provide __acquires/__releases annotations
Showing 7 changed files Side-by-side Diff
drivers/base/regmap/internal.h
... | ... | @@ -128,9 +128,6 @@ |
128 | 128 | void *cache; |
129 | 129 | u32 cache_dirty; |
130 | 130 | |
131 | - unsigned long *cache_present; | |
132 | - unsigned int cache_present_nbits; | |
133 | - | |
134 | 131 | struct reg_default *patch; |
135 | 132 | int patch_regs; |
136 | 133 | |
... | ... | @@ -203,6 +200,7 @@ |
203 | 200 | unsigned int reg, unsigned int value); |
204 | 201 | int regcache_sync(struct regmap *map); |
205 | 202 | int regcache_sync_block(struct regmap *map, void *block, |
203 | + unsigned long *cache_present, | |
206 | 204 | unsigned int block_base, unsigned int start, |
207 | 205 | unsigned int end); |
208 | 206 | |
... | ... | @@ -218,16 +216,6 @@ |
218 | 216 | bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
219 | 217 | unsigned int val); |
220 | 218 | int regcache_lookup_reg(struct regmap *map, unsigned int reg); |
221 | -int regcache_set_reg_present(struct regmap *map, unsigned int reg); | |
222 | - | |
223 | -static inline bool regcache_reg_present(struct regmap *map, unsigned int reg) | |
224 | -{ | |
225 | - if (!map->cache_present) | |
226 | - return true; | |
227 | - if (reg > map->cache_present_nbits) | |
228 | - return false; | |
229 | - return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg); | |
230 | -} | |
231 | 219 | |
232 | 220 | int _regmap_raw_write(struct regmap *map, unsigned int reg, |
233 | 221 | const void *val, size_t val_len, bool async); |
drivers/base/regmap/regcache-rbtree.c
... | ... | @@ -29,6 +29,8 @@ |
29 | 29 | unsigned int base_reg; |
30 | 30 | /* block of adjacent registers */ |
31 | 31 | void *block; |
32 | + /* Which registers are present */ | |
33 | + long *cache_present; | |
32 | 34 | /* number of registers available in the block */ |
33 | 35 | unsigned int blklen; |
34 | 36 | } __attribute__ ((packed)); |
... | ... | @@ -57,6 +59,7 @@ |
57 | 59 | struct regcache_rbtree_node *rbnode, |
58 | 60 | unsigned int idx, unsigned int val) |
59 | 61 | { |
62 | + set_bit(idx, rbnode->cache_present); | |
60 | 63 | regcache_set_val(map, rbnode->block, idx, val); |
61 | 64 | } |
62 | 65 | |
63 | 66 | |
... | ... | @@ -146,13 +149,13 @@ |
146 | 149 | map->lock(map->lock_arg); |
147 | 150 | |
148 | 151 | mem_size = sizeof(*rbtree_ctx); |
149 | - mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long); | |
150 | 152 | |
151 | 153 | for (node = rb_first(&rbtree_ctx->root); node != NULL; |
152 | 154 | node = rb_next(node)) { |
153 | 155 | n = container_of(node, struct regcache_rbtree_node, node); |
154 | 156 | mem_size += sizeof(*n); |
155 | 157 | mem_size += (n->blklen * map->cache_word_size); |
158 | + mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long); | |
156 | 159 | |
157 | 160 | regcache_rbtree_get_base_top_reg(map, n, &base, &top); |
158 | 161 | this_registers = ((top - base) / map->reg_stride) + 1; |
... | ... | @@ -245,6 +248,7 @@ |
245 | 248 | rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); |
246 | 249 | next = rb_next(&rbtree_node->node); |
247 | 250 | rb_erase(&rbtree_node->node, &rbtree_ctx->root); |
251 | + kfree(rbtree_node->cache_present); | |
248 | 252 | kfree(rbtree_node->block); |
249 | 253 | kfree(rbtree_node); |
250 | 254 | } |
... | ... | @@ -265,7 +269,7 @@ |
265 | 269 | rbnode = regcache_rbtree_lookup(map, reg); |
266 | 270 | if (rbnode) { |
267 | 271 | reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; |
268 | - if (!regcache_reg_present(map, reg)) | |
272 | + if (!test_bit(reg_tmp, rbnode->cache_present)) | |
269 | 273 | return -ENOENT; |
270 | 274 | *value = regcache_rbtree_get_register(map, rbnode, reg_tmp); |
271 | 275 | } else { |
272 | 276 | |
273 | 277 | |
274 | 278 | |
275 | 279 | |
276 | 280 | |
277 | 281 | |
... | ... | @@ -278,27 +282,45 @@ |
278 | 282 | |
279 | 283 | static int regcache_rbtree_insert_to_block(struct regmap *map, |
280 | 284 | struct regcache_rbtree_node *rbnode, |
281 | - unsigned int pos, unsigned int reg, | |
285 | + unsigned int base_reg, | |
286 | + unsigned int top_reg, | |
287 | + unsigned int reg, | |
282 | 288 | unsigned int value) |
283 | 289 | { |
290 | + unsigned int blklen; | |
291 | + unsigned int pos, offset; | |
292 | + unsigned long *present; | |
284 | 293 | u8 *blk; |
285 | 294 | |
295 | + blklen = (top_reg - base_reg) / map->reg_stride + 1; | |
296 | + pos = (reg - base_reg) / map->reg_stride; | |
297 | + offset = (rbnode->base_reg - base_reg) / map->reg_stride; | |
298 | + | |
286 | 299 | blk = krealloc(rbnode->block, |
287 | - (rbnode->blklen + 1) * map->cache_word_size, | |
300 | + blklen * map->cache_word_size, | |
288 | 301 | GFP_KERNEL); |
289 | 302 | if (!blk) |
290 | 303 | return -ENOMEM; |
291 | 304 | |
305 | + present = krealloc(rbnode->cache_present, | |
306 | + BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); | |
307 | + if (!present) { | |
308 | + kfree(blk); | |
309 | + return -ENOMEM; | |
310 | + } | |
311 | + | |
292 | 312 | /* insert the register value in the correct place in the rbnode block */ |
293 | - memmove(blk + (pos + 1) * map->cache_word_size, | |
294 | - blk + pos * map->cache_word_size, | |
295 | - (rbnode->blklen - pos) * map->cache_word_size); | |
313 | + if (pos == 0) { | |
314 | + memmove(blk + offset * map->cache_word_size, | |
315 | + blk, rbnode->blklen * map->cache_word_size); | |
316 | + bitmap_shift_right(present, present, offset, blklen); | |
317 | + } | |
296 | 318 | |
297 | 319 | /* update the rbnode block, its size and the base register */ |
298 | 320 | rbnode->block = blk; |
299 | - rbnode->blklen++; | |
300 | - if (!pos) | |
301 | - rbnode->base_reg = reg; | |
321 | + rbnode->blklen = blklen; | |
322 | + rbnode->base_reg = base_reg; | |
323 | + rbnode->cache_present = present; | |
302 | 324 | |
303 | 325 | regcache_rbtree_set_register(map, rbnode, pos, value); |
304 | 326 | return 0; |
... | ... | @@ -325,8 +347,8 @@ |
325 | 347 | |
326 | 348 | if (i != map->rd_table->n_yes_ranges) { |
327 | 349 | range = &map->rd_table->yes_ranges[i]; |
328 | - rbnode->blklen = range->range_max - range->range_min | |
329 | - + 1; | |
350 | + rbnode->blklen = (range->range_max - range->range_min) / | |
351 | + map->reg_stride + 1; | |
330 | 352 | rbnode->base_reg = range->range_min; |
331 | 353 | } |
332 | 354 | } |
333 | 355 | |
334 | 356 | |
... | ... | @@ -338,12 +360,21 @@ |
338 | 360 | |
339 | 361 | rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, |
340 | 362 | GFP_KERNEL); |
341 | - if (!rbnode->block) { | |
342 | - kfree(rbnode); | |
343 | - return NULL; | |
344 | - } | |
363 | + if (!rbnode->block) | |
364 | + goto err_free; | |
345 | 365 | |
366 | + rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) * | |
367 | + sizeof(*rbnode->cache_present), GFP_KERNEL); | |
368 | + if (!rbnode->cache_present) | |
369 | + goto err_free_block; | |
370 | + | |
346 | 371 | return rbnode; |
372 | + | |
373 | +err_free_block: | |
374 | + kfree(rbnode->block); | |
375 | +err_free: | |
376 | + kfree(rbnode); | |
377 | + return NULL; | |
347 | 378 | } |
348 | 379 | |
349 | 380 | static int regcache_rbtree_write(struct regmap *map, unsigned int reg, |
350 | 381 | |
... | ... | @@ -353,15 +384,9 @@ |
353 | 384 | struct regcache_rbtree_node *rbnode, *rbnode_tmp; |
354 | 385 | struct rb_node *node; |
355 | 386 | unsigned int reg_tmp; |
356 | - unsigned int pos; | |
357 | - int i; | |
358 | 387 | int ret; |
359 | 388 | |
360 | 389 | rbtree_ctx = map->cache; |
361 | - /* update the reg_present bitmap, make space if necessary */ | |
362 | - ret = regcache_set_reg_present(map, reg); | |
363 | - if (ret < 0) | |
364 | - return ret; | |
365 | 390 | |
366 | 391 | /* if we can't locate it in the cached rbnode we'll have |
367 | 392 | * to traverse the rbtree looking for it. |
368 | 393 | |
369 | 394 | |
... | ... | @@ -371,30 +396,43 @@ |
371 | 396 | reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; |
372 | 397 | regcache_rbtree_set_register(map, rbnode, reg_tmp, value); |
373 | 398 | } else { |
399 | + unsigned int base_reg, top_reg; | |
400 | + unsigned int new_base_reg, new_top_reg; | |
401 | + unsigned int min, max; | |
402 | + unsigned int max_dist; | |
403 | + | |
404 | + max_dist = map->reg_stride * sizeof(*rbnode_tmp) / | |
405 | + map->cache_word_size; | |
406 | + if (reg < max_dist) | |
407 | + min = 0; | |
408 | + else | |
409 | + min = reg - max_dist; | |
410 | + max = reg + max_dist; | |
411 | + | |
374 | 412 | /* look for an adjacent register to the one we are about to add */ |
375 | 413 | for (node = rb_first(&rbtree_ctx->root); node; |
376 | 414 | node = rb_next(node)) { |
377 | 415 | rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, |
378 | 416 | node); |
379 | - for (i = 0; i < rbnode_tmp->blklen; i++) { | |
380 | - reg_tmp = rbnode_tmp->base_reg + | |
381 | - (i * map->reg_stride); | |
382 | - if (abs(reg_tmp - reg) != map->reg_stride) | |
383 | - continue; | |
384 | - /* decide where in the block to place our register */ | |
385 | - if (reg_tmp + map->reg_stride == reg) | |
386 | - pos = i + 1; | |
387 | - else | |
388 | - pos = i; | |
389 | - ret = regcache_rbtree_insert_to_block(map, | |
390 | - rbnode_tmp, | |
391 | - pos, reg, | |
392 | - value); | |
393 | - if (ret) | |
394 | - return ret; | |
395 | - rbtree_ctx->cached_rbnode = rbnode_tmp; | |
396 | - return 0; | |
417 | + | |
418 | + regcache_rbtree_get_base_top_reg(map, rbnode_tmp, | |
419 | + &base_reg, &top_reg); | |
420 | + | |
421 | + if (base_reg <= max && top_reg >= min) { | |
422 | + new_base_reg = min(reg, base_reg); | |
423 | + new_top_reg = max(reg, top_reg); | |
424 | + } else { | |
425 | + continue; | |
397 | 426 | } |
427 | + | |
428 | + ret = regcache_rbtree_insert_to_block(map, rbnode_tmp, | |
429 | + new_base_reg, | |
430 | + new_top_reg, reg, | |
431 | + value); | |
432 | + if (ret) | |
433 | + return ret; | |
434 | + rbtree_ctx->cached_rbnode = rbnode_tmp; | |
435 | + return 0; | |
398 | 436 | } |
399 | 437 | |
400 | 438 | /* We did not manage to find a place to insert it in |
401 | 439 | |
402 | 440 | |
403 | 441 | |
404 | 442 | |
405 | 443 | |
406 | 444 | |
407 | 445 | |
... | ... | @@ -418,30 +456,34 @@ |
418 | 456 | struct regcache_rbtree_ctx *rbtree_ctx; |
419 | 457 | struct rb_node *node; |
420 | 458 | struct regcache_rbtree_node *rbnode; |
459 | + unsigned int base_reg, top_reg; | |
460 | + unsigned int start, end; | |
421 | 461 | int ret; |
422 | - int base, end; | |
423 | 462 | |
424 | 463 | rbtree_ctx = map->cache; |
425 | 464 | for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { |
426 | 465 | rbnode = rb_entry(node, struct regcache_rbtree_node, node); |
427 | 466 | |
428 | - if (rbnode->base_reg > max) | |
467 | + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, | |
468 | + &top_reg); | |
469 | + if (base_reg > max) | |
429 | 470 | break; |
430 | - if (rbnode->base_reg + rbnode->blklen < min) | |
471 | + if (top_reg < min) | |
431 | 472 | continue; |
432 | 473 | |
433 | - if (min > rbnode->base_reg) | |
434 | - base = min - rbnode->base_reg; | |
474 | + if (min > base_reg) | |
475 | + start = (min - base_reg) / map->reg_stride; | |
435 | 476 | else |
436 | - base = 0; | |
477 | + start = 0; | |
437 | 478 | |
438 | - if (max < rbnode->base_reg + rbnode->blklen) | |
439 | - end = max - rbnode->base_reg + 1; | |
479 | + if (max < top_reg) | |
480 | + end = (max - base_reg) / map->reg_stride + 1; | |
440 | 481 | else |
441 | 482 | end = rbnode->blklen; |
442 | 483 | |
443 | - ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg, | |
444 | - base, end); | |
484 | + ret = regcache_sync_block(map, rbnode->block, | |
485 | + rbnode->cache_present, | |
486 | + rbnode->base_reg, start, end); | |
445 | 487 | if (ret != 0) |
446 | 488 | return ret; |
447 | 489 | } |
... | ... | @@ -449,6 +491,42 @@ |
449 | 491 | return regmap_async_complete(map); |
450 | 492 | } |
451 | 493 | |
494 | +static int regcache_rbtree_drop(struct regmap *map, unsigned int min, | |
495 | + unsigned int max) | |
496 | +{ | |
497 | + struct regcache_rbtree_ctx *rbtree_ctx; | |
498 | + struct regcache_rbtree_node *rbnode; | |
499 | + struct rb_node *node; | |
500 | + unsigned int base_reg, top_reg; | |
501 | + unsigned int start, end; | |
502 | + | |
503 | + rbtree_ctx = map->cache; | |
504 | + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { | |
505 | + rbnode = rb_entry(node, struct regcache_rbtree_node, node); | |
506 | + | |
507 | + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, | |
508 | + &top_reg); | |
509 | + if (base_reg > max) | |
510 | + break; | |
511 | + if (top_reg < min) | |
512 | + continue; | |
513 | + | |
514 | + if (min > base_reg) | |
515 | + start = (min - base_reg) / map->reg_stride; | |
516 | + else | |
517 | + start = 0; | |
518 | + | |
519 | + if (max < top_reg) | |
520 | + end = (max - base_reg) / map->reg_stride + 1; | |
521 | + else | |
522 | + end = rbnode->blklen; | |
523 | + | |
524 | + bitmap_clear(rbnode->cache_present, start, end - start); | |
525 | + } | |
526 | + | |
527 | + return 0; | |
528 | +} | |
529 | + | |
452 | 530 | struct regcache_ops regcache_rbtree_ops = { |
453 | 531 | .type = REGCACHE_RBTREE, |
454 | 532 | .name = "rbtree", |
... | ... | @@ -456,6 +534,7 @@ |
456 | 534 | .exit = regcache_rbtree_exit, |
457 | 535 | .read = regcache_rbtree_read, |
458 | 536 | .write = regcache_rbtree_write, |
459 | - .sync = regcache_rbtree_sync | |
537 | + .sync = regcache_rbtree_sync, | |
538 | + .drop = regcache_rbtree_drop, | |
460 | 539 | }; |
drivers/base/regmap/regcache.c
... | ... | @@ -121,8 +121,6 @@ |
121 | 121 | map->reg_defaults_raw = config->reg_defaults_raw; |
122 | 122 | map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); |
123 | 123 | map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; |
124 | - map->cache_present = NULL; | |
125 | - map->cache_present_nbits = 0; | |
126 | 124 | |
127 | 125 | map->cache = NULL; |
128 | 126 | map->cache_ops = cache_types[i]; |
... | ... | @@ -181,7 +179,6 @@ |
181 | 179 | |
182 | 180 | BUG_ON(!map->cache_ops); |
183 | 181 | |
184 | - kfree(map->cache_present); | |
185 | 182 | kfree(map->reg_defaults); |
186 | 183 | if (map->cache_free) |
187 | 184 | kfree(map->reg_defaults_raw); |
... | ... | @@ -241,9 +238,6 @@ |
241 | 238 | |
242 | 239 | BUG_ON(!map->cache_ops); |
243 | 240 | |
244 | - if (!regmap_writeable(map, reg)) | |
245 | - return -EIO; | |
246 | - | |
247 | 241 | if (!regmap_volatile(map, reg)) |
248 | 242 | return map->cache_ops->write(map, reg, value); |
249 | 243 | |
250 | 244 | |
251 | 245 | |
252 | 246 | |
... | ... | @@ -410,23 +404,17 @@ |
410 | 404 | int regcache_drop_region(struct regmap *map, unsigned int min, |
411 | 405 | unsigned int max) |
412 | 406 | { |
413 | - unsigned int reg; | |
414 | 407 | int ret = 0; |
415 | 408 | |
416 | - if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop)) | |
409 | + if (!map->cache_ops || !map->cache_ops->drop) | |
417 | 410 | return -EINVAL; |
418 | 411 | |
419 | 412 | map->lock(map->lock_arg); |
420 | 413 | |
421 | 414 | trace_regcache_drop_region(map->dev, min, max); |
422 | 415 | |
423 | - if (map->cache_present) | |
424 | - for (reg = min; reg < max + 1; reg++) | |
425 | - clear_bit(reg, map->cache_present); | |
416 | + ret = map->cache_ops->drop(map, min, max); | |
426 | 417 | |
427 | - if (map->cache_ops && map->cache_ops->drop) | |
428 | - ret = map->cache_ops->drop(map, min, max); | |
429 | - | |
430 | 418 | map->unlock(map->lock_arg); |
431 | 419 | |
432 | 420 | return ret; |
... | ... | @@ -493,42 +481,6 @@ |
493 | 481 | } |
494 | 482 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); |
495 | 483 | |
496 | -int regcache_set_reg_present(struct regmap *map, unsigned int reg) | |
497 | -{ | |
498 | - unsigned long *cache_present; | |
499 | - unsigned int cache_present_size; | |
500 | - unsigned int nregs; | |
501 | - int i; | |
502 | - | |
503 | - nregs = reg + 1; | |
504 | - cache_present_size = BITS_TO_LONGS(nregs); | |
505 | - cache_present_size *= sizeof(long); | |
506 | - | |
507 | - if (!map->cache_present) { | |
508 | - cache_present = kmalloc(cache_present_size, GFP_KERNEL); | |
509 | - if (!cache_present) | |
510 | - return -ENOMEM; | |
511 | - bitmap_zero(cache_present, nregs); | |
512 | - map->cache_present = cache_present; | |
513 | - map->cache_present_nbits = nregs; | |
514 | - } | |
515 | - | |
516 | - if (nregs > map->cache_present_nbits) { | |
517 | - cache_present = krealloc(map->cache_present, | |
518 | - cache_present_size, GFP_KERNEL); | |
519 | - if (!cache_present) | |
520 | - return -ENOMEM; | |
521 | - for (i = 0; i < nregs; i++) | |
522 | - if (i >= map->cache_present_nbits) | |
523 | - clear_bit(i, cache_present); | |
524 | - map->cache_present = cache_present; | |
525 | - map->cache_present_nbits = nregs; | |
526 | - } | |
527 | - | |
528 | - set_bit(reg, map->cache_present); | |
529 | - return 0; | |
530 | -} | |
531 | - | |
532 | 484 | bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
533 | 485 | unsigned int val) |
534 | 486 | { |
535 | 487 | |
... | ... | @@ -620,7 +572,16 @@ |
620 | 572 | return -ENOENT; |
621 | 573 | } |
622 | 574 | |
575 | +static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) | |
576 | +{ | |
577 | + if (!cache_present) | |
578 | + return true; | |
579 | + | |
580 | + return test_bit(idx, cache_present); | |
581 | +} | |
582 | + | |
623 | 583 | static int regcache_sync_block_single(struct regmap *map, void *block, |
584 | + unsigned long *cache_present, | |
624 | 585 | unsigned int block_base, |
625 | 586 | unsigned int start, unsigned int end) |
626 | 587 | { |
... | ... | @@ -630,7 +591,7 @@ |
630 | 591 | for (i = start; i < end; i++) { |
631 | 592 | regtmp = block_base + (i * map->reg_stride); |
632 | 593 | |
633 | - if (!regcache_reg_present(map, regtmp)) | |
594 | + if (!regcache_reg_present(cache_present, i)) | |
634 | 595 | continue; |
635 | 596 | |
636 | 597 | val = regcache_get_val(map, block, i); |
... | ... | @@ -681,6 +642,7 @@ |
681 | 642 | } |
682 | 643 | |
683 | 644 | static int regcache_sync_block_raw(struct regmap *map, void *block, |
645 | + unsigned long *cache_present, | |
684 | 646 | unsigned int block_base, unsigned int start, |
685 | 647 | unsigned int end) |
686 | 648 | { |
... | ... | @@ -693,7 +655,7 @@ |
693 | 655 | for (i = start; i < end; i++) { |
694 | 656 | regtmp = block_base + (i * map->reg_stride); |
695 | 657 | |
696 | - if (!regcache_reg_present(map, regtmp)) { | |
658 | + if (!regcache_reg_present(cache_present, i)) { | |
697 | 659 | ret = regcache_sync_block_raw_flush(map, &data, |
698 | 660 | base, regtmp); |
699 | 661 | if (ret != 0) |
700 | 662 | |
701 | 663 | |
... | ... | @@ -724,14 +686,15 @@ |
724 | 686 | } |
725 | 687 | |
726 | 688 | int regcache_sync_block(struct regmap *map, void *block, |
689 | + unsigned long *cache_present, | |
727 | 690 | unsigned int block_base, unsigned int start, |
728 | 691 | unsigned int end) |
729 | 692 | { |
730 | 693 | if (regmap_can_raw_write(map)) |
731 | - return regcache_sync_block_raw(map, block, block_base, | |
732 | - start, end); | |
694 | + return regcache_sync_block_raw(map, block, cache_present, | |
695 | + block_base, start, end); | |
733 | 696 | else |
734 | - return regcache_sync_block_single(map, block, block_base, | |
735 | - start, end); | |
697 | + return regcache_sync_block_single(map, block, cache_present, | |
698 | + block_base, start, end); | |
736 | 699 | } |
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
... | ... | @@ -418,6 +418,31 @@ |
418 | 418 | reg, ret); |
419 | 419 | goto err_alloc; |
420 | 420 | } |
421 | + | |
422 | + if (!chip->init_ack_masked) | |
423 | + continue; | |
424 | + | |
425 | + /* Ack masked but set interrupts */ | |
426 | + reg = chip->status_base + | |
427 | + (i * map->reg_stride * d->irq_reg_stride); | |
428 | + ret = regmap_read(map, reg, &d->status_buf[i]); | |
429 | + if (ret != 0) { | |
430 | + dev_err(map->dev, "Failed to read IRQ status: %d\n", | |
431 | + ret); | |
432 | + goto err_alloc; | |
433 | + } | |
434 | + | |
435 | + if (d->status_buf[i] && chip->ack_base) { | |
436 | + reg = chip->ack_base + | |
437 | + (i * map->reg_stride * d->irq_reg_stride); | |
438 | + ret = regmap_write(map, reg, | |
439 | + d->status_buf[i] & d->mask_buf[i]); | |
440 | + if (ret != 0) { | |
441 | + dev_err(map->dev, "Failed to ack 0x%x: %d\n", | |
442 | + reg, ret); | |
443 | + goto err_alloc; | |
444 | + } | |
445 | + } | |
421 | 446 | } |
422 | 447 | |
423 | 448 | /* Wake is disabled by default */ |
drivers/base/regmap/regmap.c
... | ... | @@ -303,6 +303,7 @@ |
303 | 303 | } |
304 | 304 | |
305 | 305 | static void regmap_lock_spinlock(void *__map) |
306 | +__acquires(&map->spinlock) | |
306 | 307 | { |
307 | 308 | struct regmap *map = __map; |
308 | 309 | unsigned long flags; |
... | ... | @@ -312,6 +313,7 @@ |
312 | 313 | } |
313 | 314 | |
314 | 315 | static void regmap_unlock_spinlock(void *__map) |
316 | +__releases(&map->spinlock) | |
315 | 317 | { |
316 | 318 | struct regmap *map = __map; |
317 | 319 | spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); |
... | ... | @@ -687,6 +689,10 @@ |
687 | 689 | unsigned win_max = win_min + |
688 | 690 | config->ranges[j].window_len - 1; |
689 | 691 | |
692 | + /* Allow data window inside its own virtual range */ | |
693 | + if (j == i) | |
694 | + continue; | |
695 | + | |
690 | 696 | if (range_cfg->range_min <= sel_reg && |
691 | 697 | sel_reg <= range_cfg->range_max) { |
692 | 698 | dev_err(map->dev, |
... | ... | @@ -1261,6 +1267,9 @@ |
1261 | 1267 | int ret; |
1262 | 1268 | void *context = _regmap_map_get_context(map); |
1263 | 1269 | |
1270 | + if (!regmap_writeable(map, reg)) | |
1271 | + return -EIO; | |
1272 | + | |
1264 | 1273 | if (!map->cache_bypass && !map->defer_caching) { |
1265 | 1274 | ret = regcache_write(map, reg, val); |
1266 | 1275 | if (ret != 0) |
1267 | 1276 | |
... | ... | @@ -1888,13 +1897,10 @@ |
1888 | 1897 | int regmap_register_patch(struct regmap *map, const struct reg_default *regs, |
1889 | 1898 | int num_regs) |
1890 | 1899 | { |
1900 | + struct reg_default *p; | |
1891 | 1901 | int i, ret; |
1892 | 1902 | bool bypass; |
1893 | 1903 | |
1894 | - /* If needed the implementation can be extended to support this */ | |
1895 | - if (map->patch) | |
1896 | - return -EBUSY; | |
1897 | - | |
1898 | 1904 | map->lock(map->lock_arg); |
1899 | 1905 | |
1900 | 1906 | bypass = map->cache_bypass; |
... | ... | @@ -1911,11 +1917,13 @@ |
1911 | 1917 | } |
1912 | 1918 | } |
1913 | 1919 | |
1914 | - map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL); | |
1915 | - if (map->patch != NULL) { | |
1916 | - memcpy(map->patch, regs, | |
1917 | - num_regs * sizeof(struct reg_default)); | |
1918 | - map->patch_regs = num_regs; | |
1920 | + p = krealloc(map->patch, | |
1921 | + sizeof(struct reg_default) * (map->patch_regs + num_regs), | |
1922 | + GFP_KERNEL); | |
1923 | + if (p) { | |
1924 | + memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); | |
1925 | + map->patch = p; | |
1926 | + map->patch_regs += num_regs; | |
1919 | 1927 | } else { |
1920 | 1928 | ret = -ENOMEM; |
1921 | 1929 | } |
include/linux/regmap.h
... | ... | @@ -472,6 +472,9 @@ |
472 | 472 | * @ack_base: Base ack address. If zero then the chip is clear on read. |
473 | 473 | * @wake_base: Base address for wake enables. If zero unsupported. |
474 | 474 | * @irq_reg_stride: Stride to use for chips where registers are not contiguous. |
475 | + * @init_ack_masked: Ack all masked interrupts once during initalization. | |
476 | + * @mask_invert: Inverted mask register: cleared bits are masked out. | |
477 | + * @wake_invert: Inverted wake register: cleared bits are wake enabled. | |
475 | 478 | * @runtime_pm: Hold a runtime PM lock on the device when accessing it. |
476 | 479 | * |
477 | 480 | * @num_regs: Number of registers in each control bank. |
... | ... | @@ -487,9 +490,10 @@ |
487 | 490 | unsigned int ack_base; |
488 | 491 | unsigned int wake_base; |
489 | 492 | unsigned int irq_reg_stride; |
490 | - unsigned int mask_invert; | |
491 | - unsigned int wake_invert; | |
492 | - bool runtime_pm; | |
493 | + bool init_ack_masked:1; | |
494 | + bool mask_invert:1; | |
495 | + bool wake_invert:1; | |
496 | + bool runtime_pm:1; | |
493 | 497 | |
494 | 498 | int num_regs; |
495 | 499 |