Commit f01ee60fffa4dc6c77122121233a793f7f696e67

Authored by Stephen Warren
Committed by Mark Brown
1 parent c0cc6fe1d0

regmap: implement register striding

regmap_config.reg_stride is introduced. All extant register addresses
are a multiple of this value. Users of serial-oriented regmap busses will
typically set this to 1. Users of the MMIO regmap bus will typically set
this based on the value size of their registers, in bytes, so 4 for a
32-bit register.

Throughout the regmap code, actual register addresses are used. Wherever
the register address is used to index some array of values, the address
is divided by the stride to determine the index, or vice-versa. Error-
checking is added to all entry-points for register address data to ensure
that register addresses actually satisfy the specified stride. The MMIO
bus ensures that the specified stride is large enough for the register
size.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>

Showing 9 changed files with 109 additions and 42 deletions (side-by-side diff)

drivers/base/regmap/internal.h
... ... @@ -62,6 +62,7 @@
62 62  
63 63 /* number of bits to (left) shift the reg value when formatting*/
64 64 int reg_shift;
  65 + int reg_stride;
65 66  
66 67 /* regcache specific members */
67 68 const struct regcache_ops *cache_ops;
drivers/base/regmap/regcache-lzo.c
... ... @@ -108,7 +108,7 @@
108 108 static inline int regcache_lzo_get_blkindex(struct regmap *map,
109 109 unsigned int reg)
110 110 {
111   - return (reg * map->cache_word_size) /
  111 + return ((reg / map->reg_stride) * map->cache_word_size) /
112 112 DIV_ROUND_UP(map->cache_size_raw,
113 113 regcache_lzo_block_count(map));
114 114 }
... ... @@ -116,9 +116,10 @@
116 116 static inline int regcache_lzo_get_blkpos(struct regmap *map,
117 117 unsigned int reg)
118 118 {
119   - return reg % (DIV_ROUND_UP(map->cache_size_raw,
120   - regcache_lzo_block_count(map)) /
121   - map->cache_word_size);
  119 + return (reg / map->reg_stride) %
  120 + (DIV_ROUND_UP(map->cache_size_raw,
  121 + regcache_lzo_block_count(map)) /
  122 + map->cache_word_size);
122 123 }
123 124  
124 125 static inline int regcache_lzo_get_blksize(struct regmap *map)
... ... @@ -322,7 +323,7 @@
322 323 }
323 324  
324 325 /* set the bit so we know we have to sync this register */
325   - set_bit(reg, lzo_block->sync_bmp);
  326 + set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
326 327 kfree(tmp_dst);
327 328 kfree(lzo_block->src);
328 329 return 0;
drivers/base/regmap/regcache-rbtree.c
... ... @@ -39,11 +39,12 @@
39 39 };
40 40  
41 41 static inline void regcache_rbtree_get_base_top_reg(
  42 + struct regmap *map,
42 43 struct regcache_rbtree_node *rbnode,
43 44 unsigned int *base, unsigned int *top)
44 45 {
45 46 *base = rbnode->base_reg;
46   - *top = rbnode->base_reg + rbnode->blklen - 1;
  47 + *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
47 48 }
48 49  
49 50 static unsigned int regcache_rbtree_get_register(
... ... @@ -70,7 +71,8 @@
70 71  
71 72 rbnode = rbtree_ctx->cached_rbnode;
72 73 if (rbnode) {
73   - regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
  74 + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
  75 + &top_reg);
74 76 if (reg >= base_reg && reg <= top_reg)
75 77 return rbnode;
76 78 }
... ... @@ -78,7 +80,8 @@
78 80 node = rbtree_ctx->root.rb_node;
79 81 while (node) {
80 82 rbnode = container_of(node, struct regcache_rbtree_node, node);
81   - regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
  83 + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
  84 + &top_reg);
82 85 if (reg >= base_reg && reg <= top_reg) {
83 86 rbtree_ctx->cached_rbnode = rbnode;
84 87 return rbnode;
... ... @@ -92,7 +95,7 @@
92 95 return NULL;
93 96 }
94 97  
95   -static int regcache_rbtree_insert(struct rb_root *root,
  98 +static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
96 99 struct regcache_rbtree_node *rbnode)
97 100 {
98 101 struct rb_node **new, *parent;
... ... @@ -106,7 +109,7 @@
106 109 rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
107 110 node);
108 111 /* base and top registers of the current rbnode */
109   - regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
  112 + regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
110 113 &top_reg_tmp);
111 114 /* base register of the rbnode to be added */
112 115 base_reg = rbnode->base_reg;
... ... @@ -138,7 +141,7 @@
138 141 unsigned int base, top;
139 142 int nodes = 0;
140 143 int registers = 0;
141   - int average;
  144 + int this_registers, average;
142 145  
143 146 map->lock(map);
144 147  
145 148  
... ... @@ -146,11 +149,12 @@
146 149 node = rb_next(node)) {
147 150 n = container_of(node, struct regcache_rbtree_node, node);
148 151  
149   - regcache_rbtree_get_base_top_reg(n, &base, &top);
150   - seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
  152 + regcache_rbtree_get_base_top_reg(map, n, &base, &top);
  153 + this_registers = ((top - base) / map->reg_stride) + 1;
  154 + seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
151 155  
152 156 nodes++;
153   - registers += top - base + 1;
  157 + registers += this_registers;
154 158 }
155 159  
156 160 if (nodes)
... ... @@ -255,7 +259,7 @@
255 259  
256 260 rbnode = regcache_rbtree_lookup(map, reg);
257 261 if (rbnode) {
258   - reg_tmp = reg - rbnode->base_reg;
  262 + reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
259 263 *value = regcache_rbtree_get_register(rbnode, reg_tmp,
260 264 map->cache_word_size);
261 265 } else {
... ... @@ -310,7 +314,7 @@
310 314 */
311 315 rbnode = regcache_rbtree_lookup(map, reg);
312 316 if (rbnode) {
313   - reg_tmp = reg - rbnode->base_reg;
  317 + reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
314 318 val = regcache_rbtree_get_register(rbnode, reg_tmp,
315 319 map->cache_word_size);
316 320 if (val == value)
317 321  
318 322  
... ... @@ -321,13 +325,15 @@
321 325 /* look for an adjacent register to the one we are about to add */
322 326 for (node = rb_first(&rbtree_ctx->root); node;
323 327 node = rb_next(node)) {
324   - rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
  328 + rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
  329 + node);
325 330 for (i = 0; i < rbnode_tmp->blklen; i++) {
326   - reg_tmp = rbnode_tmp->base_reg + i;
327   - if (abs(reg_tmp - reg) != 1)
  331 + reg_tmp = rbnode_tmp->base_reg +
  332 + (i * map->reg_stride);
  333 + if (abs(reg_tmp - reg) != map->reg_stride)
328 334 continue;
329 335 /* decide where in the block to place our register */
330   - if (reg_tmp + 1 == reg)
  336 + if (reg_tmp + map->reg_stride == reg)
331 337 pos = i + 1;
332 338 else
333 339 pos = i;
... ... @@ -357,7 +363,7 @@
357 363 return -ENOMEM;
358 364 }
359 365 regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
360   - regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
  366 + regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
361 367 rbtree_ctx->cached_rbnode = rbnode;
362 368 }
363 369  
... ... @@ -397,7 +403,7 @@
397 403 end = rbnode->blklen;
398 404  
399 405 for (i = base; i < end; i++) {
400   - regtmp = rbnode->base_reg + i;
  406 + regtmp = rbnode->base_reg + (i * map->reg_stride);
401 407 val = regcache_rbtree_get_register(rbnode, i,
402 408 map->cache_word_size);
403 409  
drivers/base/regmap/regcache.c
... ... @@ -59,7 +59,7 @@
59 59 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
60 60 val = regcache_get_val(map->reg_defaults_raw,
61 61 i, map->cache_word_size);
62   - if (regmap_volatile(map, i))
  62 + if (regmap_volatile(map, i * map->reg_stride))
63 63 continue;
64 64 count++;
65 65 }
66 66  
... ... @@ -76,9 +76,9 @@
76 76 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
77 77 val = regcache_get_val(map->reg_defaults_raw,
78 78 i, map->cache_word_size);
79   - if (regmap_volatile(map, i))
  79 + if (regmap_volatile(map, i * map->reg_stride))
80 80 continue;
81   - map->reg_defaults[j].reg = i;
  81 + map->reg_defaults[j].reg = i * map->reg_stride;
82 82 map->reg_defaults[j].def = val;
83 83 j++;
84 84 }
... ... @@ -98,6 +98,10 @@
98 98 int i;
99 99 void *tmp_buf;
100 100  
  101 + for (i = 0; i < config->num_reg_defaults; i++)
  102 + if (config->reg_defaults[i].reg % map->reg_stride)
  103 + return -EINVAL;
  104 +
101 105 if (map->cache_type == REGCACHE_NONE) {
102 106 map->cache_bypass = true;
103 107 return 0;
... ... @@ -278,6 +282,10 @@
278 282 /* Apply any patch first */
279 283 map->cache_bypass = 1;
280 284 for (i = 0; i < map->patch_regs; i++) {
  285 + if (map->patch[i].reg % map->reg_stride) {
  286 + ret = -EINVAL;
  287 + goto out;
  288 + }
281 289 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
282 290 if (ret != 0) {
283 291 dev_err(map->dev, "Failed to write %x = %x: %d\n",
drivers/base/regmap/regmap-debugfs.c
... ... @@ -80,7 +80,7 @@
80 80 val_len = 2 * map->format.val_bytes;
81 81 tot_len = reg_len + val_len + 3; /* : \n */
82 82  
83   - for (i = 0; i < map->max_register + 1; i++) {
  83 + for (i = 0; i <= map->max_register; i += map->reg_stride) {
84 84 if (!regmap_readable(map, i))
85 85 continue;
86 86  
... ... @@ -197,7 +197,7 @@
197 197 reg_len = regmap_calc_reg_len(map->max_register, buf, count);
198 198 tot_len = reg_len + 10; /* ': R W V P\n' */
199 199  
200   - for (i = 0; i < map->max_register + 1; i++) {
  200 + for (i = 0; i <= map->max_register; i += map->reg_stride) {
201 201 /* Ignore registers which are neither readable nor writable */
202 202 if (!regmap_readable(map, i) && !regmap_writeable(map, i))
203 203 continue;
drivers/base/regmap/regmap-irq.c
... ... @@ -58,11 +58,12 @@
58 58 * suppress pointless writes.
59 59 */
60 60 for (i = 0; i < d->chip->num_regs; i++) {
61   - ret = regmap_update_bits(d->map, d->chip->mask_base + i,
  61 + ret = regmap_update_bits(d->map, d->chip->mask_base +
  62 + (i * map->map->reg_stride),
62 63 d->mask_buf_def[i], d->mask_buf[i]);
63 64 if (ret != 0)
64 65 dev_err(d->map->dev, "Failed to sync masks in %x\n",
65   - d->chip->mask_base + i);
  66 + d->chip->mask_base + (i * map->reg_stride));
66 67 }
67 68  
68 69 mutex_unlock(&d->lock);
... ... @@ -73,7 +74,7 @@
73 74 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
74 75 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
75 76  
76   - d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
  77 + d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
77 78 }
78 79  
79 80 static void regmap_irq_disable(struct irq_data *data)
... ... @@ -81,7 +82,7 @@
81 82 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
82 83 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
83 84  
84   - d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
  85 + d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
85 86 }
86 87  
87 88 static struct irq_chip regmap_irq_chip = {
88 89  
89 90  
... ... @@ -136,17 +137,19 @@
136 137 data->status_buf[i] &= ~data->mask_buf[i];
137 138  
138 139 if (data->status_buf[i] && chip->ack_base) {
139   - ret = regmap_write(map, chip->ack_base + i,
  140 + ret = regmap_write(map, chip->ack_base +
  141 + (i * map->reg_stride),
140 142 data->status_buf[i]);
141 143 if (ret != 0)
142 144 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
143   - chip->ack_base + i, ret);
  145 + chip->ack_base + (i * map->reg_stride),
  146 + ret);
144 147 }
145 148 }
146 149  
147 150 for (i = 0; i < chip->num_irqs; i++) {
148   - if (data->status_buf[chip->irqs[i].reg_offset] &
149   - chip->irqs[i].mask) {
  151 + if (data->status_buf[chip->irqs[i].reg_offset /
  152 + map->reg_stride] & chip->irqs[i].mask) {
150 153 handle_nested_irq(data->irq_base + i);
151 154 handled = true;
152 155 }
... ... @@ -181,6 +184,14 @@
181 184 int cur_irq, i;
182 185 int ret = -ENOMEM;
183 186  
  187 + for (i = 0; i < chip->num_irqs; i++) {
  188 + if (chip->irqs[i].reg_offset % map->reg_stride)
  189 + return -EINVAL;
  190 + if (chip->irqs[i].reg_offset / map->reg_stride >=
  191 + chip->num_regs)
  192 + return -EINVAL;
  193 + }
  194 +
184 195 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
185 196 if (irq_base < 0) {
186 197 dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
187 198  
188 199  
... ... @@ -218,16 +229,17 @@
218 229 mutex_init(&d->lock);
219 230  
220 231 for (i = 0; i < chip->num_irqs; i++)
221   - d->mask_buf_def[chip->irqs[i].reg_offset]
  232 + d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
222 233 |= chip->irqs[i].mask;
223 234  
224 235 /* Mask all the interrupts by default */
225 236 for (i = 0; i < chip->num_regs; i++) {
226 237 d->mask_buf[i] = d->mask_buf_def[i];
227   - ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
  238 + ret = regmap_write(map, chip->mask_base + (i * map->reg_stride),
  239 + d->mask_buf[i]);
228 240 if (ret != 0) {
229 241 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
230   - chip->mask_base + i, ret);
  242 + chip->mask_base + (i * map->reg_stride), ret);
231 243 goto err_alloc;
232 244 }
233 245 }
drivers/base/regmap/regmap-mmio.c
... ... @@ -130,6 +130,7 @@
130 130 const struct regmap_config *config)
131 131 {
132 132 struct regmap_mmio_context *ctx;
  133 + int min_stride;
133 134  
134 135 if (config->reg_bits != 32)
135 136 return ERR_PTR(-EINVAL);
136 137  
137 138  
138 139  
139 140  
... ... @@ -139,15 +140,27 @@
139 140  
140 141 switch (config->val_bits) {
141 142 case 8:
  143 + /* The core treats 0 as 1 */
  144 + min_stride = 0;
  145 + break;
142 146 case 16:
  147 + min_stride = 2;
  148 + break;
143 149 case 32:
  150 + min_stride = 4;
  151 + break;
144 152 #ifdef CONFIG_64BIT
145 153 case 64:
  154 + min_stride = 8;
  155 + break;
146 156 #endif
147 157 break;
148 158 default:
149 159 return ERR_PTR(-EINVAL);
150 160 }
  161 +
  162 + if (config->reg_stride < min_stride)
  163 + return ERR_PTR(-EINVAL);
151 164  
152 165 ctx = kzalloc(GFP_KERNEL, sizeof(*ctx));
153 166 if (!ctx)
drivers/base/regmap/regmap.c
... ... @@ -243,6 +243,10 @@
243 243 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
244 244 map->format.buf_size += map->format.pad_bytes;
245 245 map->reg_shift = config->pad_bits % 8;
  246 + if (config->reg_stride)
  247 + map->reg_stride = config->reg_stride;
  248 + else
  249 + map->reg_stride = 1;
246 250 map->dev = dev;
247 251 map->bus = bus;
248 252 map->bus_context = bus_context;
... ... @@ -469,7 +473,8 @@
469 473 /* Check for unwritable registers before we start */
470 474 if (map->writeable_reg)
471 475 for (i = 0; i < val_len / map->format.val_bytes; i++)
472   - if (!map->writeable_reg(map->dev, reg + i))
  476 + if (!map->writeable_reg(map->dev,
  477 + reg + (i * map->reg_stride)))
473 478 return -EINVAL;
474 479  
475 480 if (!map->cache_bypass && map->format.parse_val) {
... ... @@ -478,7 +483,8 @@
478 483 for (i = 0; i < val_len / val_bytes; i++) {
479 484 memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
480 485 ival = map->format.parse_val(map->work_buf);
481   - ret = regcache_write(map, reg + i, ival);
  486 + ret = regcache_write(map, reg + (i * map->reg_stride),
  487 + ival);
482 488 if (ret) {
483 489 dev_err(map->dev,
484 490 "Error in caching of register: %u ret: %d\n",
... ... @@ -590,6 +596,9 @@
590 596 {
591 597 int ret;
592 598  
  599 + if (reg % map->reg_stride)
  600 + return -EINVAL;
  601 +
593 602 map->lock(map);
594 603  
595 604 ret = _regmap_write(map, reg, val);
... ... @@ -623,6 +632,8 @@
623 632  
624 633 if (val_len % map->format.val_bytes)
625 634 return -EINVAL;
  635 + if (reg % map->reg_stride)
  636 + return -EINVAL;
626 637  
627 638 map->lock(map);
628 639  
... ... @@ -657,6 +668,8 @@
657 668  
658 669 if (!map->format.parse_val)
659 670 return -EINVAL;
  671 + if (reg % map->reg_stride)
  672 + return -EINVAL;
660 673  
661 674 map->lock(map);
662 675  
... ... @@ -753,6 +766,9 @@
753 766 {
754 767 int ret;
755 768  
  769 + if (reg % map->reg_stride)
  770 + return -EINVAL;
  771 +
756 772 map->lock(map);
757 773  
758 774 ret = _regmap_read(map, reg, val);
... ... @@ -784,6 +800,8 @@
784 800  
785 801 if (val_len % map->format.val_bytes)
786 802 return -EINVAL;
  803 + if (reg % map->reg_stride)
  804 + return -EINVAL;
787 805  
788 806 map->lock(map);
789 807  
... ... @@ -797,7 +815,8 @@
797 815 * cost as we expect to hit the cache.
798 816 */
799 817 for (i = 0; i < val_count; i++) {
800   - ret = _regmap_read(map, reg + i, &v);
  818 + ret = _regmap_read(map, reg + (i * map->reg_stride),
  819 + &v);
801 820 if (ret != 0)
802 821 goto out;
803 822  
... ... @@ -832,6 +851,8 @@
832 851  
833 852 if (!map->format.parse_val)
834 853 return -EINVAL;
  854 + if (reg % map->reg_stride)
  855 + return -EINVAL;
835 856  
836 857 if (vol || map->cache_type == REGCACHE_NONE) {
837 858 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
... ... @@ -842,7 +863,8 @@
842 863 map->format.parse_val(val + i);
843 864 } else {
844 865 for (i = 0; i < val_count; i++) {
845   - ret = regmap_read(map, reg + i, val + (i * val_bytes));
  866 + ret = regmap_read(map, reg + (i * map->reg_stride),
  867 + val + (i * val_bytes));
846 868 if (ret != 0)
847 869 return ret;
848 870 }
include/linux/regmap.h
... ... @@ -50,6 +50,9 @@
50 50 * register regions.
51 51 *
52 52 * @reg_bits: Number of bits in a register address, mandatory.
  53 + * @reg_stride: The register address stride. Valid register addresses are a
  54 + * multiple of this value. If set to 0, a value of 1 will be
  55 + * used.
53 56 * @pad_bits: Number of bits of padding between register and value.
54 57 * @val_bits: Number of bits in a register value, mandatory.
55 58 *
... ... @@ -83,6 +86,7 @@
83 86 const char *name;
84 87  
85 88 int reg_bits;
  89 + int reg_stride;
86 90 int pad_bits;
87 91 int val_bits;
88 92