Commit f01ee60fffa4dc6c77122121233a793f7f696e67

Authored by Stephen Warren
Committed by Mark Brown
1 parent c0cc6fe1d0

regmap: implement register striding

regmap_config.reg_stride is introduced. All register addresses handled by
a map must be a multiple of this value. Users of serial-oriented regmap
buses will typically set this to 1. Users of the MMIO regmap bus will
typically set this based on the value size of their registers, in bytes,
so 4 for a 32-bit register.
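
As a minimal illustration only (not part of this change), a driver for a
hypothetical MMIO block with 32-bit registers spaced 4 bytes apart might
fill in its regmap_config like this; everything other than the existing
regmap_config fields is made up:

	/* Hypothetical device: 32-bit registers, 4-byte address stride. */
	static const struct regmap_config example_mmio_regmap_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,	/* register addresses are multiples of 4 */
		.max_register	= 0x100,
	};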

Throughout the regmap code, actual register addresses are used. Wherever
a register address is used to index some array of values, the address is
divided by the stride to determine the index, and the index is multiplied
by the stride to recover the address. Error checking is added to all
entry points that accept register addresses, to ensure that those
addresses actually satisfy the specified stride. The MMIO bus ensures
that the specified stride is large enough for the register size.
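
The conversions and the alignment check reduce to the following pattern
(a sketch for illustration, assuming the struct regmap definition from
drivers/base/regmap/internal.h; the helper names are hypothetical and do
not appear in this commit):

	/* Address <-> index conversion used wherever values are stored per
	 * register (caches, sync bitmaps, register defaults). */
	static inline unsigned int example_reg_to_index(struct regmap *map,
							unsigned int reg)
	{
		return reg / map->reg_stride;
	}

	/* Check added at API entry points: reject unaligned addresses. */
	static inline int example_check_stride(struct regmap *map,
					       unsigned int reg)
	{
		if (reg % map->reg_stride)
			return -EINVAL;
		return 0;
	}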

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>

Showing 9 changed files with 109 additions and 42 deletions

drivers/base/regmap/internal.h
1 /* 1 /*
2 * Register map access API internal header 2 * Register map access API internal header
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #ifndef _REGMAP_INTERNAL_H 13 #ifndef _REGMAP_INTERNAL_H
14 #define _REGMAP_INTERNAL_H 14 #define _REGMAP_INTERNAL_H
15 15
16 #include <linux/regmap.h> 16 #include <linux/regmap.h>
17 #include <linux/fs.h> 17 #include <linux/fs.h>
18 18
19 struct regmap; 19 struct regmap;
20 struct regcache_ops; 20 struct regcache_ops;
21 21
22 struct regmap_format { 22 struct regmap_format {
23 size_t buf_size; 23 size_t buf_size;
24 size_t reg_bytes; 24 size_t reg_bytes;
25 size_t pad_bytes; 25 size_t pad_bytes;
26 size_t val_bytes; 26 size_t val_bytes;
27 void (*format_write)(struct regmap *map, 27 void (*format_write)(struct regmap *map,
28 unsigned int reg, unsigned int val); 28 unsigned int reg, unsigned int val);
29 void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); 29 void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
30 void (*format_val)(void *buf, unsigned int val, unsigned int shift); 30 void (*format_val)(void *buf, unsigned int val, unsigned int shift);
31 unsigned int (*parse_val)(void *buf); 31 unsigned int (*parse_val)(void *buf);
32 }; 32 };
33 33
34 typedef void (*regmap_lock)(struct regmap *map); 34 typedef void (*regmap_lock)(struct regmap *map);
35 typedef void (*regmap_unlock)(struct regmap *map); 35 typedef void (*regmap_unlock)(struct regmap *map);
36 36
37 struct regmap { 37 struct regmap {
38 struct mutex mutex; 38 struct mutex mutex;
39 spinlock_t spinlock; 39 spinlock_t spinlock;
40 regmap_lock lock; 40 regmap_lock lock;
41 regmap_unlock unlock; 41 regmap_unlock unlock;
42 42
43 struct device *dev; /* Device we do I/O on */ 43 struct device *dev; /* Device we do I/O on */
44 void *work_buf; /* Scratch buffer used to format I/O */ 44 void *work_buf; /* Scratch buffer used to format I/O */
45 struct regmap_format format; /* Buffer format */ 45 struct regmap_format format; /* Buffer format */
46 const struct regmap_bus *bus; 46 const struct regmap_bus *bus;
47 void *bus_context; 47 void *bus_context;
48 48
49 #ifdef CONFIG_DEBUG_FS 49 #ifdef CONFIG_DEBUG_FS
50 struct dentry *debugfs; 50 struct dentry *debugfs;
51 const char *debugfs_name; 51 const char *debugfs_name;
52 #endif 52 #endif
53 53
54 unsigned int max_register; 54 unsigned int max_register;
55 bool (*writeable_reg)(struct device *dev, unsigned int reg); 55 bool (*writeable_reg)(struct device *dev, unsigned int reg);
56 bool (*readable_reg)(struct device *dev, unsigned int reg); 56 bool (*readable_reg)(struct device *dev, unsigned int reg);
57 bool (*volatile_reg)(struct device *dev, unsigned int reg); 57 bool (*volatile_reg)(struct device *dev, unsigned int reg);
58 bool (*precious_reg)(struct device *dev, unsigned int reg); 58 bool (*precious_reg)(struct device *dev, unsigned int reg);
59 59
60 u8 read_flag_mask; 60 u8 read_flag_mask;
61 u8 write_flag_mask; 61 u8 write_flag_mask;
62 62
63 /* number of bits to (left) shift the reg value when formatting*/ 63 /* number of bits to (left) shift the reg value when formatting*/
64 int reg_shift; 64 int reg_shift;
65 int reg_stride;
65 66
66 /* regcache specific members */ 67 /* regcache specific members */
67 const struct regcache_ops *cache_ops; 68 const struct regcache_ops *cache_ops;
68 enum regcache_type cache_type; 69 enum regcache_type cache_type;
69 70
70 /* number of bytes in reg_defaults_raw */ 71 /* number of bytes in reg_defaults_raw */
71 unsigned int cache_size_raw; 72 unsigned int cache_size_raw;
72 /* number of bytes per word in reg_defaults_raw */ 73 /* number of bytes per word in reg_defaults_raw */
73 unsigned int cache_word_size; 74 unsigned int cache_word_size;
74 /* number of entries in reg_defaults */ 75 /* number of entries in reg_defaults */
75 unsigned int num_reg_defaults; 76 unsigned int num_reg_defaults;
76 /* number of entries in reg_defaults_raw */ 77 /* number of entries in reg_defaults_raw */
77 unsigned int num_reg_defaults_raw; 78 unsigned int num_reg_defaults_raw;
78 79
79 /* if set, only the cache is modified not the HW */ 80 /* if set, only the cache is modified not the HW */
80 u32 cache_only; 81 u32 cache_only;
81 /* if set, only the HW is modified not the cache */ 82 /* if set, only the HW is modified not the cache */
82 u32 cache_bypass; 83 u32 cache_bypass;
83 /* if set, remember to free reg_defaults_raw */ 84 /* if set, remember to free reg_defaults_raw */
84 bool cache_free; 85 bool cache_free;
85 86
86 struct reg_default *reg_defaults; 87 struct reg_default *reg_defaults;
87 const void *reg_defaults_raw; 88 const void *reg_defaults_raw;
88 void *cache; 89 void *cache;
89 u32 cache_dirty; 90 u32 cache_dirty;
90 91
91 struct reg_default *patch; 92 struct reg_default *patch;
92 int patch_regs; 93 int patch_regs;
93 }; 94 };
94 95
95 struct regcache_ops { 96 struct regcache_ops {
96 const char *name; 97 const char *name;
97 enum regcache_type type; 98 enum regcache_type type;
98 int (*init)(struct regmap *map); 99 int (*init)(struct regmap *map);
99 int (*exit)(struct regmap *map); 100 int (*exit)(struct regmap *map);
100 int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); 101 int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
101 int (*write)(struct regmap *map, unsigned int reg, unsigned int value); 102 int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
102 int (*sync)(struct regmap *map, unsigned int min, unsigned int max); 103 int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
103 }; 104 };
104 105
105 bool regmap_writeable(struct regmap *map, unsigned int reg); 106 bool regmap_writeable(struct regmap *map, unsigned int reg);
106 bool regmap_readable(struct regmap *map, unsigned int reg); 107 bool regmap_readable(struct regmap *map, unsigned int reg);
107 bool regmap_volatile(struct regmap *map, unsigned int reg); 108 bool regmap_volatile(struct regmap *map, unsigned int reg);
108 bool regmap_precious(struct regmap *map, unsigned int reg); 109 bool regmap_precious(struct regmap *map, unsigned int reg);
109 110
110 int _regmap_write(struct regmap *map, unsigned int reg, 111 int _regmap_write(struct regmap *map, unsigned int reg,
111 unsigned int val); 112 unsigned int val);
112 113
113 #ifdef CONFIG_DEBUG_FS 114 #ifdef CONFIG_DEBUG_FS
114 extern void regmap_debugfs_initcall(void); 115 extern void regmap_debugfs_initcall(void);
115 extern void regmap_debugfs_init(struct regmap *map, const char *name); 116 extern void regmap_debugfs_init(struct regmap *map, const char *name);
116 extern void regmap_debugfs_exit(struct regmap *map); 117 extern void regmap_debugfs_exit(struct regmap *map);
117 #else 118 #else
118 static inline void regmap_debugfs_initcall(void) { } 119 static inline void regmap_debugfs_initcall(void) { }
119 static inline void regmap_debugfs_init(struct regmap *map, const char *name) { } 120 static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
120 static inline void regmap_debugfs_exit(struct regmap *map) { } 121 static inline void regmap_debugfs_exit(struct regmap *map) { }
121 #endif 122 #endif
122 123
123 /* regcache core declarations */ 124 /* regcache core declarations */
124 int regcache_init(struct regmap *map, const struct regmap_config *config); 125 int regcache_init(struct regmap *map, const struct regmap_config *config);
125 void regcache_exit(struct regmap *map); 126 void regcache_exit(struct regmap *map);
126 int regcache_read(struct regmap *map, 127 int regcache_read(struct regmap *map,
127 unsigned int reg, unsigned int *value); 128 unsigned int reg, unsigned int *value);
128 int regcache_write(struct regmap *map, 129 int regcache_write(struct regmap *map,
129 unsigned int reg, unsigned int value); 130 unsigned int reg, unsigned int value);
130 int regcache_sync(struct regmap *map); 131 int regcache_sync(struct regmap *map);
131 132
132 unsigned int regcache_get_val(const void *base, unsigned int idx, 133 unsigned int regcache_get_val(const void *base, unsigned int idx,
133 unsigned int word_size); 134 unsigned int word_size);
134 bool regcache_set_val(void *base, unsigned int idx, 135 bool regcache_set_val(void *base, unsigned int idx,
135 unsigned int val, unsigned int word_size); 136 unsigned int val, unsigned int word_size);
136 int regcache_lookup_reg(struct regmap *map, unsigned int reg); 137 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
137 138
138 extern struct regcache_ops regcache_rbtree_ops; 139 extern struct regcache_ops regcache_rbtree_ops;
139 extern struct regcache_ops regcache_lzo_ops; 140 extern struct regcache_ops regcache_lzo_ops;
140 141
141 #endif 142 #endif
142 143
drivers/base/regmap/regcache-lzo.c
1 /* 1 /*
2 * Register cache access API - LZO caching support 2 * Register cache access API - LZO caching support
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> 6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/device.h> 14 #include <linux/device.h>
15 #include <linux/lzo.h> 15 #include <linux/lzo.h>
16 16
17 #include "internal.h" 17 #include "internal.h"
18 18
19 static int regcache_lzo_exit(struct regmap *map); 19 static int regcache_lzo_exit(struct regmap *map);
20 20
21 struct regcache_lzo_ctx { 21 struct regcache_lzo_ctx {
22 void *wmem; 22 void *wmem;
23 void *dst; 23 void *dst;
24 const void *src; 24 const void *src;
25 size_t src_len; 25 size_t src_len;
26 size_t dst_len; 26 size_t dst_len;
27 size_t decompressed_size; 27 size_t decompressed_size;
28 unsigned long *sync_bmp; 28 unsigned long *sync_bmp;
29 int sync_bmp_nbits; 29 int sync_bmp_nbits;
30 }; 30 };
31 31
32 #define LZO_BLOCK_NUM 8 32 #define LZO_BLOCK_NUM 8
33 static int regcache_lzo_block_count(struct regmap *map) 33 static int regcache_lzo_block_count(struct regmap *map)
34 { 34 {
35 return LZO_BLOCK_NUM; 35 return LZO_BLOCK_NUM;
36 } 36 }
37 37
38 static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx) 38 static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
39 { 39 {
40 lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); 40 lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
41 if (!lzo_ctx->wmem) 41 if (!lzo_ctx->wmem)
42 return -ENOMEM; 42 return -ENOMEM;
43 return 0; 43 return 0;
44 } 44 }
45 45
46 static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx) 46 static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
47 { 47 {
48 size_t compress_size; 48 size_t compress_size;
49 int ret; 49 int ret;
50 50
51 ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, 51 ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
52 lzo_ctx->dst, &compress_size, lzo_ctx->wmem); 52 lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
53 if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) 53 if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
54 return -EINVAL; 54 return -EINVAL;
55 lzo_ctx->dst_len = compress_size; 55 lzo_ctx->dst_len = compress_size;
56 return 0; 56 return 0;
57 } 57 }
58 58
59 static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx) 59 static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
60 { 60 {
61 size_t dst_len; 61 size_t dst_len;
62 int ret; 62 int ret;
63 63
64 dst_len = lzo_ctx->dst_len; 64 dst_len = lzo_ctx->dst_len;
65 ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, 65 ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
66 lzo_ctx->dst, &dst_len); 66 lzo_ctx->dst, &dst_len);
67 if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) 67 if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
68 return -EINVAL; 68 return -EINVAL;
69 return 0; 69 return 0;
70 } 70 }
71 71
72 static int regcache_lzo_compress_cache_block(struct regmap *map, 72 static int regcache_lzo_compress_cache_block(struct regmap *map,
73 struct regcache_lzo_ctx *lzo_ctx) 73 struct regcache_lzo_ctx *lzo_ctx)
74 { 74 {
75 int ret; 75 int ret;
76 76
77 lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); 77 lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
78 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); 78 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
79 if (!lzo_ctx->dst) { 79 if (!lzo_ctx->dst) {
80 lzo_ctx->dst_len = 0; 80 lzo_ctx->dst_len = 0;
81 return -ENOMEM; 81 return -ENOMEM;
82 } 82 }
83 83
84 ret = regcache_lzo_compress(lzo_ctx); 84 ret = regcache_lzo_compress(lzo_ctx);
85 if (ret < 0) 85 if (ret < 0)
86 return ret; 86 return ret;
87 return 0; 87 return 0;
88 } 88 }
89 89
90 static int regcache_lzo_decompress_cache_block(struct regmap *map, 90 static int regcache_lzo_decompress_cache_block(struct regmap *map,
91 struct regcache_lzo_ctx *lzo_ctx) 91 struct regcache_lzo_ctx *lzo_ctx)
92 { 92 {
93 int ret; 93 int ret;
94 94
95 lzo_ctx->dst_len = lzo_ctx->decompressed_size; 95 lzo_ctx->dst_len = lzo_ctx->decompressed_size;
96 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); 96 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
97 if (!lzo_ctx->dst) { 97 if (!lzo_ctx->dst) {
98 lzo_ctx->dst_len = 0; 98 lzo_ctx->dst_len = 0;
99 return -ENOMEM; 99 return -ENOMEM;
100 } 100 }
101 101
102 ret = regcache_lzo_decompress(lzo_ctx); 102 ret = regcache_lzo_decompress(lzo_ctx);
103 if (ret < 0) 103 if (ret < 0)
104 return ret; 104 return ret;
105 return 0; 105 return 0;
106 } 106 }
107 107
108 static inline int regcache_lzo_get_blkindex(struct regmap *map, 108 static inline int regcache_lzo_get_blkindex(struct regmap *map,
109 unsigned int reg) 109 unsigned int reg)
110 { 110 {
111 return (reg * map->cache_word_size) / 111 return ((reg / map->reg_stride) * map->cache_word_size) /
112 DIV_ROUND_UP(map->cache_size_raw, 112 DIV_ROUND_UP(map->cache_size_raw,
113 regcache_lzo_block_count(map)); 113 regcache_lzo_block_count(map));
114 } 114 }
115 115
116 static inline int regcache_lzo_get_blkpos(struct regmap *map, 116 static inline int regcache_lzo_get_blkpos(struct regmap *map,
117 unsigned int reg) 117 unsigned int reg)
118 { 118 {
119 return reg % (DIV_ROUND_UP(map->cache_size_raw, 119 return (reg / map->reg_stride) %
120 regcache_lzo_block_count(map)) / 120 (DIV_ROUND_UP(map->cache_size_raw,
121 map->cache_word_size); 121 regcache_lzo_block_count(map)) /
122 map->cache_word_size);
122 } 123 }
123 124
124 static inline int regcache_lzo_get_blksize(struct regmap *map) 125 static inline int regcache_lzo_get_blksize(struct regmap *map)
125 { 126 {
126 return DIV_ROUND_UP(map->cache_size_raw, 127 return DIV_ROUND_UP(map->cache_size_raw,
127 regcache_lzo_block_count(map)); 128 regcache_lzo_block_count(map));
128 } 129 }
129 130
130 static int regcache_lzo_init(struct regmap *map) 131 static int regcache_lzo_init(struct regmap *map)
131 { 132 {
132 struct regcache_lzo_ctx **lzo_blocks; 133 struct regcache_lzo_ctx **lzo_blocks;
133 size_t bmp_size; 134 size_t bmp_size;
134 int ret, i, blksize, blkcount; 135 int ret, i, blksize, blkcount;
135 const char *p, *end; 136 const char *p, *end;
136 unsigned long *sync_bmp; 137 unsigned long *sync_bmp;
137 138
138 ret = 0; 139 ret = 0;
139 140
140 blkcount = regcache_lzo_block_count(map); 141 blkcount = regcache_lzo_block_count(map);
141 map->cache = kzalloc(blkcount * sizeof *lzo_blocks, 142 map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
142 GFP_KERNEL); 143 GFP_KERNEL);
143 if (!map->cache) 144 if (!map->cache)
144 return -ENOMEM; 145 return -ENOMEM;
145 lzo_blocks = map->cache; 146 lzo_blocks = map->cache;
146 147
147 /* 148 /*
148 * allocate a bitmap to be used when syncing the cache with 149 * allocate a bitmap to be used when syncing the cache with
149 * the hardware. Each time a register is modified, the corresponding 150 * the hardware. Each time a register is modified, the corresponding
150 * bit is set in the bitmap, so we know that we have to sync 151 * bit is set in the bitmap, so we know that we have to sync
151 * that register. 152 * that register.
152 */ 153 */
153 bmp_size = map->num_reg_defaults_raw; 154 bmp_size = map->num_reg_defaults_raw;
154 sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), 155 sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
155 GFP_KERNEL); 156 GFP_KERNEL);
156 if (!sync_bmp) { 157 if (!sync_bmp) {
157 ret = -ENOMEM; 158 ret = -ENOMEM;
158 goto err; 159 goto err;
159 } 160 }
160 bitmap_zero(sync_bmp, bmp_size); 161 bitmap_zero(sync_bmp, bmp_size);
161 162
162 /* allocate the lzo blocks and initialize them */ 163 /* allocate the lzo blocks and initialize them */
163 for (i = 0; i < blkcount; i++) { 164 for (i = 0; i < blkcount; i++) {
164 lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, 165 lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
165 GFP_KERNEL); 166 GFP_KERNEL);
166 if (!lzo_blocks[i]) { 167 if (!lzo_blocks[i]) {
167 kfree(sync_bmp); 168 kfree(sync_bmp);
168 ret = -ENOMEM; 169 ret = -ENOMEM;
169 goto err; 170 goto err;
170 } 171 }
171 lzo_blocks[i]->sync_bmp = sync_bmp; 172 lzo_blocks[i]->sync_bmp = sync_bmp;
172 lzo_blocks[i]->sync_bmp_nbits = bmp_size; 173 lzo_blocks[i]->sync_bmp_nbits = bmp_size;
173 /* alloc the working space for the compressed block */ 174 /* alloc the working space for the compressed block */
174 ret = regcache_lzo_prepare(lzo_blocks[i]); 175 ret = regcache_lzo_prepare(lzo_blocks[i]);
175 if (ret < 0) 176 if (ret < 0)
176 goto err; 177 goto err;
177 } 178 }
178 179
179 blksize = regcache_lzo_get_blksize(map); 180 blksize = regcache_lzo_get_blksize(map);
180 p = map->reg_defaults_raw; 181 p = map->reg_defaults_raw;
181 end = map->reg_defaults_raw + map->cache_size_raw; 182 end = map->reg_defaults_raw + map->cache_size_raw;
182 /* compress the register map and fill the lzo blocks */ 183 /* compress the register map and fill the lzo blocks */
183 for (i = 0; i < blkcount; i++, p += blksize) { 184 for (i = 0; i < blkcount; i++, p += blksize) {
184 lzo_blocks[i]->src = p; 185 lzo_blocks[i]->src = p;
185 if (p + blksize > end) 186 if (p + blksize > end)
186 lzo_blocks[i]->src_len = end - p; 187 lzo_blocks[i]->src_len = end - p;
187 else 188 else
188 lzo_blocks[i]->src_len = blksize; 189 lzo_blocks[i]->src_len = blksize;
189 ret = regcache_lzo_compress_cache_block(map, 190 ret = regcache_lzo_compress_cache_block(map,
190 lzo_blocks[i]); 191 lzo_blocks[i]);
191 if (ret < 0) 192 if (ret < 0)
192 goto err; 193 goto err;
193 lzo_blocks[i]->decompressed_size = 194 lzo_blocks[i]->decompressed_size =
194 lzo_blocks[i]->src_len; 195 lzo_blocks[i]->src_len;
195 } 196 }
196 197
197 return 0; 198 return 0;
198 err: 199 err:
199 regcache_lzo_exit(map); 200 regcache_lzo_exit(map);
200 return ret; 201 return ret;
201 } 202 }
202 203
203 static int regcache_lzo_exit(struct regmap *map) 204 static int regcache_lzo_exit(struct regmap *map)
204 { 205 {
205 struct regcache_lzo_ctx **lzo_blocks; 206 struct regcache_lzo_ctx **lzo_blocks;
206 int i, blkcount; 207 int i, blkcount;
207 208
208 lzo_blocks = map->cache; 209 lzo_blocks = map->cache;
209 if (!lzo_blocks) 210 if (!lzo_blocks)
210 return 0; 211 return 0;
211 212
212 blkcount = regcache_lzo_block_count(map); 213 blkcount = regcache_lzo_block_count(map);
213 /* 214 /*
214 * the pointer to the bitmap used for syncing the cache 215 * the pointer to the bitmap used for syncing the cache
215 * is shared amongst all lzo_blocks. Ensure it is freed 216 * is shared amongst all lzo_blocks. Ensure it is freed
216 * only once. 217 * only once.
217 */ 218 */
218 if (lzo_blocks[0]) 219 if (lzo_blocks[0])
219 kfree(lzo_blocks[0]->sync_bmp); 220 kfree(lzo_blocks[0]->sync_bmp);
220 for (i = 0; i < blkcount; i++) { 221 for (i = 0; i < blkcount; i++) {
221 if (lzo_blocks[i]) { 222 if (lzo_blocks[i]) {
222 kfree(lzo_blocks[i]->wmem); 223 kfree(lzo_blocks[i]->wmem);
223 kfree(lzo_blocks[i]->dst); 224 kfree(lzo_blocks[i]->dst);
224 } 225 }
225 /* each lzo_block is a pointer returned by kmalloc or NULL */ 226 /* each lzo_block is a pointer returned by kmalloc or NULL */
226 kfree(lzo_blocks[i]); 227 kfree(lzo_blocks[i]);
227 } 228 }
228 kfree(lzo_blocks); 229 kfree(lzo_blocks);
229 map->cache = NULL; 230 map->cache = NULL;
230 return 0; 231 return 0;
231 } 232 }
232 233
233 static int regcache_lzo_read(struct regmap *map, 234 static int regcache_lzo_read(struct regmap *map,
234 unsigned int reg, unsigned int *value) 235 unsigned int reg, unsigned int *value)
235 { 236 {
236 struct regcache_lzo_ctx *lzo_block, **lzo_blocks; 237 struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
237 int ret, blkindex, blkpos; 238 int ret, blkindex, blkpos;
238 size_t blksize, tmp_dst_len; 239 size_t blksize, tmp_dst_len;
239 void *tmp_dst; 240 void *tmp_dst;
240 241
241 /* index of the compressed lzo block */ 242 /* index of the compressed lzo block */
242 blkindex = regcache_lzo_get_blkindex(map, reg); 243 blkindex = regcache_lzo_get_blkindex(map, reg);
243 /* register index within the decompressed block */ 244 /* register index within the decompressed block */
244 blkpos = regcache_lzo_get_blkpos(map, reg); 245 blkpos = regcache_lzo_get_blkpos(map, reg);
245 /* size of the compressed block */ 246 /* size of the compressed block */
246 blksize = regcache_lzo_get_blksize(map); 247 blksize = regcache_lzo_get_blksize(map);
247 lzo_blocks = map->cache; 248 lzo_blocks = map->cache;
248 lzo_block = lzo_blocks[blkindex]; 249 lzo_block = lzo_blocks[blkindex];
249 250
250 /* save the pointer and length of the compressed block */ 251 /* save the pointer and length of the compressed block */
251 tmp_dst = lzo_block->dst; 252 tmp_dst = lzo_block->dst;
252 tmp_dst_len = lzo_block->dst_len; 253 tmp_dst_len = lzo_block->dst_len;
253 254
254 /* prepare the source to be the compressed block */ 255 /* prepare the source to be the compressed block */
255 lzo_block->src = lzo_block->dst; 256 lzo_block->src = lzo_block->dst;
256 lzo_block->src_len = lzo_block->dst_len; 257 lzo_block->src_len = lzo_block->dst_len;
257 258
258 /* decompress the block */ 259 /* decompress the block */
259 ret = regcache_lzo_decompress_cache_block(map, lzo_block); 260 ret = regcache_lzo_decompress_cache_block(map, lzo_block);
260 if (ret >= 0) 261 if (ret >= 0)
261 /* fetch the value from the cache */ 262 /* fetch the value from the cache */
262 *value = regcache_get_val(lzo_block->dst, blkpos, 263 *value = regcache_get_val(lzo_block->dst, blkpos,
263 map->cache_word_size); 264 map->cache_word_size);
264 265
265 kfree(lzo_block->dst); 266 kfree(lzo_block->dst);
266 /* restore the pointer and length of the compressed block */ 267 /* restore the pointer and length of the compressed block */
267 lzo_block->dst = tmp_dst; 268 lzo_block->dst = tmp_dst;
268 lzo_block->dst_len = tmp_dst_len; 269 lzo_block->dst_len = tmp_dst_len;
269 270
270 return ret; 271 return ret;
271 } 272 }
272 273
273 static int regcache_lzo_write(struct regmap *map, 274 static int regcache_lzo_write(struct regmap *map,
274 unsigned int reg, unsigned int value) 275 unsigned int reg, unsigned int value)
275 { 276 {
276 struct regcache_lzo_ctx *lzo_block, **lzo_blocks; 277 struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
277 int ret, blkindex, blkpos; 278 int ret, blkindex, blkpos;
278 size_t blksize, tmp_dst_len; 279 size_t blksize, tmp_dst_len;
279 void *tmp_dst; 280 void *tmp_dst;
280 281
281 /* index of the compressed lzo block */ 282 /* index of the compressed lzo block */
282 blkindex = regcache_lzo_get_blkindex(map, reg); 283 blkindex = regcache_lzo_get_blkindex(map, reg);
283 /* register index within the decompressed block */ 284 /* register index within the decompressed block */
284 blkpos = regcache_lzo_get_blkpos(map, reg); 285 blkpos = regcache_lzo_get_blkpos(map, reg);
285 /* size of the compressed block */ 286 /* size of the compressed block */
286 blksize = regcache_lzo_get_blksize(map); 287 blksize = regcache_lzo_get_blksize(map);
287 lzo_blocks = map->cache; 288 lzo_blocks = map->cache;
288 lzo_block = lzo_blocks[blkindex]; 289 lzo_block = lzo_blocks[blkindex];
289 290
290 /* save the pointer and length of the compressed block */ 291 /* save the pointer and length of the compressed block */
291 tmp_dst = lzo_block->dst; 292 tmp_dst = lzo_block->dst;
292 tmp_dst_len = lzo_block->dst_len; 293 tmp_dst_len = lzo_block->dst_len;
293 294
294 /* prepare the source to be the compressed block */ 295 /* prepare the source to be the compressed block */
295 lzo_block->src = lzo_block->dst; 296 lzo_block->src = lzo_block->dst;
296 lzo_block->src_len = lzo_block->dst_len; 297 lzo_block->src_len = lzo_block->dst_len;
297 298
298 /* decompress the block */ 299 /* decompress the block */
299 ret = regcache_lzo_decompress_cache_block(map, lzo_block); 300 ret = regcache_lzo_decompress_cache_block(map, lzo_block);
300 if (ret < 0) { 301 if (ret < 0) {
301 kfree(lzo_block->dst); 302 kfree(lzo_block->dst);
302 goto out; 303 goto out;
303 } 304 }
304 305
305 /* write the new value to the cache */ 306 /* write the new value to the cache */
306 if (regcache_set_val(lzo_block->dst, blkpos, value, 307 if (regcache_set_val(lzo_block->dst, blkpos, value,
307 map->cache_word_size)) { 308 map->cache_word_size)) {
308 kfree(lzo_block->dst); 309 kfree(lzo_block->dst);
309 goto out; 310 goto out;
310 } 311 }
311 312
312 /* prepare the source to be the decompressed block */ 313 /* prepare the source to be the decompressed block */
313 lzo_block->src = lzo_block->dst; 314 lzo_block->src = lzo_block->dst;
314 lzo_block->src_len = lzo_block->dst_len; 315 lzo_block->src_len = lzo_block->dst_len;
315 316
316 /* compress the block */ 317 /* compress the block */
317 ret = regcache_lzo_compress_cache_block(map, lzo_block); 318 ret = regcache_lzo_compress_cache_block(map, lzo_block);
318 if (ret < 0) { 319 if (ret < 0) {
319 kfree(lzo_block->dst); 320 kfree(lzo_block->dst);
320 kfree(lzo_block->src); 321 kfree(lzo_block->src);
321 goto out; 322 goto out;
322 } 323 }
323 324
324 /* set the bit so we know we have to sync this register */ 325 /* set the bit so we know we have to sync this register */
325 set_bit(reg, lzo_block->sync_bmp); 326 set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
326 kfree(tmp_dst); 327 kfree(tmp_dst);
327 kfree(lzo_block->src); 328 kfree(lzo_block->src);
328 return 0; 329 return 0;
329 out: 330 out:
330 lzo_block->dst = tmp_dst; 331 lzo_block->dst = tmp_dst;
331 lzo_block->dst_len = tmp_dst_len; 332 lzo_block->dst_len = tmp_dst_len;
332 return ret; 333 return ret;
333 } 334 }
334 335
335 static int regcache_lzo_sync(struct regmap *map, unsigned int min, 336 static int regcache_lzo_sync(struct regmap *map, unsigned int min,
336 unsigned int max) 337 unsigned int max)
337 { 338 {
338 struct regcache_lzo_ctx **lzo_blocks; 339 struct regcache_lzo_ctx **lzo_blocks;
339 unsigned int val; 340 unsigned int val;
340 int i; 341 int i;
341 int ret; 342 int ret;
342 343
343 lzo_blocks = map->cache; 344 lzo_blocks = map->cache;
344 i = min; 345 i = min;
345 for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp, 346 for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
346 lzo_blocks[0]->sync_bmp_nbits) { 347 lzo_blocks[0]->sync_bmp_nbits) {
347 if (i > max) 348 if (i > max)
348 continue; 349 continue;
349 350
350 ret = regcache_read(map, i, &val); 351 ret = regcache_read(map, i, &val);
351 if (ret) 352 if (ret)
352 return ret; 353 return ret;
353 354
354 /* Is this the hardware default? If so skip. */ 355 /* Is this the hardware default? If so skip. */
355 ret = regcache_lookup_reg(map, i); 356 ret = regcache_lookup_reg(map, i);
356 if (ret > 0 && val == map->reg_defaults[ret].def) 357 if (ret > 0 && val == map->reg_defaults[ret].def)
357 continue; 358 continue;
358 359
359 map->cache_bypass = 1; 360 map->cache_bypass = 1;
360 ret = _regmap_write(map, i, val); 361 ret = _regmap_write(map, i, val);
361 map->cache_bypass = 0; 362 map->cache_bypass = 0;
362 if (ret) 363 if (ret)
363 return ret; 364 return ret;
364 dev_dbg(map->dev, "Synced register %#x, value %#x\n", 365 dev_dbg(map->dev, "Synced register %#x, value %#x\n",
365 i, val); 366 i, val);
366 } 367 }
367 368
368 return 0; 369 return 0;
369 } 370 }
370 371
371 struct regcache_ops regcache_lzo_ops = { 372 struct regcache_ops regcache_lzo_ops = {
372 .type = REGCACHE_COMPRESSED, 373 .type = REGCACHE_COMPRESSED,
373 .name = "lzo", 374 .name = "lzo",
374 .init = regcache_lzo_init, 375 .init = regcache_lzo_init,
375 .exit = regcache_lzo_exit, 376 .exit = regcache_lzo_exit,
376 .read = regcache_lzo_read, 377 .read = regcache_lzo_read,
377 .write = regcache_lzo_write, 378 .write = regcache_lzo_write,
378 .sync = regcache_lzo_sync 379 .sync = regcache_lzo_sync
379 }; 380 };
380 381
drivers/base/regmap/regcache-rbtree.c
1 /* 1 /*
2 * Register cache access API - rbtree caching support 2 * Register cache access API - rbtree caching support
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> 6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/device.h> 14 #include <linux/device.h>
15 #include <linux/debugfs.h> 15 #include <linux/debugfs.h>
16 #include <linux/rbtree.h> 16 #include <linux/rbtree.h>
17 #include <linux/seq_file.h> 17 #include <linux/seq_file.h>
18 18
19 #include "internal.h" 19 #include "internal.h"
20 20
21 static int regcache_rbtree_write(struct regmap *map, unsigned int reg, 21 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
22 unsigned int value); 22 unsigned int value);
23 static int regcache_rbtree_exit(struct regmap *map); 23 static int regcache_rbtree_exit(struct regmap *map);
24 24
25 struct regcache_rbtree_node { 25 struct regcache_rbtree_node {
26 /* the actual rbtree node holding this block */ 26 /* the actual rbtree node holding this block */
27 struct rb_node node; 27 struct rb_node node;
28 /* base register handled by this block */ 28 /* base register handled by this block */
29 unsigned int base_reg; 29 unsigned int base_reg;
30 /* block of adjacent registers */ 30 /* block of adjacent registers */
31 void *block; 31 void *block;
32 /* number of registers available in the block */ 32 /* number of registers available in the block */
33 unsigned int blklen; 33 unsigned int blklen;
34 } __attribute__ ((packed)); 34 } __attribute__ ((packed));
35 35
36 struct regcache_rbtree_ctx { 36 struct regcache_rbtree_ctx {
37 struct rb_root root; 37 struct rb_root root;
38 struct regcache_rbtree_node *cached_rbnode; 38 struct regcache_rbtree_node *cached_rbnode;
39 }; 39 };
40 40
41 static inline void regcache_rbtree_get_base_top_reg( 41 static inline void regcache_rbtree_get_base_top_reg(
42 struct regmap *map,
42 struct regcache_rbtree_node *rbnode, 43 struct regcache_rbtree_node *rbnode,
43 unsigned int *base, unsigned int *top) 44 unsigned int *base, unsigned int *top)
44 { 45 {
45 *base = rbnode->base_reg; 46 *base = rbnode->base_reg;
46 *top = rbnode->base_reg + rbnode->blklen - 1; 47 *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
47 } 48 }
48 49
49 static unsigned int regcache_rbtree_get_register( 50 static unsigned int regcache_rbtree_get_register(
50 struct regcache_rbtree_node *rbnode, unsigned int idx, 51 struct regcache_rbtree_node *rbnode, unsigned int idx,
51 unsigned int word_size) 52 unsigned int word_size)
52 { 53 {
53 return regcache_get_val(rbnode->block, idx, word_size); 54 return regcache_get_val(rbnode->block, idx, word_size);
54 } 55 }
55 56
56 static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode, 57 static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
57 unsigned int idx, unsigned int val, 58 unsigned int idx, unsigned int val,
58 unsigned int word_size) 59 unsigned int word_size)
59 { 60 {
60 regcache_set_val(rbnode->block, idx, val, word_size); 61 regcache_set_val(rbnode->block, idx, val, word_size);
61 } 62 }
62 63
63 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, 64 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
64 unsigned int reg) 65 unsigned int reg)
65 { 66 {
66 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; 67 struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
67 struct rb_node *node; 68 struct rb_node *node;
68 struct regcache_rbtree_node *rbnode; 69 struct regcache_rbtree_node *rbnode;
69 unsigned int base_reg, top_reg; 70 unsigned int base_reg, top_reg;
70 71
71 rbnode = rbtree_ctx->cached_rbnode; 72 rbnode = rbtree_ctx->cached_rbnode;
72 if (rbnode) { 73 if (rbnode) {
73 regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); 74 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
75 &top_reg);
74 if (reg >= base_reg && reg <= top_reg) 76 if (reg >= base_reg && reg <= top_reg)
75 return rbnode; 77 return rbnode;
76 } 78 }
77 79
78 node = rbtree_ctx->root.rb_node; 80 node = rbtree_ctx->root.rb_node;
79 while (node) { 81 while (node) {
80 rbnode = container_of(node, struct regcache_rbtree_node, node); 82 rbnode = container_of(node, struct regcache_rbtree_node, node);
81 regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); 83 regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
84 &top_reg);
82 if (reg >= base_reg && reg <= top_reg) { 85 if (reg >= base_reg && reg <= top_reg) {
83 rbtree_ctx->cached_rbnode = rbnode; 86 rbtree_ctx->cached_rbnode = rbnode;
84 return rbnode; 87 return rbnode;
85 } else if (reg > top_reg) { 88 } else if (reg > top_reg) {
86 node = node->rb_right; 89 node = node->rb_right;
87 } else if (reg < base_reg) { 90 } else if (reg < base_reg) {
88 node = node->rb_left; 91 node = node->rb_left;
89 } 92 }
90 } 93 }
91 94
92 return NULL; 95 return NULL;
93 } 96 }
94 97
95 static int regcache_rbtree_insert(struct rb_root *root, 98 static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
96 struct regcache_rbtree_node *rbnode) 99 struct regcache_rbtree_node *rbnode)
97 { 100 {
98 struct rb_node **new, *parent; 101 struct rb_node **new, *parent;
99 struct regcache_rbtree_node *rbnode_tmp; 102 struct regcache_rbtree_node *rbnode_tmp;
100 unsigned int base_reg_tmp, top_reg_tmp; 103 unsigned int base_reg_tmp, top_reg_tmp;
101 unsigned int base_reg; 104 unsigned int base_reg;
102 105
103 parent = NULL; 106 parent = NULL;
104 new = &root->rb_node; 107 new = &root->rb_node;
105 while (*new) { 108 while (*new) {
106 rbnode_tmp = container_of(*new, struct regcache_rbtree_node, 109 rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
107 node); 110 node);
108 /* base and top registers of the current rbnode */ 111 /* base and top registers of the current rbnode */
109 regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp, 112 regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
110 &top_reg_tmp); 113 &top_reg_tmp);
111 /* base register of the rbnode to be added */ 114 /* base register of the rbnode to be added */
112 base_reg = rbnode->base_reg; 115 base_reg = rbnode->base_reg;
113 parent = *new; 116 parent = *new;
114 /* if this register has already been inserted, just return */ 117 /* if this register has already been inserted, just return */
115 if (base_reg >= base_reg_tmp && 118 if (base_reg >= base_reg_tmp &&
116 base_reg <= top_reg_tmp) 119 base_reg <= top_reg_tmp)
117 return 0; 120 return 0;
118 else if (base_reg > top_reg_tmp) 121 else if (base_reg > top_reg_tmp)
119 new = &((*new)->rb_right); 122 new = &((*new)->rb_right);
120 else if (base_reg < base_reg_tmp) 123 else if (base_reg < base_reg_tmp)
121 new = &((*new)->rb_left); 124 new = &((*new)->rb_left);
122 } 125 }
123 126
124 /* insert the node into the rbtree */ 127 /* insert the node into the rbtree */
125 rb_link_node(&rbnode->node, parent, new); 128 rb_link_node(&rbnode->node, parent, new);
126 rb_insert_color(&rbnode->node, root); 129 rb_insert_color(&rbnode->node, root);
127 130
128 return 1; 131 return 1;
129 } 132 }
130 133
131 #ifdef CONFIG_DEBUG_FS 134 #ifdef CONFIG_DEBUG_FS
132 static int rbtree_show(struct seq_file *s, void *ignored) 135 static int rbtree_show(struct seq_file *s, void *ignored)
133 { 136 {
134 struct regmap *map = s->private; 137 struct regmap *map = s->private;
135 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; 138 struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
136 struct regcache_rbtree_node *n; 139 struct regcache_rbtree_node *n;
137 struct rb_node *node; 140 struct rb_node *node;
138 unsigned int base, top; 141 unsigned int base, top;
139 int nodes = 0; 142 int nodes = 0;
140 int registers = 0; 143 int registers = 0;
141 int average; 144 int this_registers, average;
142 145
143 map->lock(map); 146 map->lock(map);
144 147
145 for (node = rb_first(&rbtree_ctx->root); node != NULL; 148 for (node = rb_first(&rbtree_ctx->root); node != NULL;
146 node = rb_next(node)) { 149 node = rb_next(node)) {
147 n = container_of(node, struct regcache_rbtree_node, node); 150 n = container_of(node, struct regcache_rbtree_node, node);
148 151
149 regcache_rbtree_get_base_top_reg(n, &base, &top); 152 regcache_rbtree_get_base_top_reg(map, n, &base, &top);
150 seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1); 153 this_registers = ((top - base) / map->reg_stride) + 1;
154 seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
151 155
152 nodes++; 156 nodes++;
153 registers += top - base + 1; 157 registers += this_registers;
154 } 158 }
155 159
156 if (nodes) 160 if (nodes)
157 average = registers / nodes; 161 average = registers / nodes;
158 else 162 else
159 average = 0; 163 average = 0;
160 164
161 seq_printf(s, "%d nodes, %d registers, average %d registers\n", 165 seq_printf(s, "%d nodes, %d registers, average %d registers\n",
162 nodes, registers, average); 166 nodes, registers, average);
163 167
164 map->unlock(map); 168 map->unlock(map);
165 169
166 return 0; 170 return 0;
167 } 171 }
168 172
169 static int rbtree_open(struct inode *inode, struct file *file) 173 static int rbtree_open(struct inode *inode, struct file *file)
170 { 174 {
171 return single_open(file, rbtree_show, inode->i_private); 175 return single_open(file, rbtree_show, inode->i_private);
172 } 176 }
173 177
174 static const struct file_operations rbtree_fops = { 178 static const struct file_operations rbtree_fops = {
175 .open = rbtree_open, 179 .open = rbtree_open,
176 .read = seq_read, 180 .read = seq_read,
177 .llseek = seq_lseek, 181 .llseek = seq_lseek,
178 .release = single_release, 182 .release = single_release,
179 }; 183 };
180 184
181 static void rbtree_debugfs_init(struct regmap *map) 185 static void rbtree_debugfs_init(struct regmap *map)
182 { 186 {
183 debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); 187 debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
184 } 188 }
185 #else 189 #else
186 static void rbtree_debugfs_init(struct regmap *map) 190 static void rbtree_debugfs_init(struct regmap *map)
187 { 191 {
188 } 192 }
189 #endif 193 #endif
190 194
191 static int regcache_rbtree_init(struct regmap *map) 195 static int regcache_rbtree_init(struct regmap *map)
192 { 196 {
193 struct regcache_rbtree_ctx *rbtree_ctx; 197 struct regcache_rbtree_ctx *rbtree_ctx;
194 int i; 198 int i;
195 int ret; 199 int ret;
196 200
197 map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); 201 map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
198 if (!map->cache) 202 if (!map->cache)
199 return -ENOMEM; 203 return -ENOMEM;
200 204
201 rbtree_ctx = map->cache; 205 rbtree_ctx = map->cache;
202 rbtree_ctx->root = RB_ROOT; 206 rbtree_ctx->root = RB_ROOT;
203 rbtree_ctx->cached_rbnode = NULL; 207 rbtree_ctx->cached_rbnode = NULL;
204 208
205 for (i = 0; i < map->num_reg_defaults; i++) { 209 for (i = 0; i < map->num_reg_defaults; i++) {
206 ret = regcache_rbtree_write(map, 210 ret = regcache_rbtree_write(map,
207 map->reg_defaults[i].reg, 211 map->reg_defaults[i].reg,
208 map->reg_defaults[i].def); 212 map->reg_defaults[i].def);
209 if (ret) 213 if (ret)
210 goto err; 214 goto err;
211 } 215 }
212 216
213 rbtree_debugfs_init(map); 217 rbtree_debugfs_init(map);
214 218
215 return 0; 219 return 0;
216 220
217 err: 221 err:
218 regcache_rbtree_exit(map); 222 regcache_rbtree_exit(map);
219 return ret; 223 return ret;
220 } 224 }
221 225
222 static int regcache_rbtree_exit(struct regmap *map) 226 static int regcache_rbtree_exit(struct regmap *map)
223 { 227 {
224 struct rb_node *next; 228 struct rb_node *next;
225 struct regcache_rbtree_ctx *rbtree_ctx; 229 struct regcache_rbtree_ctx *rbtree_ctx;
226 struct regcache_rbtree_node *rbtree_node; 230 struct regcache_rbtree_node *rbtree_node;
227 231
228 /* if we've already been called then just return */ 232 /* if we've already been called then just return */
229 rbtree_ctx = map->cache; 233 rbtree_ctx = map->cache;
230 if (!rbtree_ctx) 234 if (!rbtree_ctx)
231 return 0; 235 return 0;
232 236
233 /* free up the rbtree */ 237 /* free up the rbtree */
234 next = rb_first(&rbtree_ctx->root); 238 next = rb_first(&rbtree_ctx->root);
235 while (next) { 239 while (next) {
236 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); 240 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
237 next = rb_next(&rbtree_node->node); 241 next = rb_next(&rbtree_node->node);
238 rb_erase(&rbtree_node->node, &rbtree_ctx->root); 242 rb_erase(&rbtree_node->node, &rbtree_ctx->root);
239 kfree(rbtree_node->block); 243 kfree(rbtree_node->block);
240 kfree(rbtree_node); 244 kfree(rbtree_node);
241 } 245 }
242 246
243 /* release the resources */ 247 /* release the resources */
244 kfree(map->cache); 248 kfree(map->cache);
245 map->cache = NULL; 249 map->cache = NULL;
246 250
247 return 0; 251 return 0;
248 } 252 }
249 253
250 static int regcache_rbtree_read(struct regmap *map, 254 static int regcache_rbtree_read(struct regmap *map,
251 unsigned int reg, unsigned int *value) 255 unsigned int reg, unsigned int *value)
252 { 256 {
253 struct regcache_rbtree_node *rbnode; 257 struct regcache_rbtree_node *rbnode;
254 unsigned int reg_tmp; 258 unsigned int reg_tmp;
255 259
256 rbnode = regcache_rbtree_lookup(map, reg); 260 rbnode = regcache_rbtree_lookup(map, reg);
257 if (rbnode) { 261 if (rbnode) {
258 reg_tmp = reg - rbnode->base_reg; 262 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
259 *value = regcache_rbtree_get_register(rbnode, reg_tmp, 263 *value = regcache_rbtree_get_register(rbnode, reg_tmp,
260 map->cache_word_size); 264 map->cache_word_size);
261 } else { 265 } else {
262 return -ENOENT; 266 return -ENOENT;
263 } 267 }
264 268
265 return 0; 269 return 0;
266 } 270 }
267 271
268 272
269 static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode, 273 static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
270 unsigned int pos, unsigned int reg, 274 unsigned int pos, unsigned int reg,
271 unsigned int value, unsigned int word_size) 275 unsigned int value, unsigned int word_size)
272 { 276 {
273 u8 *blk; 277 u8 *blk;
274 278
275 blk = krealloc(rbnode->block, 279 blk = krealloc(rbnode->block,
276 (rbnode->blklen + 1) * word_size, GFP_KERNEL); 280 (rbnode->blklen + 1) * word_size, GFP_KERNEL);
277 if (!blk) 281 if (!blk)
278 return -ENOMEM; 282 return -ENOMEM;
279 283
280 /* insert the register value in the correct place in the rbnode block */ 284 /* insert the register value in the correct place in the rbnode block */
281 memmove(blk + (pos + 1) * word_size, 285 memmove(blk + (pos + 1) * word_size,
282 blk + pos * word_size, 286 blk + pos * word_size,
283 (rbnode->blklen - pos) * word_size); 287 (rbnode->blklen - pos) * word_size);
284 288
285 /* update the rbnode block, its size and the base register */ 289 /* update the rbnode block, its size and the base register */
286 rbnode->block = blk; 290 rbnode->block = blk;
287 rbnode->blklen++; 291 rbnode->blklen++;
288 if (!pos) 292 if (!pos)
289 rbnode->base_reg = reg; 293 rbnode->base_reg = reg;
290 294
291 regcache_rbtree_set_register(rbnode, pos, value, word_size); 295 regcache_rbtree_set_register(rbnode, pos, value, word_size);
292 return 0; 296 return 0;
293 } 297 }
294 298
295 static int regcache_rbtree_write(struct regmap *map, unsigned int reg, 299 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
296 unsigned int value) 300 unsigned int value)
297 { 301 {
298 struct regcache_rbtree_ctx *rbtree_ctx; 302 struct regcache_rbtree_ctx *rbtree_ctx;
299 struct regcache_rbtree_node *rbnode, *rbnode_tmp; 303 struct regcache_rbtree_node *rbnode, *rbnode_tmp;
300 struct rb_node *node; 304 struct rb_node *node;
301 unsigned int val; 305 unsigned int val;
302 unsigned int reg_tmp; 306 unsigned int reg_tmp;
303 unsigned int pos; 307 unsigned int pos;
304 int i; 308 int i;
305 int ret; 309 int ret;
306 310
307 rbtree_ctx = map->cache; 311 rbtree_ctx = map->cache;
308 /* if we can't locate it in the cached rbnode we'll have 312 /* if we can't locate it in the cached rbnode we'll have
309 * to traverse the rbtree looking for it. 313 * to traverse the rbtree looking for it.
310 */ 314 */
311 rbnode = regcache_rbtree_lookup(map, reg); 315 rbnode = regcache_rbtree_lookup(map, reg);
312 if (rbnode) { 316 if (rbnode) {
313 reg_tmp = reg - rbnode->base_reg; 317 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
314 val = regcache_rbtree_get_register(rbnode, reg_tmp, 318 val = regcache_rbtree_get_register(rbnode, reg_tmp,
315 map->cache_word_size); 319 map->cache_word_size);
316 if (val == value) 320 if (val == value)
317 return 0; 321 return 0;
318 regcache_rbtree_set_register(rbnode, reg_tmp, value, 322 regcache_rbtree_set_register(rbnode, reg_tmp, value,
319 map->cache_word_size); 323 map->cache_word_size);
320 } else { 324 } else {
321 /* look for an adjacent register to the one we are about to add */ 325 /* look for an adjacent register to the one we are about to add */
322 for (node = rb_first(&rbtree_ctx->root); node; 326 for (node = rb_first(&rbtree_ctx->root); node;
323 node = rb_next(node)) { 327 node = rb_next(node)) {
324 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); 328 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
329 node);
325 for (i = 0; i < rbnode_tmp->blklen; i++) { 330 for (i = 0; i < rbnode_tmp->blklen; i++) {
326 reg_tmp = rbnode_tmp->base_reg + i; 331 reg_tmp = rbnode_tmp->base_reg +
327 if (abs(reg_tmp - reg) != 1) 332 (i * map->reg_stride);
333 if (abs(reg_tmp - reg) != map->reg_stride)
328 continue; 334 continue;
329 /* decide where in the block to place our register */ 335 /* decide where in the block to place our register */
330 if (reg_tmp + 1 == reg) 336 if (reg_tmp + map->reg_stride == reg)
331 pos = i + 1; 337 pos = i + 1;
332 else 338 else
333 pos = i; 339 pos = i;
334 ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos, 340 ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
335 reg, value, 341 reg, value,
336 map->cache_word_size); 342 map->cache_word_size);
337 if (ret) 343 if (ret)
338 return ret; 344 return ret;
339 rbtree_ctx->cached_rbnode = rbnode_tmp; 345 rbtree_ctx->cached_rbnode = rbnode_tmp;
340 return 0; 346 return 0;
341 } 347 }
342 } 348 }
343 /* we did not manage to find a place to insert it in an existing 349 /* we did not manage to find a place to insert it in an existing
344 * block so create a new rbnode with a single register in its block. 350 * block so create a new rbnode with a single register in its block.
345 * This block will get populated further if any other adjacent 351 * This block will get populated further if any other adjacent
346 * registers get modified in the future. 352 * registers get modified in the future.
347 */ 353 */
348 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); 354 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
349 if (!rbnode) 355 if (!rbnode)
350 return -ENOMEM; 356 return -ENOMEM;
351 rbnode->blklen = 1; 357 rbnode->blklen = 1;
352 rbnode->base_reg = reg; 358 rbnode->base_reg = reg;
353 rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, 359 rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
354 GFP_KERNEL); 360 GFP_KERNEL);
355 if (!rbnode->block) { 361 if (!rbnode->block) {
356 kfree(rbnode); 362 kfree(rbnode);
357 return -ENOMEM; 363 return -ENOMEM;
358 } 364 }
359 regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size); 365 regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
360 regcache_rbtree_insert(&rbtree_ctx->root, rbnode); 366 regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
361 rbtree_ctx->cached_rbnode = rbnode; 367 rbtree_ctx->cached_rbnode = rbnode;
362 } 368 }
363 369
364 return 0; 370 return 0;
365 } 371 }
366 372
367 static int regcache_rbtree_sync(struct regmap *map, unsigned int min, 373 static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
368 unsigned int max) 374 unsigned int max)
369 { 375 {
370 struct regcache_rbtree_ctx *rbtree_ctx; 376 struct regcache_rbtree_ctx *rbtree_ctx;
371 struct rb_node *node; 377 struct rb_node *node;
372 struct regcache_rbtree_node *rbnode; 378 struct regcache_rbtree_node *rbnode;
373 unsigned int regtmp; 379 unsigned int regtmp;
374 unsigned int val; 380 unsigned int val;
375 int ret; 381 int ret;
376 int i, base, end; 382 int i, base, end;
377 383
378 rbtree_ctx = map->cache; 384 rbtree_ctx = map->cache;
379 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 385 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
380 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 386 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
381 387
382 if (rbnode->base_reg < min) 388 if (rbnode->base_reg < min)
383 continue; 389 continue;
384 if (rbnode->base_reg > max) 390 if (rbnode->base_reg > max)
385 break; 391 break;
386 if (rbnode->base_reg + rbnode->blklen < min) 392 if (rbnode->base_reg + rbnode->blklen < min)
387 continue; 393 continue;
388 394
389 if (min > rbnode->base_reg) 395 if (min > rbnode->base_reg)
390 base = min - rbnode->base_reg; 396 base = min - rbnode->base_reg;
391 else 397 else
392 base = 0; 398 base = 0;
393 399
394 if (max < rbnode->base_reg + rbnode->blklen) 400 if (max < rbnode->base_reg + rbnode->blklen)
395 end = rbnode->base_reg + rbnode->blklen - max; 401 end = rbnode->base_reg + rbnode->blklen - max;
396 else 402 else
397 end = rbnode->blklen; 403 end = rbnode->blklen;
398 404
399 for (i = base; i < end; i++) { 405 for (i = base; i < end; i++) {
400 regtmp = rbnode->base_reg + i; 406 regtmp = rbnode->base_reg + (i * map->reg_stride);
401 val = regcache_rbtree_get_register(rbnode, i, 407 val = regcache_rbtree_get_register(rbnode, i,
402 map->cache_word_size); 408 map->cache_word_size);
403 409
404 /* Is this the hardware default? If so skip. */ 410 /* Is this the hardware default? If so skip. */
405 ret = regcache_lookup_reg(map, regtmp); 411 ret = regcache_lookup_reg(map, regtmp);
406 if (ret >= 0 && val == map->reg_defaults[ret].def) 412 if (ret >= 0 && val == map->reg_defaults[ret].def)
407 continue; 413 continue;
408 414
409 map->cache_bypass = 1; 415 map->cache_bypass = 1;
410 ret = _regmap_write(map, regtmp, val); 416 ret = _regmap_write(map, regtmp, val);
411 map->cache_bypass = 0; 417 map->cache_bypass = 0;
412 if (ret) 418 if (ret)
413 return ret; 419 return ret;
414 dev_dbg(map->dev, "Synced register %#x, value %#x\n", 420 dev_dbg(map->dev, "Synced register %#x, value %#x\n",
415 regtmp, val); 421 regtmp, val);
416 } 422 }
417 } 423 }
418 424
419 return 0; 425 return 0;
420 } 426 }
421 427
422 struct regcache_ops regcache_rbtree_ops = { 428 struct regcache_ops regcache_rbtree_ops = {
423 .type = REGCACHE_RBTREE, 429 .type = REGCACHE_RBTREE,
424 .name = "rbtree", 430 .name = "rbtree",
425 .init = regcache_rbtree_init, 431 .init = regcache_rbtree_init,
426 .exit = regcache_rbtree_exit, 432 .exit = regcache_rbtree_exit,
427 .read = regcache_rbtree_read, 433 .read = regcache_rbtree_read,
428 .write = regcache_rbtree_write, 434 .write = regcache_rbtree_write,
429 .sync = regcache_rbtree_sync 435 .sync = regcache_rbtree_sync
430 }; 436 };
431 437
drivers/base/regmap/regcache.c
1 /* 1 /*
2 * Register cache access API 2 * Register cache access API
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> 6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/export.h> 14 #include <linux/export.h>
15 #include <linux/device.h> 15 #include <linux/device.h>
16 #include <trace/events/regmap.h> 16 #include <trace/events/regmap.h>
17 #include <linux/bsearch.h> 17 #include <linux/bsearch.h>
18 #include <linux/sort.h> 18 #include <linux/sort.h>
19 19
20 #include "internal.h" 20 #include "internal.h"
21 21
22 static const struct regcache_ops *cache_types[] = { 22 static const struct regcache_ops *cache_types[] = {
23 &regcache_rbtree_ops, 23 &regcache_rbtree_ops,
24 &regcache_lzo_ops, 24 &regcache_lzo_ops,
25 }; 25 };
26 26
27 static int regcache_hw_init(struct regmap *map) 27 static int regcache_hw_init(struct regmap *map)
28 { 28 {
29 int i, j; 29 int i, j;
30 int ret; 30 int ret;
31 int count; 31 int count;
32 unsigned int val; 32 unsigned int val;
33 void *tmp_buf; 33 void *tmp_buf;
34 34
35 if (!map->num_reg_defaults_raw) 35 if (!map->num_reg_defaults_raw)
36 return -EINVAL; 36 return -EINVAL;
37 37
38 if (!map->reg_defaults_raw) { 38 if (!map->reg_defaults_raw) {
39 u32 cache_bypass = map->cache_bypass; 39 u32 cache_bypass = map->cache_bypass;
40 dev_warn(map->dev, "No cache defaults, reading back from HW\n"); 40 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
41 41
42 /* Bypass the cache access till data read from HW*/ 42 /* Bypass the cache access till data read from HW*/
43 map->cache_bypass = 1; 43 map->cache_bypass = 1;
44 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); 44 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
45 if (!tmp_buf) 45 if (!tmp_buf)
46 return -EINVAL; 46 return -EINVAL;
47 ret = regmap_bulk_read(map, 0, tmp_buf, 47 ret = regmap_bulk_read(map, 0, tmp_buf,
48 map->num_reg_defaults_raw); 48 map->num_reg_defaults_raw);
49 map->cache_bypass = cache_bypass; 49 map->cache_bypass = cache_bypass;
50 if (ret < 0) { 50 if (ret < 0) {
51 kfree(tmp_buf); 51 kfree(tmp_buf);
52 return ret; 52 return ret;
53 } 53 }
54 map->reg_defaults_raw = tmp_buf; 54 map->reg_defaults_raw = tmp_buf;
55 map->cache_free = 1; 55 map->cache_free = 1;
56 } 56 }
57 57
58 /* calculate the size of reg_defaults */ 58 /* calculate the size of reg_defaults */
59 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { 59 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
60 val = regcache_get_val(map->reg_defaults_raw, 60 val = regcache_get_val(map->reg_defaults_raw,
61 i, map->cache_word_size); 61 i, map->cache_word_size);
62 if (regmap_volatile(map, i)) 62 if (regmap_volatile(map, i * map->reg_stride))
63 continue; 63 continue;
64 count++; 64 count++;
65 } 65 }
66 66
67 map->reg_defaults = kmalloc(count * sizeof(struct reg_default), 67 map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
68 GFP_KERNEL); 68 GFP_KERNEL);
69 if (!map->reg_defaults) { 69 if (!map->reg_defaults) {
70 ret = -ENOMEM; 70 ret = -ENOMEM;
71 goto err_free; 71 goto err_free;
72 } 72 }
73 73
74 /* fill the reg_defaults */ 74 /* fill the reg_defaults */
75 map->num_reg_defaults = count; 75 map->num_reg_defaults = count;
76 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { 76 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
77 val = regcache_get_val(map->reg_defaults_raw, 77 val = regcache_get_val(map->reg_defaults_raw,
78 i, map->cache_word_size); 78 i, map->cache_word_size);
79 if (regmap_volatile(map, i)) 79 if (regmap_volatile(map, i * map->reg_stride))
80 continue; 80 continue;
81 map->reg_defaults[j].reg = i; 81 map->reg_defaults[j].reg = i * map->reg_stride;
82 map->reg_defaults[j].def = val; 82 map->reg_defaults[j].def = val;
83 j++; 83 j++;
84 } 84 }
85 85
86 return 0; 86 return 0;
87 87
88 err_free: 88 err_free:
89 if (map->cache_free) 89 if (map->cache_free)
90 kfree(map->reg_defaults_raw); 90 kfree(map->reg_defaults_raw);
91 91
92 return ret; 92 return ret;
93 } 93 }
94 94
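regcache_hw_init() above walks the raw default table with a dense index i, but hands regmap_volatile() and reg_defaults[].reg the address i * map->reg_stride. A minimal stand-alone sketch of that index/address mapping, assuming a hypothetical stride of 4 (not taken from this patch):

#include <stdio.h>

int main(void)
{
	const unsigned int reg_stride = 4;	/* e.g. one register per 4-byte step */
	const unsigned int num_raw = 4;		/* densely packed raw defaults */
	unsigned int i;

	for (i = 0; i < num_raw; i++) {
		unsigned int reg = i * reg_stride;	/* index -> register address */
		unsigned int idx = reg / reg_stride;	/* register address -> index */

		printf("raw index %u <-> register %#x (index %u)\n", i, reg, idx);
	}

	return 0;
}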
95 int regcache_init(struct regmap *map, const struct regmap_config *config) 95 int regcache_init(struct regmap *map, const struct regmap_config *config)
96 { 96 {
97 int ret; 97 int ret;
98 int i; 98 int i;
99 void *tmp_buf; 99 void *tmp_buf;
100 100
101 for (i = 0; i < config->num_reg_defaults; i++)
102 if (config->reg_defaults[i].reg % map->reg_stride)
103 return -EINVAL;
104
101 if (map->cache_type == REGCACHE_NONE) { 105 if (map->cache_type == REGCACHE_NONE) {
102 map->cache_bypass = true; 106 map->cache_bypass = true;
103 return 0; 107 return 0;
104 } 108 }
105 109
106 for (i = 0; i < ARRAY_SIZE(cache_types); i++) 110 for (i = 0; i < ARRAY_SIZE(cache_types); i++)
107 if (cache_types[i]->type == map->cache_type) 111 if (cache_types[i]->type == map->cache_type)
108 break; 112 break;
109 113
110 if (i == ARRAY_SIZE(cache_types)) { 114 if (i == ARRAY_SIZE(cache_types)) {
111 dev_err(map->dev, "Could not match compress type: %d\n", 115 dev_err(map->dev, "Could not match compress type: %d\n",
112 map->cache_type); 116 map->cache_type);
113 return -EINVAL; 117 return -EINVAL;
114 } 118 }
115 119
116 map->num_reg_defaults = config->num_reg_defaults; 120 map->num_reg_defaults = config->num_reg_defaults;
117 map->num_reg_defaults_raw = config->num_reg_defaults_raw; 121 map->num_reg_defaults_raw = config->num_reg_defaults_raw;
118 map->reg_defaults_raw = config->reg_defaults_raw; 122 map->reg_defaults_raw = config->reg_defaults_raw;
119 map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); 123 map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
120 map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; 124 map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
121 125
122 map->cache = NULL; 126 map->cache = NULL;
123 map->cache_ops = cache_types[i]; 127 map->cache_ops = cache_types[i];
124 128
125 if (!map->cache_ops->read || 129 if (!map->cache_ops->read ||
126 !map->cache_ops->write || 130 !map->cache_ops->write ||
127 !map->cache_ops->name) 131 !map->cache_ops->name)
128 return -EINVAL; 132 return -EINVAL;
129 133
130 /* We still need to ensure that the reg_defaults 134 /* We still need to ensure that the reg_defaults
131 * won't vanish from under us. We'll need to make 135 * won't vanish from under us. We'll need to make
132 * a copy of it. 136 * a copy of it.
133 */ 137 */
134 if (config->reg_defaults) { 138 if (config->reg_defaults) {
135 if (!map->num_reg_defaults) 139 if (!map->num_reg_defaults)
136 return -EINVAL; 140 return -EINVAL;
137 tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * 141 tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
138 sizeof(struct reg_default), GFP_KERNEL); 142 sizeof(struct reg_default), GFP_KERNEL);
139 if (!tmp_buf) 143 if (!tmp_buf)
140 return -ENOMEM; 144 return -ENOMEM;
141 map->reg_defaults = tmp_buf; 145 map->reg_defaults = tmp_buf;
142 } else if (map->num_reg_defaults_raw) { 146 } else if (map->num_reg_defaults_raw) {
143 /* Some devices such as PMICs don't have cache defaults, 147 /* Some devices such as PMICs don't have cache defaults,
144 * we cope with this by reading back the HW registers and 148 * we cope with this by reading back the HW registers and
145 * crafting the cache defaults by hand. 149 * crafting the cache defaults by hand.
146 */ 150 */
147 ret = regcache_hw_init(map); 151 ret = regcache_hw_init(map);
148 if (ret < 0) 152 if (ret < 0)
149 return ret; 153 return ret;
150 } 154 }
151 155
152 if (!map->max_register) 156 if (!map->max_register)
153 map->max_register = map->num_reg_defaults_raw; 157 map->max_register = map->num_reg_defaults_raw;
154 158
155 if (map->cache_ops->init) { 159 if (map->cache_ops->init) {
156 dev_dbg(map->dev, "Initializing %s cache\n", 160 dev_dbg(map->dev, "Initializing %s cache\n",
157 map->cache_ops->name); 161 map->cache_ops->name);
158 ret = map->cache_ops->init(map); 162 ret = map->cache_ops->init(map);
159 if (ret) 163 if (ret)
160 goto err_free; 164 goto err_free;
161 } 165 }
162 return 0; 166 return 0;
163 167
164 err_free: 168 err_free:
165 kfree(map->reg_defaults); 169 kfree(map->reg_defaults);
166 if (map->cache_free) 170 if (map->cache_free)
167 kfree(map->reg_defaults_raw); 171 kfree(map->reg_defaults_raw);
168 172
169 return ret; 173 return ret;
170 } 174 }
171 175
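For configuration data, the new check at the top of regcache_init() means every entry in reg_defaults must sit on a stride boundary or the call fails with -EINVAL. A hedged example of a configuration that passes the check (device name, addresses and values are invented for illustration):

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical device with one 32-bit register every 4 bytes. */
static const struct reg_default foo_reg_defaults[] = {
	{ .reg = 0x00, .def = 0x0000 },
	{ .reg = 0x04, .def = 0x1234 },
	{ .reg = 0x08, .def = 0xffff },
	/* an entry at .reg = 0x06 would trip the stride check above */
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x08,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = foo_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
};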
172 void regcache_exit(struct regmap *map) 176 void regcache_exit(struct regmap *map)
173 { 177 {
174 if (map->cache_type == REGCACHE_NONE) 178 if (map->cache_type == REGCACHE_NONE)
175 return; 179 return;
176 180
177 BUG_ON(!map->cache_ops); 181 BUG_ON(!map->cache_ops);
178 182
179 kfree(map->reg_defaults); 183 kfree(map->reg_defaults);
180 if (map->cache_free) 184 if (map->cache_free)
181 kfree(map->reg_defaults_raw); 185 kfree(map->reg_defaults_raw);
182 186
183 if (map->cache_ops->exit) { 187 if (map->cache_ops->exit) {
184 dev_dbg(map->dev, "Destroying %s cache\n", 188 dev_dbg(map->dev, "Destroying %s cache\n",
185 map->cache_ops->name); 189 map->cache_ops->name);
186 map->cache_ops->exit(map); 190 map->cache_ops->exit(map);
187 } 191 }
188 } 192 }
189 193
190 /** 194 /**
191 * regcache_read: Fetch the value of a given register from the cache. 195 * regcache_read: Fetch the value of a given register from the cache.
192 * 196 *
193 * @map: map to configure. 197 * @map: map to configure.
194 * @reg: The register index. 198 * @reg: The register index.
195 * @value: The value to be returned. 199 * @value: The value to be returned.
196 * 200 *
197 * Return a negative value on failure, 0 on success. 201 * Return a negative value on failure, 0 on success.
198 */ 202 */
199 int regcache_read(struct regmap *map, 203 int regcache_read(struct regmap *map,
200 unsigned int reg, unsigned int *value) 204 unsigned int reg, unsigned int *value)
201 { 205 {
202 int ret; 206 int ret;
203 207
204 if (map->cache_type == REGCACHE_NONE) 208 if (map->cache_type == REGCACHE_NONE)
205 return -ENOSYS; 209 return -ENOSYS;
206 210
207 BUG_ON(!map->cache_ops); 211 BUG_ON(!map->cache_ops);
208 212
209 if (!regmap_volatile(map, reg)) { 213 if (!regmap_volatile(map, reg)) {
210 ret = map->cache_ops->read(map, reg, value); 214 ret = map->cache_ops->read(map, reg, value);
211 215
212 if (ret == 0) 216 if (ret == 0)
213 trace_regmap_reg_read_cache(map->dev, reg, *value); 217 trace_regmap_reg_read_cache(map->dev, reg, *value);
214 218
215 return ret; 219 return ret;
216 } 220 }
217 221
218 return -EINVAL; 222 return -EINVAL;
219 } 223 }
220 224
221 /** 225 /**
222 * regcache_write: Set the value of a given register in the cache. 226 * regcache_write: Set the value of a given register in the cache.
223 * 227 *
224 * @map: map to configure. 228 * @map: map to configure.
225 * @reg: The register index. 229 * @reg: The register index.
226 * @value: The new register value. 230 * @value: The new register value.
227 * 231 *
228 * Return a negative value on failure, 0 on success. 232 * Return a negative value on failure, 0 on success.
229 */ 233 */
230 int regcache_write(struct regmap *map, 234 int regcache_write(struct regmap *map,
231 unsigned int reg, unsigned int value) 235 unsigned int reg, unsigned int value)
232 { 236 {
233 if (map->cache_type == REGCACHE_NONE) 237 if (map->cache_type == REGCACHE_NONE)
234 return 0; 238 return 0;
235 239
236 BUG_ON(!map->cache_ops); 240 BUG_ON(!map->cache_ops);
237 241
238 if (!regmap_writeable(map, reg)) 242 if (!regmap_writeable(map, reg))
239 return -EIO; 243 return -EIO;
240 244
241 if (!regmap_volatile(map, reg)) 245 if (!regmap_volatile(map, reg))
242 return map->cache_ops->write(map, reg, value); 246 return map->cache_ops->write(map, reg, value);
243 247
244 return 0; 248 return 0;
245 } 249 }
246 250
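regcache_read() and regcache_write() are the internal hooks the core wraps around bus I/O. A deliberately simplified sketch of how a write path can use them (this is not the actual _regmap_write() body, just the cache-handling shape):

#include "internal.h"

/* Simplified illustration only. */
static int example_write(struct regmap *map, unsigned int reg,
			 unsigned int val)
{
	int ret;

	if (!map->cache_bypass) {
		ret = regcache_write(map, reg, val);	/* keep the cache coherent */
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;	/* replayed later by regcache_sync() */
			return 0;
		}
	}

	/* ...otherwise hand the value to the bus here (omitted)... */
	return 0;
}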
247 /** 251 /**
248 * regcache_sync: Sync the register cache with the hardware. 252 * regcache_sync: Sync the register cache with the hardware.
249 * 253 *
250 * @map: map to configure. 254 * @map: map to configure.
251 * 255 *
252 * Any registers that should not be synced should be marked as 256 * Any registers that should not be synced should be marked as
253 * volatile. In general drivers can choose not to use the provided 257 * volatile. In general drivers can choose not to use the provided
254 * syncing functionality if they so require. 258 * syncing functionality if they so require.
255 * 259 *
256 * Return a negative value on failure, 0 on success. 260 * Return a negative value on failure, 0 on success.
257 */ 261 */
258 int regcache_sync(struct regmap *map) 262 int regcache_sync(struct regmap *map)
259 { 263 {
260 int ret = 0; 264 int ret = 0;
261 unsigned int i; 265 unsigned int i;
262 const char *name; 266 const char *name;
263 unsigned int bypass; 267 unsigned int bypass;
264 268
265 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 269 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
266 270
267 map->lock(map); 271 map->lock(map);
268 /* Remember the initial bypass state */ 272 /* Remember the initial bypass state */
269 bypass = map->cache_bypass; 273 bypass = map->cache_bypass;
270 dev_dbg(map->dev, "Syncing %s cache\n", 274 dev_dbg(map->dev, "Syncing %s cache\n",
271 map->cache_ops->name); 275 map->cache_ops->name);
272 name = map->cache_ops->name; 276 name = map->cache_ops->name;
273 trace_regcache_sync(map->dev, name, "start"); 277 trace_regcache_sync(map->dev, name, "start");
274 278
275 if (!map->cache_dirty) 279 if (!map->cache_dirty)
276 goto out; 280 goto out;
277 281
278 /* Apply any patch first */ 282 /* Apply any patch first */
279 map->cache_bypass = 1; 283 map->cache_bypass = 1;
280 for (i = 0; i < map->patch_regs; i++) { 284 for (i = 0; i < map->patch_regs; i++) {
285 if (map->patch[i].reg % map->reg_stride) {
286 ret = -EINVAL;
287 goto out;
288 }
281 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); 289 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
282 if (ret != 0) { 290 if (ret != 0) {
283 dev_err(map->dev, "Failed to write %x = %x: %d\n", 291 dev_err(map->dev, "Failed to write %x = %x: %d\n",
284 map->patch[i].reg, map->patch[i].def, ret); 292 map->patch[i].reg, map->patch[i].def, ret);
285 goto out; 293 goto out;
286 } 294 }
287 } 295 }
288 map->cache_bypass = 0; 296 map->cache_bypass = 0;
289 297
290 ret = map->cache_ops->sync(map, 0, map->max_register); 298 ret = map->cache_ops->sync(map, 0, map->max_register);
291 299
292 if (ret == 0) 300 if (ret == 0)
293 map->cache_dirty = false; 301 map->cache_dirty = false;
294 302
295 out: 303 out:
296 trace_regcache_sync(map->dev, name, "stop"); 304 trace_regcache_sync(map->dev, name, "stop");
297 /* Restore the bypass state */ 305 /* Restore the bypass state */
298 map->cache_bypass = bypass; 306 map->cache_bypass = bypass;
299 map->unlock(map); 307 map->unlock(map);
300 308
301 return ret; 309 return ret;
302 } 310 }
303 EXPORT_SYMBOL_GPL(regcache_sync); 311 EXPORT_SYMBOL_GPL(regcache_sync);
304 312
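A typical caller of regcache_sync() is a driver's power management path: switch to cache-only mode while the device is off, mark the cache dirty, then replay the non-default values on the way back up. A hedged sketch, with the foo_dev structure and power sequencing invented for illustration:

#include <linux/regmap.h>

struct foo_dev {			/* hypothetical driver state */
	struct regmap *regmap;
};

static int foo_suspend(struct foo_dev *foo)
{
	regcache_cache_only(foo->regmap, true);
	regcache_mark_dirty(foo->regmap);
	/* ...power the device down here... */
	return 0;
}

static int foo_resume(struct foo_dev *foo)
{
	/* ...power the device back up here... */
	regcache_cache_only(foo->regmap, false);
	return regcache_sync(foo->regmap);	/* rewrite dirty, non-default registers */
}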
305 /** 313 /**
306 * regcache_sync_region: Sync part of the register cache with the hardware. 314 * regcache_sync_region: Sync part of the register cache with the hardware.
307 * 315 *
308 * @map: map to sync. 316 * @map: map to sync.
309 * @min: first register to sync 317 * @min: first register to sync
310 * @max: last register to sync 318 * @max: last register to sync
311 * 319 *
312 * Write all non-default register values in the specified region to 320 * Write all non-default register values in the specified region to
313 * the hardware. 321 * the hardware.
314 * 322 *
315 * Return a negative value on failure, 0 on success. 323 * Return a negative value on failure, 0 on success.
316 */ 324 */
317 int regcache_sync_region(struct regmap *map, unsigned int min, 325 int regcache_sync_region(struct regmap *map, unsigned int min,
318 unsigned int max) 326 unsigned int max)
319 { 327 {
320 int ret = 0; 328 int ret = 0;
321 const char *name; 329 const char *name;
322 unsigned int bypass; 330 unsigned int bypass;
323 331
324 BUG_ON(!map->cache_ops || !map->cache_ops->sync); 332 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
325 333
326 map->lock(map); 334 map->lock(map);
327 335
328 /* Remember the initial bypass state */ 336 /* Remember the initial bypass state */
329 bypass = map->cache_bypass; 337 bypass = map->cache_bypass;
330 338
331 name = map->cache_ops->name; 339 name = map->cache_ops->name;
332 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); 340 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
333 341
334 trace_regcache_sync(map->dev, name, "start region"); 342 trace_regcache_sync(map->dev, name, "start region");
335 343
336 if (!map->cache_dirty) 344 if (!map->cache_dirty)
337 goto out; 345 goto out;
338 346
339 ret = map->cache_ops->sync(map, min, max); 347 ret = map->cache_ops->sync(map, min, max);
340 348
341 out: 349 out:
342 trace_regcache_sync(map->dev, name, "stop region"); 350 trace_regcache_sync(map->dev, name, "stop region");
343 /* Restore the bypass state */ 351 /* Restore the bypass state */
344 map->cache_bypass = bypass; 352 map->cache_bypass = bypass;
345 map->unlock(map); 353 map->unlock(map);
346 354
347 return ret; 355 return ret;
348 } 356 }
349 EXPORT_SYMBOL_GPL(regcache_sync_region); 357 EXPORT_SYMBOL_GPL(regcache_sync_region);
350 358
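Where only a known block of registers has been disturbed, the region variant avoids walking the whole map. A minimal hedged example; the 0x10..0x1c window is invented:

#include <linux/regmap.h>

/* Resync just one hypothetical block, e.g. after a partial reset. */
static int foo_sync_block(struct regmap *map)
{
	return regcache_sync_region(map, 0x10, 0x1c);
}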
351 /** 359 /**
352 * regcache_cache_only: Put a register map into cache only mode 360 * regcache_cache_only: Put a register map into cache only mode
353 * 361 *
354 * @map: map to configure 362 * @map: map to configure
355 * @cache_only: flag if changes should not be written to the hardware 363 * @cache_only: flag if changes should not be written to the hardware
356 * 364 *
357 * When a register map is marked as cache only, writes to the register 365 * When a register map is marked as cache only, writes to the register
358 * map API will only update the register cache; they will not cause 366 * map API will only update the register cache; they will not cause
359 * any hardware changes. This is useful for allowing portions of 367 * any hardware changes. This is useful for allowing portions of
360 * drivers to act as though the device were functioning as normal when 368 * drivers to act as though the device were functioning as normal when
361 * it is disabled for power saving reasons. 369 * it is disabled for power saving reasons.
362 */ 370 */
363 void regcache_cache_only(struct regmap *map, bool enable) 371 void regcache_cache_only(struct regmap *map, bool enable)
364 { 372 {
365 map->lock(map); 373 map->lock(map);
366 WARN_ON(map->cache_bypass && enable); 374 WARN_ON(map->cache_bypass && enable);
367 map->cache_only = enable; 375 map->cache_only = enable;
368 trace_regmap_cache_only(map->dev, enable); 376 trace_regmap_cache_only(map->dev, enable);
369 map->unlock(map); 377 map->unlock(map);
370 } 378 }
371 EXPORT_SYMBOL_GPL(regcache_cache_only); 379 EXPORT_SYMBOL_GPL(regcache_cache_only);
372 380
373 /** 381 /**
374 * regcache_mark_dirty: Mark the register cache as dirty 382 * regcache_mark_dirty: Mark the register cache as dirty
375 * 383 *
376 * @map: map to mark 384 * @map: map to mark
377 * 385 *
378 * Mark the register cache as dirty, for example due to the device 386 * Mark the register cache as dirty, for example due to the device
379 * having been powered down for suspend. If the cache is not marked 387 * having been powered down for suspend. If the cache is not marked
380 * as dirty then the cache sync will be suppressed. 388 * as dirty then the cache sync will be suppressed.
381 */ 389 */
382 void regcache_mark_dirty(struct regmap *map) 390 void regcache_mark_dirty(struct regmap *map)
383 { 391 {
384 map->lock(map); 392 map->lock(map);
385 map->cache_dirty = true; 393 map->cache_dirty = true;
386 map->unlock(map); 394 map->unlock(map);
387 } 395 }
388 EXPORT_SYMBOL_GPL(regcache_mark_dirty); 396 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
389 397
390 /** 398 /**
391 * regcache_cache_bypass: Put a register map into cache bypass mode 399 * regcache_cache_bypass: Put a register map into cache bypass mode
392 * 400 *
393 * @map: map to configure 401 * @map: map to configure
394 * @cache_bypass: flag if changes should not be written to the cache 402 * @cache_bypass: flag if changes should not be written to the cache
395 * 403 *
396 * When a register map is marked with the cache bypass option, writes 404 * When a register map is marked with the cache bypass option, writes
397 * to the register map API will only update the hardware and not the 405 * to the register map API will only update the hardware and not the
398 * cache directly. This is useful when syncing the cache back to 406 * cache directly. This is useful when syncing the cache back to
399 * the hardware. 407 * the hardware.
400 */ 408 */
401 void regcache_cache_bypass(struct regmap *map, bool enable) 409 void regcache_cache_bypass(struct regmap *map, bool enable)
402 { 410 {
403 map->lock(map); 411 map->lock(map);
404 WARN_ON(map->cache_only && enable); 412 WARN_ON(map->cache_only && enable);
405 map->cache_bypass = enable; 413 map->cache_bypass = enable;
406 trace_regmap_cache_bypass(map->dev, enable); 414 trace_regmap_cache_bypass(map->dev, enable);
407 map->unlock(map); 415 map->unlock(map);
408 } 416 }
409 EXPORT_SYMBOL_GPL(regcache_cache_bypass); 417 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
410 418
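The sync code earlier in this file flips the same bypass flag around its patch writes; a driver-facing equivalent is a write that must reach the hardware without being captured in the cache. A hedged sketch, with the register address and value invented:

#include <linux/regmap.h>

/* Kick a hypothetical self-clearing control bit without caching it. */
static int foo_kick(struct regmap *map)
{
	int ret;

	regcache_cache_bypass(map, true);
	ret = regmap_write(map, 0x40, 0x1);	/* straight to the hardware */
	regcache_cache_bypass(map, false);

	return ret;
}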
411 bool regcache_set_val(void *base, unsigned int idx, 419 bool regcache_set_val(void *base, unsigned int idx,
412 unsigned int val, unsigned int word_size) 420 unsigned int val, unsigned int word_size)
413 { 421 {
414 switch (word_size) { 422 switch (word_size) {
415 case 1: { 423 case 1: {
416 u8 *cache = base; 424 u8 *cache = base;
417 if (cache[idx] == val) 425 if (cache[idx] == val)
418 return true; 426 return true;
419 cache[idx] = val; 427 cache[idx] = val;
420 break; 428 break;
421 } 429 }
422 case 2: { 430 case 2: {
423 u16 *cache = base; 431 u16 *cache = base;
424 if (cache[idx] == val) 432 if (cache[idx] == val)
425 return true; 433 return true;
426 cache[idx] = val; 434 cache[idx] = val;
427 break; 435 break;
428 } 436 }
429 case 4: { 437 case 4: {
430 u32 *cache = base; 438 u32 *cache = base;
431 if (cache[idx] == val) 439 if (cache[idx] == val)
432 return true; 440 return true;
433 cache[idx] = val; 441 cache[idx] = val;
434 break; 442 break;
435 } 443 }
436 default: 444 default:
437 BUG(); 445 BUG();
438 } 446 }
439 return false; 447 return false;
440 } 448 }
441 449
442 unsigned int regcache_get_val(const void *base, unsigned int idx, 450 unsigned int regcache_get_val(const void *base, unsigned int idx,
443 unsigned int word_size) 451 unsigned int word_size)
444 { 452 {
445 if (!base) 453 if (!base)
446 return -EINVAL; 454 return -EINVAL;
447 455
448 switch (word_size) { 456 switch (word_size) {
449 case 1: { 457 case 1: {
450 const u8 *cache = base; 458 const u8 *cache = base;
451 return cache[idx]; 459 return cache[idx];
452 } 460 }
453 case 2: { 461 case 2: {
454 const u16 *cache = base; 462 const u16 *cache = base;
455 return cache[idx]; 463 return cache[idx];
456 } 464 }
457 case 4: { 465 case 4: {
458 const u32 *cache = base; 466 const u32 *cache = base;
459 return cache[idx]; 467 return cache[idx];
460 } 468 }
461 default: 469 default:
462 BUG(); 470 BUG();
463 } 471 }
464 /* unreachable */ 472 /* unreachable */
465 return -1; 473 return -1;
466 } 474 }
467 475
468 static int regcache_default_cmp(const void *a, const void *b) 476 static int regcache_default_cmp(const void *a, const void *b)
469 { 477 {
470 const struct reg_default *_a = a; 478 const struct reg_default *_a = a;
471 const struct reg_default *_b = b; 479 const struct reg_default *_b = b;
472 480
473 return _a->reg - _b->reg; 481 return _a->reg - _b->reg;
474 } 482 }
475 483
476 int regcache_lookup_reg(struct regmap *map, unsigned int reg) 484 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
477 { 485 {
478 struct reg_default key; 486 struct reg_default key;
479 struct reg_default *r; 487 struct reg_default *r;
480 488
481 key.reg = reg; 489 key.reg = reg;
482 key.def = 0; 490 key.def = 0;
483 491
484 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, 492 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
485 sizeof(struct reg_default), regcache_default_cmp); 493 sizeof(struct reg_default), regcache_default_cmp);
486 494
487 if (r) 495 if (r)
488 return r - map->reg_defaults; 496 return r - map->reg_defaults;
489 else 497 else
490 return -ENOENT; 498 return -ENOENT;
491 } 499 }
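bsearch() only works if map->reg_defaults stays sorted by register address; the same lookup pattern in a tiny stand-alone form, using a local stand-in for struct reg_default and invented values:

#include <stdio.h>
#include <stdlib.h>

struct reg_default { unsigned int reg; unsigned int def; };

static int default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a, *_b = b;

	return _a->reg - _b->reg;
}

int main(void)
{
	/* Must stay sorted by .reg for the binary search to work. */
	static const struct reg_default defaults[] = {
		{ 0x00, 0x1234 }, { 0x04, 0x0000 }, { 0x08, 0xffff },
	};
	struct reg_default key = { 0x04, 0 };
	const struct reg_default *r;

	r = bsearch(&key, defaults, sizeof(defaults) / sizeof(defaults[0]),
		    sizeof(defaults[0]), default_cmp);
	if (r)
		printf("register %#x defaults to %#x\n", r->reg, r->def);

	return 0;
}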
492 500
drivers/base/regmap/regmap-debugfs.c
1 /* 1 /*
2 * Register map access API - debugfs 2 * Register map access API - debugfs
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/mutex.h> 14 #include <linux/mutex.h>
15 #include <linux/debugfs.h> 15 #include <linux/debugfs.h>
16 #include <linux/uaccess.h> 16 #include <linux/uaccess.h>
17 #include <linux/device.h> 17 #include <linux/device.h>
18 18
19 #include "internal.h" 19 #include "internal.h"
20 20
21 static struct dentry *regmap_debugfs_root; 21 static struct dentry *regmap_debugfs_root;
22 22
23 /* Calculate the length of a fixed format */ 23 /* Calculate the length of a fixed format */
24 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) 24 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
25 { 25 {
26 snprintf(buf, buf_size, "%x", max_val); 26 snprintf(buf, buf_size, "%x", max_val);
27 return strlen(buf); 27 return strlen(buf);
28 } 28 }
29 29
30 static ssize_t regmap_name_read_file(struct file *file, 30 static ssize_t regmap_name_read_file(struct file *file,
31 char __user *user_buf, size_t count, 31 char __user *user_buf, size_t count,
32 loff_t *ppos) 32 loff_t *ppos)
33 { 33 {
34 struct regmap *map = file->private_data; 34 struct regmap *map = file->private_data;
35 int ret; 35 int ret;
36 char *buf; 36 char *buf;
37 37
38 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 38 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
39 if (!buf) 39 if (!buf)
40 return -ENOMEM; 40 return -ENOMEM;
41 41
42 ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name); 42 ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
43 if (ret < 0) { 43 if (ret < 0) {
44 kfree(buf); 44 kfree(buf);
45 return ret; 45 return ret;
46 } 46 }
47 47
48 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 48 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
49 kfree(buf); 49 kfree(buf);
50 return ret; 50 return ret;
51 } 51 }
52 52
53 static const struct file_operations regmap_name_fops = { 53 static const struct file_operations regmap_name_fops = {
54 .open = simple_open, 54 .open = simple_open,
55 .read = regmap_name_read_file, 55 .read = regmap_name_read_file,
56 .llseek = default_llseek, 56 .llseek = default_llseek,
57 }; 57 };
58 58
59 static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, 59 static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
60 size_t count, loff_t *ppos) 60 size_t count, loff_t *ppos)
61 { 61 {
62 int reg_len, val_len, tot_len; 62 int reg_len, val_len, tot_len;
63 size_t buf_pos = 0; 63 size_t buf_pos = 0;
64 loff_t p = 0; 64 loff_t p = 0;
65 ssize_t ret; 65 ssize_t ret;
66 int i; 66 int i;
67 struct regmap *map = file->private_data; 67 struct regmap *map = file->private_data;
68 char *buf; 68 char *buf;
69 unsigned int val; 69 unsigned int val;
70 70
71 if (*ppos < 0 || !count) 71 if (*ppos < 0 || !count)
72 return -EINVAL; 72 return -EINVAL;
73 73
74 buf = kmalloc(count, GFP_KERNEL); 74 buf = kmalloc(count, GFP_KERNEL);
75 if (!buf) 75 if (!buf)
76 return -ENOMEM; 76 return -ENOMEM;
77 77
78 /* Calculate the length of a fixed format */ 78 /* Calculate the length of a fixed format */
79 reg_len = regmap_calc_reg_len(map->max_register, buf, count); 79 reg_len = regmap_calc_reg_len(map->max_register, buf, count);
80 val_len = 2 * map->format.val_bytes; 80 val_len = 2 * map->format.val_bytes;
81 tot_len = reg_len + val_len + 3; /* : \n */ 81 tot_len = reg_len + val_len + 3; /* : \n */
82 82
83 for (i = 0; i < map->max_register + 1; i++) { 83 for (i = 0; i <= map->max_register; i += map->reg_stride) {
84 if (!regmap_readable(map, i)) 84 if (!regmap_readable(map, i))
85 continue; 85 continue;
86 86
87 if (regmap_precious(map, i)) 87 if (regmap_precious(map, i))
88 continue; 88 continue;
89 89
90 /* If we're in the region the user is trying to read */ 90 /* If we're in the region the user is trying to read */
91 if (p >= *ppos) { 91 if (p >= *ppos) {
92 /* ...but not beyond it */ 92 /* ...but not beyond it */
93 if (buf_pos >= count - 1 - tot_len) 93 if (buf_pos >= count - 1 - tot_len)
94 break; 94 break;
95 95
96 /* Format the register */ 96 /* Format the register */
97 snprintf(buf + buf_pos, count - buf_pos, "%.*x: ", 97 snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
98 reg_len, i); 98 reg_len, i);
99 buf_pos += reg_len + 2; 99 buf_pos += reg_len + 2;
100 100
101 /* Format the value, write all X if we can't read */ 101 /* Format the value, write all X if we can't read */
102 ret = regmap_read(map, i, &val); 102 ret = regmap_read(map, i, &val);
103 if (ret == 0) 103 if (ret == 0)
104 snprintf(buf + buf_pos, count - buf_pos, 104 snprintf(buf + buf_pos, count - buf_pos,
105 "%.*x", val_len, val); 105 "%.*x", val_len, val);
106 else 106 else
107 memset(buf + buf_pos, 'X', val_len); 107 memset(buf + buf_pos, 'X', val_len);
108 buf_pos += 2 * map->format.val_bytes; 108 buf_pos += 2 * map->format.val_bytes;
109 109
110 buf[buf_pos++] = '\n'; 110 buf[buf_pos++] = '\n';
111 } 111 }
112 p += tot_len; 112 p += tot_len;
113 } 113 }
114 114
115 ret = buf_pos; 115 ret = buf_pos;
116 116
117 if (copy_to_user(user_buf, buf, buf_pos)) { 117 if (copy_to_user(user_buf, buf, buf_pos)) {
118 ret = -EFAULT; 118 ret = -EFAULT;
119 goto out; 119 goto out;
120 } 120 }
121 121
122 *ppos += buf_pos; 122 *ppos += buf_pos;
123 123
124 out: 124 out:
125 kfree(buf); 125 kfree(buf);
126 return ret; 126 return ret;
127 } 127 }
128 128
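With the stride-aware loop above, the registers file only emits addresses that can exist on the map. For a hypothetical map with max_register = 0xc, reg_stride = 4 and 32-bit values, the file contents would look something like the following (values invented; a row of X marks a readable register whose read failed):

0: 00000012
4: 000000ff
8: XXXXXXXX
c: 00000000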
129 #undef REGMAP_ALLOW_WRITE_DEBUGFS 129 #undef REGMAP_ALLOW_WRITE_DEBUGFS
130 #ifdef REGMAP_ALLOW_WRITE_DEBUGFS 130 #ifdef REGMAP_ALLOW_WRITE_DEBUGFS
131 /* 131 /*
132 * This can be dangerous especially when we have clients such as 132 * This can be dangerous especially when we have clients such as
133 * PMICs, therefore don't provide any real compile time configuration option 133 * PMICs, therefore don't provide any real compile time configuration option
134 * for this feature, people who want to use this will need to modify 134 * for this feature, people who want to use this will need to modify
135 * the source code directly. 135 * the source code directly.
136 */ 136 */
137 static ssize_t regmap_map_write_file(struct file *file, 137 static ssize_t regmap_map_write_file(struct file *file,
138 const char __user *user_buf, 138 const char __user *user_buf,
139 size_t count, loff_t *ppos) 139 size_t count, loff_t *ppos)
140 { 140 {
141 char buf[32]; 141 char buf[32];
142 size_t buf_size; 142 size_t buf_size;
143 char *start = buf; 143 char *start = buf;
144 unsigned long reg, value; 144 unsigned long reg, value;
145 struct regmap *map = file->private_data; 145 struct regmap *map = file->private_data;
146 146
147 buf_size = min(count, (sizeof(buf)-1)); 147 buf_size = min(count, (sizeof(buf)-1));
148 if (copy_from_user(buf, user_buf, buf_size)) 148 if (copy_from_user(buf, user_buf, buf_size))
149 return -EFAULT; 149 return -EFAULT;
150 buf[buf_size] = 0; 150 buf[buf_size] = 0;
151 151
152 while (*start == ' ') 152 while (*start == ' ')
153 start++; 153 start++;
154 reg = simple_strtoul(start, &start, 16); 154 reg = simple_strtoul(start, &start, 16);
155 while (*start == ' ') 155 while (*start == ' ')
156 start++; 156 start++;
157 if (strict_strtoul(start, 16, &value)) 157 if (strict_strtoul(start, 16, &value))
158 return -EINVAL; 158 return -EINVAL;
159 159
160 /* Userspace has been fiddling around behind the kernel's back */ 160 /* Userspace has been fiddling around behind the kernel's back */
161 add_taint(TAINT_USER); 161 add_taint(TAINT_USER);
162 162
163 regmap_write(map, reg, value); 163 regmap_write(map, reg, value);
164 return buf_size; 164 return buf_size;
165 } 165 }
166 #else 166 #else
167 #define regmap_map_write_file NULL 167 #define regmap_map_write_file NULL
168 #endif 168 #endif
169 169
170 static const struct file_operations regmap_map_fops = { 170 static const struct file_operations regmap_map_fops = {
171 .open = simple_open, 171 .open = simple_open,
172 .read = regmap_map_read_file, 172 .read = regmap_map_read_file,
173 .write = regmap_map_write_file, 173 .write = regmap_map_write_file,
174 .llseek = default_llseek, 174 .llseek = default_llseek,
175 }; 175 };
176 176
177 static ssize_t regmap_access_read_file(struct file *file, 177 static ssize_t regmap_access_read_file(struct file *file,
178 char __user *user_buf, size_t count, 178 char __user *user_buf, size_t count,
179 loff_t *ppos) 179 loff_t *ppos)
180 { 180 {
181 int reg_len, tot_len; 181 int reg_len, tot_len;
182 size_t buf_pos = 0; 182 size_t buf_pos = 0;
183 loff_t p = 0; 183 loff_t p = 0;
184 ssize_t ret; 184 ssize_t ret;
185 int i; 185 int i;
186 struct regmap *map = file->private_data; 186 struct regmap *map = file->private_data;
187 char *buf; 187 char *buf;
188 188
189 if (*ppos < 0 || !count) 189 if (*ppos < 0 || !count)
190 return -EINVAL; 190 return -EINVAL;
191 191
192 buf = kmalloc(count, GFP_KERNEL); 192 buf = kmalloc(count, GFP_KERNEL);
193 if (!buf) 193 if (!buf)
194 return -ENOMEM; 194 return -ENOMEM;
195 195
196 /* Calculate the length of a fixed format */ 196 /* Calculate the length of a fixed format */
197 reg_len = regmap_calc_reg_len(map->max_register, buf, count); 197 reg_len = regmap_calc_reg_len(map->max_register, buf, count);
198 tot_len = reg_len + 10; /* ': R W V P\n' */ 198 tot_len = reg_len + 10; /* ': R W V P\n' */
199 199
200 for (i = 0; i < map->max_register + 1; i++) { 200 for (i = 0; i <= map->max_register; i += map->reg_stride) {
201 /* Ignore registers which are neither readable nor writable */ 201 /* Ignore registers which are neither readable nor writable */
202 if (!regmap_readable(map, i) && !regmap_writeable(map, i)) 202 if (!regmap_readable(map, i) && !regmap_writeable(map, i))
203 continue; 203 continue;
204 204
205 /* If we're in the region the user is trying to read */ 205 /* If we're in the region the user is trying to read */
206 if (p >= *ppos) { 206 if (p >= *ppos) {
207 /* ...but not beyond it */ 207 /* ...but not beyond it */
208 if (buf_pos >= count - 1 - tot_len) 208 if (buf_pos >= count - 1 - tot_len)
209 break; 209 break;
210 210
211 /* Format the register */ 211 /* Format the register */
212 snprintf(buf + buf_pos, count - buf_pos, 212 snprintf(buf + buf_pos, count - buf_pos,
213 "%.*x: %c %c %c %c\n", 213 "%.*x: %c %c %c %c\n",
214 reg_len, i, 214 reg_len, i,
215 regmap_readable(map, i) ? 'y' : 'n', 215 regmap_readable(map, i) ? 'y' : 'n',
216 regmap_writeable(map, i) ? 'y' : 'n', 216 regmap_writeable(map, i) ? 'y' : 'n',
217 regmap_volatile(map, i) ? 'y' : 'n', 217 regmap_volatile(map, i) ? 'y' : 'n',
218 regmap_precious(map, i) ? 'y' : 'n'); 218 regmap_precious(map, i) ? 'y' : 'n');
219 219
220 buf_pos += tot_len; 220 buf_pos += tot_len;
221 } 221 }
222 p += tot_len; 222 p += tot_len;
223 } 223 }
224 224
225 ret = buf_pos; 225 ret = buf_pos;
226 226
227 if (copy_to_user(user_buf, buf, buf_pos)) { 227 if (copy_to_user(user_buf, buf, buf_pos)) {
228 ret = -EFAULT; 228 ret = -EFAULT;
229 goto out; 229 goto out;
230 } 230 }
231 231
232 *ppos += buf_pos; 232 *ppos += buf_pos;
233 233
234 out: 234 out:
235 kfree(buf); 235 kfree(buf);
236 return ret; 236 return ret;
237 } 237 }
238 238
239 static const struct file_operations regmap_access_fops = { 239 static const struct file_operations regmap_access_fops = {
240 .open = simple_open, 240 .open = simple_open,
241 .read = regmap_access_read_file, 241 .read = regmap_access_read_file,
242 .llseek = default_llseek, 242 .llseek = default_llseek,
243 }; 243 };
244 244
245 void regmap_debugfs_init(struct regmap *map, const char *name) 245 void regmap_debugfs_init(struct regmap *map, const char *name)
246 { 246 {
247 if (name) { 247 if (name) {
248 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", 248 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
249 dev_name(map->dev), name); 249 dev_name(map->dev), name);
250 name = map->debugfs_name; 250 name = map->debugfs_name;
251 } else { 251 } else {
252 name = dev_name(map->dev); 252 name = dev_name(map->dev);
253 } 253 }
254 254
255 map->debugfs = debugfs_create_dir(name, regmap_debugfs_root); 255 map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
256 if (!map->debugfs) { 256 if (!map->debugfs) {
257 dev_warn(map->dev, "Failed to create debugfs directory\n"); 257 dev_warn(map->dev, "Failed to create debugfs directory\n");
258 return; 258 return;
259 } 259 }
260 260
261 debugfs_create_file("name", 0400, map->debugfs, 261 debugfs_create_file("name", 0400, map->debugfs,
262 map, &regmap_name_fops); 262 map, &regmap_name_fops);
263 263
264 if (map->max_register) { 264 if (map->max_register) {
265 debugfs_create_file("registers", 0400, map->debugfs, 265 debugfs_create_file("registers", 0400, map->debugfs,
266 map, &regmap_map_fops); 266 map, &regmap_map_fops);
267 debugfs_create_file("access", 0400, map->debugfs, 267 debugfs_create_file("access", 0400, map->debugfs,
268 map, &regmap_access_fops); 268 map, &regmap_access_fops);
269 } 269 }
270 270
271 if (map->cache_type) { 271 if (map->cache_type) {
272 debugfs_create_bool("cache_only", 0400, map->debugfs, 272 debugfs_create_bool("cache_only", 0400, map->debugfs,
273 &map->cache_only); 273 &map->cache_only);
274 debugfs_create_bool("cache_dirty", 0400, map->debugfs, 274 debugfs_create_bool("cache_dirty", 0400, map->debugfs,
275 &map->cache_dirty); 275 &map->cache_dirty);
276 debugfs_create_bool("cache_bypass", 0400, map->debugfs, 276 debugfs_create_bool("cache_bypass", 0400, map->debugfs,
277 &map->cache_bypass); 277 &map->cache_bypass);
278 } 278 }
279 } 279 }
280 280
281 void regmap_debugfs_exit(struct regmap *map) 281 void regmap_debugfs_exit(struct regmap *map)
282 { 282 {
283 debugfs_remove_recursive(map->debugfs); 283 debugfs_remove_recursive(map->debugfs);
284 kfree(map->debugfs_name); 284 kfree(map->debugfs_name);
285 } 285 }
286 286
287 void regmap_debugfs_initcall(void) 287 void regmap_debugfs_initcall(void)
288 { 288 {
289 regmap_debugfs_root = debugfs_create_dir("regmap", NULL); 289 regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
290 if (!regmap_debugfs_root) { 290 if (!regmap_debugfs_root) {
291 pr_warn("regmap: Failed to create debugfs root\n"); 291 pr_warn("regmap: Failed to create debugfs root\n");
292 return; 292 return;
293 } 293 }
294 } 294 }
295 295
drivers/base/regmap/regmap-irq.c
1 /* 1 /*
2 * regmap based irq_chip 2 * regmap based irq_chip
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/export.h> 13 #include <linux/export.h>
14 #include <linux/device.h> 14 #include <linux/device.h>
15 #include <linux/regmap.h> 15 #include <linux/regmap.h>
16 #include <linux/irq.h> 16 #include <linux/irq.h>
17 #include <linux/interrupt.h> 17 #include <linux/interrupt.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 19
20 #include "internal.h" 20 #include "internal.h"
21 21
22 struct regmap_irq_chip_data { 22 struct regmap_irq_chip_data {
23 struct mutex lock; 23 struct mutex lock;
24 24
25 struct regmap *map; 25 struct regmap *map;
26 struct regmap_irq_chip *chip; 26 struct regmap_irq_chip *chip;
27 27
28 int irq_base; 28 int irq_base;
29 29
30 void *status_reg_buf; 30 void *status_reg_buf;
31 unsigned int *status_buf; 31 unsigned int *status_buf;
32 unsigned int *mask_buf; 32 unsigned int *mask_buf;
33 unsigned int *mask_buf_def; 33 unsigned int *mask_buf_def;
34 }; 34 };
35 35
36 static inline const 36 static inline const
37 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, 37 struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
38 int irq) 38 int irq)
39 { 39 {
40 return &data->chip->irqs[irq - data->irq_base]; 40 return &data->chip->irqs[irq - data->irq_base];
41 } 41 }
42 42
43 static void regmap_irq_lock(struct irq_data *data) 43 static void regmap_irq_lock(struct irq_data *data)
44 { 44 {
45 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); 45 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
46 46
47 mutex_lock(&d->lock); 47 mutex_lock(&d->lock);
48 } 48 }
49 49
50 static void regmap_irq_sync_unlock(struct irq_data *data) 50 static void regmap_irq_sync_unlock(struct irq_data *data)
51 { 51 {
52 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); 52 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
53 int i, ret; 53 int i, ret;
54 54
55 /* 55 /*
56 * If there's been a change in the mask, write it back to the 56 * If there's been a change in the mask, write it back to the
57 * hardware. We rely on the use of the regmap core cache to 57 * hardware. We rely on the use of the regmap core cache to
58 * suppress pointless writes. 58 * suppress pointless writes.
59 */ 59 */
60 for (i = 0; i < d->chip->num_regs; i++) { 60 for (i = 0; i < d->chip->num_regs; i++) {
61 ret = regmap_update_bits(d->map, d->chip->mask_base + i, 61 ret = regmap_update_bits(d->map, d->chip->mask_base +
62 (i * d->map->reg_stride),
62 d->mask_buf_def[i], d->mask_buf[i]); 63 d->mask_buf_def[i], d->mask_buf[i]);
63 if (ret != 0) 64 if (ret != 0)
64 dev_err(d->map->dev, "Failed to sync masks in %x\n", 65 dev_err(d->map->dev, "Failed to sync masks in %x\n",
65 d->chip->mask_base + i); 66 d->chip->mask_base + (i * d->map->reg_stride));
66 } 67 }
67 68
68 mutex_unlock(&d->lock); 69 mutex_unlock(&d->lock);
69 } 70 }
70 71
71 static void regmap_irq_enable(struct irq_data *data) 72 static void regmap_irq_enable(struct irq_data *data)
72 { 73 {
73 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); 74 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
74 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq); 75 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
75 76
76 d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask; 77 d->mask_buf[irq_data->reg_offset / d->map->reg_stride] &= ~irq_data->mask;
77 } 78 }
78 79
79 static void regmap_irq_disable(struct irq_data *data) 80 static void regmap_irq_disable(struct irq_data *data)
80 { 81 {
81 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); 82 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
82 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq); 83 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
83 84
84 d->mask_buf[irq_data->reg_offset] |= irq_data->mask; 85 d->mask_buf[irq_data->reg_offset / d->map->reg_stride] |= irq_data->mask;
85 } 86 }
86 87
87 static struct irq_chip regmap_irq_chip = { 88 static struct irq_chip regmap_irq_chip = {
88 .name = "regmap", 89 .name = "regmap",
89 .irq_bus_lock = regmap_irq_lock, 90 .irq_bus_lock = regmap_irq_lock,
90 .irq_bus_sync_unlock = regmap_irq_sync_unlock, 91 .irq_bus_sync_unlock = regmap_irq_sync_unlock,
91 .irq_disable = regmap_irq_disable, 92 .irq_disable = regmap_irq_disable,
92 .irq_enable = regmap_irq_enable, 93 .irq_enable = regmap_irq_enable,
93 }; 94 };
94 95
95 static irqreturn_t regmap_irq_thread(int irq, void *d) 96 static irqreturn_t regmap_irq_thread(int irq, void *d)
96 { 97 {
97 struct regmap_irq_chip_data *data = d; 98 struct regmap_irq_chip_data *data = d;
98 struct regmap_irq_chip *chip = data->chip; 99 struct regmap_irq_chip *chip = data->chip;
99 struct regmap *map = data->map; 100 struct regmap *map = data->map;
100 int ret, i; 101 int ret, i;
101 u8 *buf8 = data->status_reg_buf; 102 u8 *buf8 = data->status_reg_buf;
102 u16 *buf16 = data->status_reg_buf; 103 u16 *buf16 = data->status_reg_buf;
103 u32 *buf32 = data->status_reg_buf; 104 u32 *buf32 = data->status_reg_buf;
104 bool handled = false; 105 bool handled = false;
105 106
106 ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf, 107 ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
107 chip->num_regs); 108 chip->num_regs);
108 if (ret != 0) { 109 if (ret != 0) {
109 dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); 110 dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
110 return IRQ_NONE; 111 return IRQ_NONE;
111 } 112 }
112 113
113 /* 114 /*
114 * Ignore masked IRQs and ack if we need to; we ack early so 115 * Ignore masked IRQs and ack if we need to; we ack early so
115 * there is no race between handling and acknowledging the 116 * there is no race between handling and acknowledging the
116 * interrupt. We assume that typically few of the interrupts 117 * interrupt. We assume that typically few of the interrupts
117 * will fire simultaneously so don't worry about overhead from 118 * will fire simultaneously so don't worry about overhead from
118 * doing a write per register. 119 * doing a write per register.
119 */ 120 */
120 for (i = 0; i < data->chip->num_regs; i++) { 121 for (i = 0; i < data->chip->num_regs; i++) {
121 switch (map->format.val_bytes) { 122 switch (map->format.val_bytes) {
122 case 1: 123 case 1:
123 data->status_buf[i] = buf8[i]; 124 data->status_buf[i] = buf8[i];
124 break; 125 break;
125 case 2: 126 case 2:
126 data->status_buf[i] = buf16[i]; 127 data->status_buf[i] = buf16[i];
127 break; 128 break;
128 case 4: 129 case 4:
129 data->status_buf[i] = buf32[i]; 130 data->status_buf[i] = buf32[i];
130 break; 131 break;
131 default: 132 default:
132 BUG(); 133 BUG();
133 return IRQ_NONE; 134 return IRQ_NONE;
134 } 135 }
135 136
136 data->status_buf[i] &= ~data->mask_buf[i]; 137 data->status_buf[i] &= ~data->mask_buf[i];
137 138
138 if (data->status_buf[i] && chip->ack_base) { 139 if (data->status_buf[i] && chip->ack_base) {
139 ret = regmap_write(map, chip->ack_base + i, 140 ret = regmap_write(map, chip->ack_base +
141 (i * map->reg_stride),
140 data->status_buf[i]); 142 data->status_buf[i]);
141 if (ret != 0) 143 if (ret != 0)
142 dev_err(map->dev, "Failed to ack 0x%x: %d\n", 144 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
143 chip->ack_base + i, ret); 145 chip->ack_base + (i * map->reg_stride),
146 ret);
144 } 147 }
145 } 148 }
146 149
147 for (i = 0; i < chip->num_irqs; i++) { 150 for (i = 0; i < chip->num_irqs; i++) {
148 if (data->status_buf[chip->irqs[i].reg_offset] & 151 if (data->status_buf[chip->irqs[i].reg_offset /
149 chip->irqs[i].mask) { 152 map->reg_stride] & chip->irqs[i].mask) {
150 handle_nested_irq(data->irq_base + i); 153 handle_nested_irq(data->irq_base + i);
151 handled = true; 154 handled = true;
152 } 155 }
153 } 156 }
154 157
155 if (handled) 158 if (handled)
156 return IRQ_HANDLED; 159 return IRQ_HANDLED;
157 else 160 else
158 return IRQ_NONE; 161 return IRQ_NONE;
159 } 162 }
160 163
161 /** 164 /**
162 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling 165 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
163 * 166 *
164 * map: The regmap for the device. 167 * map: The regmap for the device.
165 * irq: The IRQ the device uses to signal interrupts 168 * irq: The IRQ the device uses to signal interrupts
166 * irq_flags: The IRQF_ flags to use for the primary interrupt. 169 * irq_flags: The IRQF_ flags to use for the primary interrupt.
167 * chip: Configuration for the interrupt controller. 170 * chip: Configuration for the interrupt controller.
168 * data: Runtime data structure for the controller, allocated on success 171 * data: Runtime data structure for the controller, allocated on success
169 * 172 *
170 * Returns 0 on success or an errno on failure. 173 * Returns 0 on success or an errno on failure.
171 * 174 *
172 * In order for this to be efficient the chip really should use a 175 * In order for this to be efficient the chip really should use a
173 * register cache. The chip driver is responsible for restoring the 176 * register cache. The chip driver is responsible for restoring the
174 * register values used by the IRQ controller over suspend and resume. 177 * register values used by the IRQ controller over suspend and resume.
175 */ 178 */
176 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, 179 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
177 int irq_base, struct regmap_irq_chip *chip, 180 int irq_base, struct regmap_irq_chip *chip,
178 struct regmap_irq_chip_data **data) 181 struct regmap_irq_chip_data **data)
179 { 182 {
180 struct regmap_irq_chip_data *d; 183 struct regmap_irq_chip_data *d;
181 int cur_irq, i; 184 int cur_irq, i;
182 int ret = -ENOMEM; 185 int ret = -ENOMEM;
183 186
187 for (i = 0; i < chip->num_irqs; i++) {
188 if (chip->irqs[i].reg_offset % map->reg_stride)
189 return -EINVAL;
190 if (chip->irqs[i].reg_offset / map->reg_stride >=
191 chip->num_regs)
192 return -EINVAL;
193 }
194
184 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); 195 irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
185 if (irq_base < 0) { 196 if (irq_base < 0) {
186 dev_warn(map->dev, "Failed to allocate IRQs: %d\n", 197 dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
187 irq_base); 198 irq_base);
188 return irq_base; 199 return irq_base;
189 } 200 }
190 201
191 d = kzalloc(sizeof(*d), GFP_KERNEL); 202 d = kzalloc(sizeof(*d), GFP_KERNEL);
192 if (!d) 203 if (!d)
193 return -ENOMEM; 204 return -ENOMEM;
194 205
195 d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, 206 d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
196 GFP_KERNEL); 207 GFP_KERNEL);
197 if (!d->status_buf) 208 if (!d->status_buf)
198 goto err_alloc; 209 goto err_alloc;
199 210
200 d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs, 211 d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
201 GFP_KERNEL); 212 GFP_KERNEL);
202 if (!d->status_reg_buf) 213 if (!d->status_reg_buf)
203 goto err_alloc; 214 goto err_alloc;
204 215
205 d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, 216 d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
206 GFP_KERNEL); 217 GFP_KERNEL);
207 if (!d->mask_buf) 218 if (!d->mask_buf)
208 goto err_alloc; 219 goto err_alloc;
209 220
210 d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs, 221 d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
211 GFP_KERNEL); 222 GFP_KERNEL);
212 if (!d->mask_buf_def) 223 if (!d->mask_buf_def)
213 goto err_alloc; 224 goto err_alloc;
214 225
215 d->map = map; 226 d->map = map;
216 d->chip = chip; 227 d->chip = chip;
217 d->irq_base = irq_base; 228 d->irq_base = irq_base;
218 mutex_init(&d->lock); 229 mutex_init(&d->lock);
219 230
220 for (i = 0; i < chip->num_irqs; i++) 231 for (i = 0; i < chip->num_irqs; i++)
221 d->mask_buf_def[chip->irqs[i].reg_offset] 232 d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
222 |= chip->irqs[i].mask; 233 |= chip->irqs[i].mask;
223 234
224 /* Mask all the interrupts by default */ 235 /* Mask all the interrupts by default */
225 for (i = 0; i < chip->num_regs; i++) { 236 for (i = 0; i < chip->num_regs; i++) {
226 d->mask_buf[i] = d->mask_buf_def[i]; 237 d->mask_buf[i] = d->mask_buf_def[i];
227 ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]); 238 ret = regmap_write(map, chip->mask_base + (i * map->reg_stride),
239 d->mask_buf[i]);
228 if (ret != 0) { 240 if (ret != 0) {
229 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 241 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
230 chip->mask_base + i, ret); 242 chip->mask_base + (i * map->reg_stride), ret);
231 goto err_alloc; 243 goto err_alloc;
232 } 244 }
233 } 245 }
234 246
235 /* Register them with genirq */ 247 /* Register them with genirq */
236 for (cur_irq = irq_base; 248 for (cur_irq = irq_base;
237 cur_irq < chip->num_irqs + irq_base; 249 cur_irq < chip->num_irqs + irq_base;
238 cur_irq++) { 250 cur_irq++) {
239 irq_set_chip_data(cur_irq, d); 251 irq_set_chip_data(cur_irq, d);
240 irq_set_chip_and_handler(cur_irq, &regmap_irq_chip, 252 irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
241 handle_edge_irq); 253 handle_edge_irq);
242 irq_set_nested_thread(cur_irq, 1); 254 irq_set_nested_thread(cur_irq, 1);
243 255
244 /* ARM needs us to explicitly flag the IRQ as valid 256 /* ARM needs us to explicitly flag the IRQ as valid
245 * and will set them noprobe when we do so. */ 257 * and will set them noprobe when we do so. */
246 #ifdef CONFIG_ARM 258 #ifdef CONFIG_ARM
247 set_irq_flags(cur_irq, IRQF_VALID); 259 set_irq_flags(cur_irq, IRQF_VALID);
248 #else 260 #else
249 irq_set_noprobe(cur_irq); 261 irq_set_noprobe(cur_irq);
250 #endif 262 #endif
251 } 263 }
252 264
253 ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, 265 ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
254 chip->name, d); 266 chip->name, d);
255 if (ret != 0) { 267 if (ret != 0) {
256 dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret); 268 dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
257 goto err_alloc; 269 goto err_alloc;
258 } 270 }
259 271
260 return 0; 272 return 0;
261 273
262 err_alloc: 274 err_alloc:
263 kfree(d->mask_buf_def); 275 kfree(d->mask_buf_def);
264 kfree(d->mask_buf); 276 kfree(d->mask_buf);
265 kfree(d->status_reg_buf); 277 kfree(d->status_reg_buf);
266 kfree(d->status_buf); 278 kfree(d->status_buf);
267 kfree(d); 279 kfree(d);
268 return ret; 280 return ret;
269 } 281 }
270 EXPORT_SYMBOL_GPL(regmap_add_irq_chip); 282 EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
271 283
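The new reg_offset checks above translate into a simple rule for chip descriptions: each offset is relative to the *_base registers and must be a whole number of strides, with the corresponding register index below num_regs. A hedged sketch of a description and registration for a hypothetical device with two 32-bit IRQ registers (all names, addresses and flags invented):

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0x0, .mask = BIT(0) },	/* first status/mask register */
	{ .reg_offset = 0x0, .mask = BIT(1) },
	{ .reg_offset = 0x4, .mask = BIT(0) },	/* second register, one stride away */
};

static struct regmap_irq_chip foo_irq_chip = {
	.name = "foo",
	.status_base = 0x10,
	.mask_base = 0x20,
	.ack_base = 0x30,
	.num_regs = 2,
	.irqs = foo_irqs,
	.num_irqs = ARRAY_SIZE(foo_irqs),
};

static struct regmap_irq_chip_data *foo_irq_data;

static int foo_add_irqs(struct regmap *map, int irq)
{
	/* -1 asks irq_alloc_descs() to pick the descriptor range. */
	return regmap_add_irq_chip(map, irq, IRQF_ONESHOT, -1,
				   &foo_irq_chip, &foo_irq_data);
}

Child interrupts would then be requested against regmap_irq_chip_get_base(foo_irq_data) plus the index into foo_irqs.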
272 /** 284 /**
273 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip 285 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
274 * 286 *
275 * @irq: Primary IRQ for the device 287 * @irq: Primary IRQ for the device
276 * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip() 288 * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
277 */ 289 */
278 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) 290 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
279 { 291 {
280 if (!d) 292 if (!d)
281 return; 293 return;
282 294
283 free_irq(irq, d); 295 free_irq(irq, d);
284 kfree(d->mask_buf_def); 296 kfree(d->mask_buf_def);
285 kfree(d->mask_buf); 297 kfree(d->mask_buf);
286 kfree(d->status_reg_buf); 298 kfree(d->status_reg_buf);
287 kfree(d->status_buf); 299 kfree(d->status_buf);
288 kfree(d); 300 kfree(d);
289 } 301 }
290 EXPORT_SYMBOL_GPL(regmap_del_irq_chip); 302 EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
291 303
292 /** 304 /**
293 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip 305 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
294 * 306 *
295 * Useful for drivers to request their own IRQs. 307 * Useful for drivers to request their own IRQs.
296 * 308 *
297 * @data: regmap_irq controller to operate on. 309 * @data: regmap_irq controller to operate on.
298 */ 310 */
299 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data) 311 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
300 { 312 {
301 return data->irq_base; 313 return data->irq_base;
302 } 314 }
303 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base); 315 EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
304 316
drivers/base/regmap/regmap-mmio.c
1 /* 1 /*
2 * Register map access API - MMIO support 2 * Register map access API - MMIO support
3 * 3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation. 8 * version 2, as published by the Free Software Foundation.
9 * 9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT 10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details. 13 * more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License 15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19 #include <linux/err.h> 19 #include <linux/err.h>
20 #include <linux/init.h> 20 #include <linux/init.h>
21 #include <linux/io.h> 21 #include <linux/io.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/regmap.h> 23 #include <linux/regmap.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 25
26 struct regmap_mmio_context { 26 struct regmap_mmio_context {
27 void __iomem *regs; 27 void __iomem *regs;
28 unsigned val_bytes; 28 unsigned val_bytes;
29 }; 29 };
30 30
31 static int regmap_mmio_gather_write(void *context, 31 static int regmap_mmio_gather_write(void *context,
32 const void *reg, size_t reg_size, 32 const void *reg, size_t reg_size,
33 const void *val, size_t val_size) 33 const void *val, size_t val_size)
34 { 34 {
35 struct regmap_mmio_context *ctx = context; 35 struct regmap_mmio_context *ctx = context;
36 u32 offset; 36 u32 offset;
37 37
38 BUG_ON(reg_size != 4); 38 BUG_ON(reg_size != 4);
39 39
40 offset = be32_to_cpup(reg); 40 offset = be32_to_cpup(reg);
41 41
42 while (val_size) { 42 while (val_size) {
43 switch (ctx->val_bytes) { 43 switch (ctx->val_bytes) {
44 case 1: 44 case 1:
45 writeb(*(u8 *)val, ctx->regs + offset); 45 writeb(*(u8 *)val, ctx->regs + offset);
46 break; 46 break;
47 case 2: 47 case 2:
48 writew(be16_to_cpup(val), ctx->regs + offset); 48 writew(be16_to_cpup(val), ctx->regs + offset);
49 break; 49 break;
50 case 4: 50 case 4:
51 writel(be32_to_cpup(val), ctx->regs + offset); 51 writel(be32_to_cpup(val), ctx->regs + offset);
52 break; 52 break;
53 #ifdef CONFIG_64BIT 53 #ifdef CONFIG_64BIT
54 case 8: 54 case 8:
55 writeq(be64_to_cpup(val), ctx->regs + offset); 55 writeq(be64_to_cpup(val), ctx->regs + offset);
56 break; 56 break;
57 #endif 57 #endif
58 default: 58 default:
59 /* Should be caught by regmap_mmio_check_config */ 59 /* Should be caught by regmap_mmio_check_config */
60 BUG(); 60 BUG();
61 } 61 }
62 val_size -= ctx->val_bytes; 62 val_size -= ctx->val_bytes;
63 val += ctx->val_bytes; 63 val += ctx->val_bytes;
64 offset += ctx->val_bytes; 64 offset += ctx->val_bytes;
65 } 65 }
66 66
67 return 0; 67 return 0;
68 } 68 }
69 69
70 static int regmap_mmio_write(void *context, const void *data, size_t count) 70 static int regmap_mmio_write(void *context, const void *data, size_t count)
71 { 71 {
72 BUG_ON(count < 4); 72 BUG_ON(count < 4);
73 73
74 return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4); 74 return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
75 } 75 }
76 76
77 static int regmap_mmio_read(void *context, 77 static int regmap_mmio_read(void *context,
78 const void *reg, size_t reg_size, 78 const void *reg, size_t reg_size,
79 void *val, size_t val_size) 79 void *val, size_t val_size)
80 { 80 {
81 struct regmap_mmio_context *ctx = context; 81 struct regmap_mmio_context *ctx = context;
82 u32 offset; 82 u32 offset;
83 83
84 BUG_ON(reg_size != 4); 84 BUG_ON(reg_size != 4);
85 85
86 offset = be32_to_cpup(reg); 86 offset = be32_to_cpup(reg);
87 87
88 while (val_size) { 88 while (val_size) {
89 switch (ctx->val_bytes) { 89 switch (ctx->val_bytes) {
90 case 1: 90 case 1:
91 *(u8 *)val = readb(ctx->regs + offset); 91 *(u8 *)val = readb(ctx->regs + offset);
92 break; 92 break;
93 case 2: 93 case 2:
94 *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset)); 94 *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
95 break; 95 break;
96 case 4: 96 case 4:
97 *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset)); 97 *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
98 break; 98 break;
99 #ifdef CONFIG_64BIT 99 #ifdef CONFIG_64BIT
100 case 8: 100 case 8:
101 *(u64 *)val = cpu_to_be64(readq(ctx->regs + offset)); 101 *(u64 *)val = cpu_to_be64(readq(ctx->regs + offset));
102 break; 102 break;
103 #endif 103 #endif
104 default: 104 default:
105 /* Should be caught by regmap_mmio_check_config */ 105 /* Should be caught by regmap_mmio_check_config */
106 BUG(); 106 BUG();
107 } 107 }
108 val_size -= ctx->val_bytes; 108 val_size -= ctx->val_bytes;
109 val += ctx->val_bytes; 109 val += ctx->val_bytes;
110 offset += ctx->val_bytes; 110 offset += ctx->val_bytes;
111 } 111 }
112 112
113 return 0; 113 return 0;
114 } 114 }
115 115
116 static void regmap_mmio_free_context(void *context) 116 static void regmap_mmio_free_context(void *context)
117 { 117 {
118 kfree(context); 118 kfree(context);
119 } 119 }
120 120
121 static struct regmap_bus regmap_mmio = { 121 static struct regmap_bus regmap_mmio = {
122 .fast_io = true, 122 .fast_io = true,
123 .write = regmap_mmio_write, 123 .write = regmap_mmio_write,
124 .gather_write = regmap_mmio_gather_write, 124 .gather_write = regmap_mmio_gather_write,
125 .read = regmap_mmio_read, 125 .read = regmap_mmio_read,
126 .free_context = regmap_mmio_free_context, 126 .free_context = regmap_mmio_free_context,
127 }; 127 };
128 128
129 struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, 129 struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
130 const struct regmap_config *config) 130 const struct regmap_config *config)
131 { 131 {
132 struct regmap_mmio_context *ctx; 132 struct regmap_mmio_context *ctx;
133 int min_stride;
133 134
134 if (config->reg_bits != 32) 135 if (config->reg_bits != 32)
135 return ERR_PTR(-EINVAL); 136 return ERR_PTR(-EINVAL);
136 137
137 if (config->pad_bits) 138 if (config->pad_bits)
138 return ERR_PTR(-EINVAL); 139 return ERR_PTR(-EINVAL);
139 140
140 switch (config->val_bits) { 141 switch (config->val_bits) {
141 case 8: 142 case 8:
143 /* The core treats 0 as 1 */
144 min_stride = 0;
145 break;
142 case 16: 146 case 16:
147 min_stride = 2;
148 break;
143 case 32: 149 case 32:
150 min_stride = 4;
151 break;
144 #ifdef CONFIG_64BIT 152 #ifdef CONFIG_64BIT
145 case 64: 153 case 64:
154 min_stride = 8;
155 break;
146 #endif 156 #endif
147 break; 157 break;
148 default: 158 default:
149 return ERR_PTR(-EINVAL); 159 return ERR_PTR(-EINVAL);
150 } 160 }
161
162 if (config->reg_stride < min_stride)
163 return ERR_PTR(-EINVAL);
151 164
152 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 165 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
153 if (!ctx) 166 if (!ctx)
154 return ERR_PTR(-ENOMEM); 167 return ERR_PTR(-ENOMEM);
155 168
156 ctx->regs = regs; 169 ctx->regs = regs;
157 ctx->val_bytes = config->val_bits / 8; 170 ctx->val_bytes = config->val_bits / 8;
158 171
159 return ctx; 172 return ctx;
160 } 173 }
161 174
162 /** 175 /**
163 * regmap_init_mmio(): Initialise register map 176 * regmap_init_mmio(): Initialise register map
164 * 177 *
165 * @dev: Device that will be interacted with 178 * @dev: Device that will be interacted with
166 * @regs: Pointer to memory-mapped IO region 179 * @regs: Pointer to memory-mapped IO region
167 * @config: Configuration for register map 180 * @config: Configuration for register map
168 * 181 *
169 * The return value will be an ERR_PTR() on error or a valid pointer to 182 * The return value will be an ERR_PTR() on error or a valid pointer to
170 * a struct regmap. 183 * a struct regmap.
171 */ 184 */
172 struct regmap *regmap_init_mmio(struct device *dev, 185 struct regmap *regmap_init_mmio(struct device *dev,
173 void __iomem *regs, 186 void __iomem *regs,
174 const struct regmap_config *config) 187 const struct regmap_config *config)
175 { 188 {
176 struct regmap_mmio_context *ctx; 189 struct regmap_mmio_context *ctx;
177 190
178 ctx = regmap_mmio_gen_context(regs, config); 191 ctx = regmap_mmio_gen_context(regs, config);
179 if (IS_ERR(ctx)) 192 if (IS_ERR(ctx))
180 return ERR_CAST(ctx); 193 return ERR_CAST(ctx);
181 194
182 return regmap_init(dev, &regmap_mmio, ctx, config); 195 return regmap_init(dev, &regmap_mmio, ctx, config);
183 } 196 }
184 EXPORT_SYMBOL_GPL(regmap_init_mmio); 197 EXPORT_SYMBOL_GPL(regmap_init_mmio);
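As a usage sketch (the device names, register layout and the ioremapped base are invented, not part of this patch): an MMIO user with 32-bit registers sets reg_stride to the value width in bytes, which is exactly what the min_stride check in regmap_mmio_gen_context() above enforces.

/* Sketch only: a block of 32-bit MMIO registers on a 4-byte stride. */
#include <linux/device.h>
#include <linux/regmap.h>

static const struct regmap_config mychip_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,	/* >= the 4-byte min_stride for 32-bit values */
	.max_register	= 0xfc,
};

static struct regmap *mychip_init_regmap(struct device *dev,
					 void __iomem *base)
{
	/* base is assumed to come from ioremap() of the register resource */
	return regmap_init_mmio(dev, base, &mychip_regmap_config);
}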
185 198
186 /** 199 /**
187 * devm_regmap_init_mmio(): Initialise managed register map 200 * devm_regmap_init_mmio(): Initialise managed register map
188 * 201 *
189 * @dev: Device that will be interacted with 202 * @dev: Device that will be interacted with
190 * @regs: Pointer to memory-mapped IO region 203 * @regs: Pointer to memory-mapped IO region
191 * @config: Configuration for register map 204 * @config: Configuration for register map
192 * 205 *
193 * The return value will be an ERR_PTR() on error or a valid pointer 206 * The return value will be an ERR_PTR() on error or a valid pointer
194 * to a struct regmap. The regmap will be automatically freed by the 207 * to a struct regmap. The regmap will be automatically freed by the
195 * device management code. 208 * device management code.
196 */ 209 */
197 struct regmap *devm_regmap_init_mmio(struct device *dev, 210 struct regmap *devm_regmap_init_mmio(struct device *dev,
198 void __iomem *regs, 211 void __iomem *regs,
199 const struct regmap_config *config) 212 const struct regmap_config *config)
200 { 213 {
201 struct regmap_mmio_context *ctx; 214 struct regmap_mmio_context *ctx;
202 215
203 ctx = regmap_mmio_gen_context(regs, config); 216 ctx = regmap_mmio_gen_context(regs, config);
204 if (IS_ERR(ctx)) 217 if (IS_ERR(ctx))
205 return ERR_CAST(ctx); 218 return ERR_CAST(ctx);
206 219
207 return devm_regmap_init(dev, &regmap_mmio, ctx, config); 220 return devm_regmap_init(dev, &regmap_mmio, ctx, config);
208 } 221 }
209 EXPORT_SYMBOL_GPL(devm_regmap_init_mmio); 222 EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
210 223
211 MODULE_LICENSE("GPL v2"); 224 MODULE_LICENSE("GPL v2");
212 225
drivers/base/regmap/regmap.c
1 /* 1 /*
2 * Register map access API 2 * Register map access API
3 * 3 *
4 * Copyright 2011 Wolfson Microelectronics plc 4 * Copyright 2011 Wolfson Microelectronics plc
5 * 5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13 #include <linux/device.h> 13 #include <linux/device.h>
14 #include <linux/slab.h> 14 #include <linux/slab.h>
15 #include <linux/export.h> 15 #include <linux/export.h>
16 #include <linux/mutex.h> 16 #include <linux/mutex.h>
17 #include <linux/err.h> 17 #include <linux/err.h>
18 18
19 #define CREATE_TRACE_POINTS 19 #define CREATE_TRACE_POINTS
20 #include <trace/events/regmap.h> 20 #include <trace/events/regmap.h>
21 21
22 #include "internal.h" 22 #include "internal.h"
23 23
24 bool regmap_writeable(struct regmap *map, unsigned int reg) 24 bool regmap_writeable(struct regmap *map, unsigned int reg)
25 { 25 {
26 if (map->max_register && reg > map->max_register) 26 if (map->max_register && reg > map->max_register)
27 return false; 27 return false;
28 28
29 if (map->writeable_reg) 29 if (map->writeable_reg)
30 return map->writeable_reg(map->dev, reg); 30 return map->writeable_reg(map->dev, reg);
31 31
32 return true; 32 return true;
33 } 33 }
34 34
35 bool regmap_readable(struct regmap *map, unsigned int reg) 35 bool regmap_readable(struct regmap *map, unsigned int reg)
36 { 36 {
37 if (map->max_register && reg > map->max_register) 37 if (map->max_register && reg > map->max_register)
38 return false; 38 return false;
39 39
40 if (map->format.format_write) 40 if (map->format.format_write)
41 return false; 41 return false;
42 42
43 if (map->readable_reg) 43 if (map->readable_reg)
44 return map->readable_reg(map->dev, reg); 44 return map->readable_reg(map->dev, reg);
45 45
46 return true; 46 return true;
47 } 47 }
48 48
49 bool regmap_volatile(struct regmap *map, unsigned int reg) 49 bool regmap_volatile(struct regmap *map, unsigned int reg)
50 { 50 {
51 if (!regmap_readable(map, reg)) 51 if (!regmap_readable(map, reg))
52 return false; 52 return false;
53 53
54 if (map->volatile_reg) 54 if (map->volatile_reg)
55 return map->volatile_reg(map->dev, reg); 55 return map->volatile_reg(map->dev, reg);
56 56
57 return true; 57 return true;
58 } 58 }
59 59
60 bool regmap_precious(struct regmap *map, unsigned int reg) 60 bool regmap_precious(struct regmap *map, unsigned int reg)
61 { 61 {
62 if (!regmap_readable(map, reg)) 62 if (!regmap_readable(map, reg))
63 return false; 63 return false;
64 64
65 if (map->precious_reg) 65 if (map->precious_reg)
66 return map->precious_reg(map->dev, reg); 66 return map->precious_reg(map->dev, reg);
67 67
68 return false; 68 return false;
69 } 69 }
70 70
71 static bool regmap_volatile_range(struct regmap *map, unsigned int reg, 71 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
72 unsigned int num) 72 unsigned int num)
73 { 73 {
74 unsigned int i; 74 unsigned int i;
75 75
76 for (i = 0; i < num; i++) 76 for (i = 0; i < num; i++)
77 if (!regmap_volatile(map, reg + i)) 77 if (!regmap_volatile(map, reg + i))
78 return false; 78 return false;
79 79
80 return true; 80 return true;
81 } 81 }
82 82
83 static void regmap_format_2_6_write(struct regmap *map, 83 static void regmap_format_2_6_write(struct regmap *map,
84 unsigned int reg, unsigned int val) 84 unsigned int reg, unsigned int val)
85 { 85 {
86 u8 *out = map->work_buf; 86 u8 *out = map->work_buf;
87 87
88 *out = (reg << 6) | val; 88 *out = (reg << 6) | val;
89 } 89 }
90 90
91 static void regmap_format_4_12_write(struct regmap *map, 91 static void regmap_format_4_12_write(struct regmap *map,
92 unsigned int reg, unsigned int val) 92 unsigned int reg, unsigned int val)
93 { 93 {
94 __be16 *out = map->work_buf; 94 __be16 *out = map->work_buf;
95 *out = cpu_to_be16((reg << 12) | val); 95 *out = cpu_to_be16((reg << 12) | val);
96 } 96 }
97 97
98 static void regmap_format_7_9_write(struct regmap *map, 98 static void regmap_format_7_9_write(struct regmap *map,
99 unsigned int reg, unsigned int val) 99 unsigned int reg, unsigned int val)
100 { 100 {
101 __be16 *out = map->work_buf; 101 __be16 *out = map->work_buf;
102 *out = cpu_to_be16((reg << 9) | val); 102 *out = cpu_to_be16((reg << 9) | val);
103 } 103 }
104 104
105 static void regmap_format_10_14_write(struct regmap *map, 105 static void regmap_format_10_14_write(struct regmap *map,
106 unsigned int reg, unsigned int val) 106 unsigned int reg, unsigned int val)
107 { 107 {
108 u8 *out = map->work_buf; 108 u8 *out = map->work_buf;
109 109
110 out[2] = val; 110 out[2] = val;
111 out[1] = (val >> 8) | (reg << 6); 111 out[1] = (val >> 8) | (reg << 6);
112 out[0] = reg >> 2; 112 out[0] = reg >> 2;
113 } 113 }
114 114
115 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) 115 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
116 { 116 {
117 u8 *b = buf; 117 u8 *b = buf;
118 118
119 b[0] = val << shift; 119 b[0] = val << shift;
120 } 120 }
121 121
122 static void regmap_format_16(void *buf, unsigned int val, unsigned int shift) 122 static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
123 { 123 {
124 __be16 *b = buf; 124 __be16 *b = buf;
125 125
126 b[0] = cpu_to_be16(val << shift); 126 b[0] = cpu_to_be16(val << shift);
127 } 127 }
128 128
129 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) 129 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
130 { 130 {
131 u8 *b = buf; 131 u8 *b = buf;
132 132
133 val <<= shift; 133 val <<= shift;
134 134
135 b[0] = val >> 16; 135 b[0] = val >> 16;
136 b[1] = val >> 8; 136 b[1] = val >> 8;
137 b[2] = val; 137 b[2] = val;
138 } 138 }
139 139
140 static void regmap_format_32(void *buf, unsigned int val, unsigned int shift) 140 static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
141 { 141 {
142 __be32 *b = buf; 142 __be32 *b = buf;
143 143
144 b[0] = cpu_to_be32(val << shift); 144 b[0] = cpu_to_be32(val << shift);
145 } 145 }
146 146
147 static unsigned int regmap_parse_8(void *buf) 147 static unsigned int regmap_parse_8(void *buf)
148 { 148 {
149 u8 *b = buf; 149 u8 *b = buf;
150 150
151 return b[0]; 151 return b[0];
152 } 152 }
153 153
154 static unsigned int regmap_parse_16(void *buf) 154 static unsigned int regmap_parse_16(void *buf)
155 { 155 {
156 __be16 *b = buf; 156 __be16 *b = buf;
157 157
158 b[0] = be16_to_cpu(b[0]); 158 b[0] = be16_to_cpu(b[0]);
159 159
160 return b[0]; 160 return b[0];
161 } 161 }
162 162
163 static unsigned int regmap_parse_24(void *buf) 163 static unsigned int regmap_parse_24(void *buf)
164 { 164 {
165 u8 *b = buf; 165 u8 *b = buf;
166 unsigned int ret = b[2]; 166 unsigned int ret = b[2];
167 ret |= ((unsigned int)b[1]) << 8; 167 ret |= ((unsigned int)b[1]) << 8;
168 ret |= ((unsigned int)b[0]) << 16; 168 ret |= ((unsigned int)b[0]) << 16;
169 169
170 return ret; 170 return ret;
171 } 171 }
172 172
173 static unsigned int regmap_parse_32(void *buf) 173 static unsigned int regmap_parse_32(void *buf)
174 { 174 {
175 __be32 *b = buf; 175 __be32 *b = buf;
176 176
177 b[0] = be32_to_cpu(b[0]); 177 b[0] = be32_to_cpu(b[0]);
178 178
179 return b[0]; 179 return b[0];
180 } 180 }
181 181
182 static void regmap_lock_mutex(struct regmap *map) 182 static void regmap_lock_mutex(struct regmap *map)
183 { 183 {
184 mutex_lock(&map->mutex); 184 mutex_lock(&map->mutex);
185 } 185 }
186 186
187 static void regmap_unlock_mutex(struct regmap *map) 187 static void regmap_unlock_mutex(struct regmap *map)
188 { 188 {
189 mutex_unlock(&map->mutex); 189 mutex_unlock(&map->mutex);
190 } 190 }
191 191
192 static void regmap_lock_spinlock(struct regmap *map) 192 static void regmap_lock_spinlock(struct regmap *map)
193 { 193 {
194 spin_lock(&map->spinlock); 194 spin_lock(&map->spinlock);
195 } 195 }
196 196
197 static void regmap_unlock_spinlock(struct regmap *map) 197 static void regmap_unlock_spinlock(struct regmap *map)
198 { 198 {
199 spin_unlock(&map->spinlock); 199 spin_unlock(&map->spinlock);
200 } 200 }
201 201
202 /** 202 /**
203 * regmap_init(): Initialise register map 203 * regmap_init(): Initialise register map
204 * 204 *
205 * @dev: Device that will be interacted with 205 * @dev: Device that will be interacted with
206 * @bus: Bus-specific callbacks to use with device 206 * @bus: Bus-specific callbacks to use with device
207 * @bus_context: Data passed to bus-specific callbacks 207 * @bus_context: Data passed to bus-specific callbacks
208 * @config: Configuration for register map 208 * @config: Configuration for register map
209 * 209 *
210 * The return value will be an ERR_PTR() on error or a valid pointer to 210 * The return value will be an ERR_PTR() on error or a valid pointer to
211 * a struct regmap. This function should generally not be called 211 * a struct regmap. This function should generally not be called
212 * directly, it should be called by bus-specific init functions. 212 * directly, it should be called by bus-specific init functions.
213 */ 213 */
214 struct regmap *regmap_init(struct device *dev, 214 struct regmap *regmap_init(struct device *dev,
215 const struct regmap_bus *bus, 215 const struct regmap_bus *bus,
216 void *bus_context, 216 void *bus_context,
217 const struct regmap_config *config) 217 const struct regmap_config *config)
218 { 218 {
219 struct regmap *map; 219 struct regmap *map;
220 int ret = -EINVAL; 220 int ret = -EINVAL;
221 221
222 if (!bus || !config) 222 if (!bus || !config)
223 goto err; 223 goto err;
224 224
225 map = kzalloc(sizeof(*map), GFP_KERNEL); 225 map = kzalloc(sizeof(*map), GFP_KERNEL);
226 if (map == NULL) { 226 if (map == NULL) {
227 ret = -ENOMEM; 227 ret = -ENOMEM;
228 goto err; 228 goto err;
229 } 229 }
230 230
231 if (bus->fast_io) { 231 if (bus->fast_io) {
232 spin_lock_init(&map->spinlock); 232 spin_lock_init(&map->spinlock);
233 map->lock = regmap_lock_spinlock; 233 map->lock = regmap_lock_spinlock;
234 map->unlock = regmap_unlock_spinlock; 234 map->unlock = regmap_unlock_spinlock;
235 } else { 235 } else {
236 mutex_init(&map->mutex); 236 mutex_init(&map->mutex);
237 map->lock = regmap_lock_mutex; 237 map->lock = regmap_lock_mutex;
238 map->unlock = regmap_unlock_mutex; 238 map->unlock = regmap_unlock_mutex;
239 } 239 }
240 map->format.buf_size = (config->reg_bits + config->val_bits) / 8; 240 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
241 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 241 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
242 map->format.pad_bytes = config->pad_bits / 8; 242 map->format.pad_bytes = config->pad_bits / 8;
243 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 243 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
244 map->format.buf_size += map->format.pad_bytes; 244 map->format.buf_size += map->format.pad_bytes;
245 map->reg_shift = config->pad_bits % 8; 245 map->reg_shift = config->pad_bits % 8;
246 if (config->reg_stride)
247 map->reg_stride = config->reg_stride;
248 else
249 map->reg_stride = 1;
246 map->dev = dev; 250 map->dev = dev;
247 map->bus = bus; 251 map->bus = bus;
248 map->bus_context = bus_context; 252 map->bus_context = bus_context;
249 map->max_register = config->max_register; 253 map->max_register = config->max_register;
250 map->writeable_reg = config->writeable_reg; 254 map->writeable_reg = config->writeable_reg;
251 map->readable_reg = config->readable_reg; 255 map->readable_reg = config->readable_reg;
252 map->volatile_reg = config->volatile_reg; 256 map->volatile_reg = config->volatile_reg;
253 map->precious_reg = config->precious_reg; 257 map->precious_reg = config->precious_reg;
254 map->cache_type = config->cache_type; 258 map->cache_type = config->cache_type;
255 259
256 if (config->read_flag_mask || config->write_flag_mask) { 260 if (config->read_flag_mask || config->write_flag_mask) {
257 map->read_flag_mask = config->read_flag_mask; 261 map->read_flag_mask = config->read_flag_mask;
258 map->write_flag_mask = config->write_flag_mask; 262 map->write_flag_mask = config->write_flag_mask;
259 } else { 263 } else {
260 map->read_flag_mask = bus->read_flag_mask; 264 map->read_flag_mask = bus->read_flag_mask;
261 } 265 }
262 266
263 switch (config->reg_bits + map->reg_shift) { 267 switch (config->reg_bits + map->reg_shift) {
264 case 2: 268 case 2:
265 switch (config->val_bits) { 269 switch (config->val_bits) {
266 case 6: 270 case 6:
267 map->format.format_write = regmap_format_2_6_write; 271 map->format.format_write = regmap_format_2_6_write;
268 break; 272 break;
269 default: 273 default:
270 goto err_map; 274 goto err_map;
271 } 275 }
272 break; 276 break;
273 277
274 case 4: 278 case 4:
275 switch (config->val_bits) { 279 switch (config->val_bits) {
276 case 12: 280 case 12:
277 map->format.format_write = regmap_format_4_12_write; 281 map->format.format_write = regmap_format_4_12_write;
278 break; 282 break;
279 default: 283 default:
280 goto err_map; 284 goto err_map;
281 } 285 }
282 break; 286 break;
283 287
284 case 7: 288 case 7:
285 switch (config->val_bits) { 289 switch (config->val_bits) {
286 case 9: 290 case 9:
287 map->format.format_write = regmap_format_7_9_write; 291 map->format.format_write = regmap_format_7_9_write;
288 break; 292 break;
289 default: 293 default:
290 goto err_map; 294 goto err_map;
291 } 295 }
292 break; 296 break;
293 297
294 case 10: 298 case 10:
295 switch (config->val_bits) { 299 switch (config->val_bits) {
296 case 14: 300 case 14:
297 map->format.format_write = regmap_format_10_14_write; 301 map->format.format_write = regmap_format_10_14_write;
298 break; 302 break;
299 default: 303 default:
300 goto err_map; 304 goto err_map;
301 } 305 }
302 break; 306 break;
303 307
304 case 8: 308 case 8:
305 map->format.format_reg = regmap_format_8; 309 map->format.format_reg = regmap_format_8;
306 break; 310 break;
307 311
308 case 16: 312 case 16:
309 map->format.format_reg = regmap_format_16; 313 map->format.format_reg = regmap_format_16;
310 break; 314 break;
311 315
312 case 32: 316 case 32:
313 map->format.format_reg = regmap_format_32; 317 map->format.format_reg = regmap_format_32;
314 break; 318 break;
315 319
316 default: 320 default:
317 goto err_map; 321 goto err_map;
318 } 322 }
319 323
320 switch (config->val_bits) { 324 switch (config->val_bits) {
321 case 8: 325 case 8:
322 map->format.format_val = regmap_format_8; 326 map->format.format_val = regmap_format_8;
323 map->format.parse_val = regmap_parse_8; 327 map->format.parse_val = regmap_parse_8;
324 break; 328 break;
325 case 16: 329 case 16:
326 map->format.format_val = regmap_format_16; 330 map->format.format_val = regmap_format_16;
327 map->format.parse_val = regmap_parse_16; 331 map->format.parse_val = regmap_parse_16;
328 break; 332 break;
329 case 24: 333 case 24:
330 map->format.format_val = regmap_format_24; 334 map->format.format_val = regmap_format_24;
331 map->format.parse_val = regmap_parse_24; 335 map->format.parse_val = regmap_parse_24;
332 break; 336 break;
333 case 32: 337 case 32:
334 map->format.format_val = regmap_format_32; 338 map->format.format_val = regmap_format_32;
335 map->format.parse_val = regmap_parse_32; 339 map->format.parse_val = regmap_parse_32;
336 break; 340 break;
337 } 341 }
338 342
339 if (!map->format.format_write && 343 if (!map->format.format_write &&
340 !(map->format.format_reg && map->format.format_val)) 344 !(map->format.format_reg && map->format.format_val))
341 goto err_map; 345 goto err_map;
342 346
343 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); 347 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
344 if (map->work_buf == NULL) { 348 if (map->work_buf == NULL) {
345 ret = -ENOMEM; 349 ret = -ENOMEM;
346 goto err_map; 350 goto err_map;
347 } 351 }
348 352
349 regmap_debugfs_init(map, config->name); 353 regmap_debugfs_init(map, config->name);
350 354
351 ret = regcache_init(map, config); 355 ret = regcache_init(map, config);
352 if (ret < 0) 356 if (ret < 0)
353 goto err_free_workbuf; 357 goto err_free_workbuf;
354 358
355 return map; 359 return map;
356 360
357 err_free_workbuf: 361 err_free_workbuf:
358 kfree(map->work_buf); 362 kfree(map->work_buf);
359 err_map: 363 err_map:
360 kfree(map); 364 kfree(map);
361 err: 365 err:
362 return ERR_PTR(ret); 366 return ERR_PTR(ret);
363 } 367 }
364 EXPORT_SYMBOL_GPL(regmap_init); 368 EXPORT_SYMBOL_GPL(regmap_init);
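The reg_stride handling added above treats an unset (zero) stride as 1, which is what users of serial-oriented buses typically want; a minimal sketch of such a configuration (the device is invented):

/* Sketch only: an 8-bit-register device on a serial bus, no striding. */
#include <linux/regmap.h>

static const struct regmap_config mycodec_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	/* .reg_stride left at 0; regmap_init() treats that as a stride of 1 */
	.max_register	= 0x7f,
};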
365 369
366 static void devm_regmap_release(struct device *dev, void *res) 370 static void devm_regmap_release(struct device *dev, void *res)
367 { 371 {
368 regmap_exit(*(struct regmap **)res); 372 regmap_exit(*(struct regmap **)res);
369 } 373 }
370 374
371 /** 375 /**
372 * devm_regmap_init(): Initialise managed register map 376 * devm_regmap_init(): Initialise managed register map
373 * 377 *
374 * @dev: Device that will be interacted with 378 * @dev: Device that will be interacted with
375 * @bus: Bus-specific callbacks to use with device 379 * @bus: Bus-specific callbacks to use with device
376 * @bus_context: Data passed to bus-specific callbacks 380 * @bus_context: Data passed to bus-specific callbacks
377 * @config: Configuration for register map 381 * @config: Configuration for register map
378 * 382 *
379 * The return value will be an ERR_PTR() on error or a valid pointer 383 * The return value will be an ERR_PTR() on error or a valid pointer
380 * to a struct regmap. This function should generally not be called 384 * to a struct regmap. This function should generally not be called
381 * directly, it should be called by bus-specific init functions. The 385 * directly, it should be called by bus-specific init functions. The
382 * map will be automatically freed by the device management code. 386 * map will be automatically freed by the device management code.
383 */ 387 */
384 struct regmap *devm_regmap_init(struct device *dev, 388 struct regmap *devm_regmap_init(struct device *dev,
385 const struct regmap_bus *bus, 389 const struct regmap_bus *bus,
386 void *bus_context, 390 void *bus_context,
387 const struct regmap_config *config) 391 const struct regmap_config *config)
388 { 392 {
389 struct regmap **ptr, *regmap; 393 struct regmap **ptr, *regmap;
390 394
391 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); 395 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
392 if (!ptr) 396 if (!ptr)
393 return ERR_PTR(-ENOMEM); 397 return ERR_PTR(-ENOMEM);
394 398
395 regmap = regmap_init(dev, bus, bus_context, config); 399 regmap = regmap_init(dev, bus, bus_context, config);
396 if (!IS_ERR(regmap)) { 400 if (!IS_ERR(regmap)) {
397 *ptr = regmap; 401 *ptr = regmap;
398 devres_add(dev, ptr); 402 devres_add(dev, ptr);
399 } else { 403 } else {
400 devres_free(ptr); 404 devres_free(ptr);
401 } 405 }
402 406
403 return regmap; 407 return regmap;
404 } 408 }
405 EXPORT_SYMBOL_GPL(devm_regmap_init); 409 EXPORT_SYMBOL_GPL(devm_regmap_init);
406 410
407 /** 411 /**
408 * regmap_reinit_cache(): Reinitialise the current register cache 412 * regmap_reinit_cache(): Reinitialise the current register cache
409 * 413 *
410 * @map: Register map to operate on. 414 * @map: Register map to operate on.
411 * @config: New configuration. Only the cache data will be used. 415 * @config: New configuration. Only the cache data will be used.
412 * 416 *
413 * Discard any existing register cache for the map and initialize a 417 * Discard any existing register cache for the map and initialize a
414 * new cache. This can be used to restore the cache to defaults or to 418 * new cache. This can be used to restore the cache to defaults or to
415 * update the cache configuration to reflect runtime discovery of the 419 * update the cache configuration to reflect runtime discovery of the
416 * hardware. 420 * hardware.
417 */ 421 */
418 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) 422 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
419 { 423 {
420 int ret; 424 int ret;
421 425
422 map->lock(map); 426 map->lock(map);
423 427
424 regcache_exit(map); 428 regcache_exit(map);
425 regmap_debugfs_exit(map); 429 regmap_debugfs_exit(map);
426 430
427 map->max_register = config->max_register; 431 map->max_register = config->max_register;
428 map->writeable_reg = config->writeable_reg; 432 map->writeable_reg = config->writeable_reg;
429 map->readable_reg = config->readable_reg; 433 map->readable_reg = config->readable_reg;
430 map->volatile_reg = config->volatile_reg; 434 map->volatile_reg = config->volatile_reg;
431 map->precious_reg = config->precious_reg; 435 map->precious_reg = config->precious_reg;
432 map->cache_type = config->cache_type; 436 map->cache_type = config->cache_type;
433 437
434 regmap_debugfs_init(map, config->name); 438 regmap_debugfs_init(map, config->name);
435 439
436 map->cache_bypass = false; 440 map->cache_bypass = false;
437 map->cache_only = false; 441 map->cache_only = false;
438 442
439 ret = regcache_init(map, config); 443 ret = regcache_init(map, config);
440 444
441 map->unlock(map); 445 map->unlock(map);
442 446
443 return ret; 447 return ret;
444 } 448 }
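A sketch of the runtime-discovery case the comment above mentions (the chip ID register and configs are invented): probe reads an ID with a conservative map, then rebuilds the cache with the full description.

/* Sketch only: switch to a fuller register description after probing. */
#include <linux/regmap.h>

static int mychip_discover(struct regmap *map,
			   const struct regmap_config *full_config)
{
	unsigned int id;
	int ret;

	ret = regmap_read(map, 0x00, &id);	/* invented ID register */
	if (ret)
		return ret;

	/* full_config would carry the real max_register, defaults, etc. */
	return regmap_reinit_cache(map, full_config);
}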
445 449
446 /** 450 /**
447 * regmap_exit(): Free a previously allocated register map 451 * regmap_exit(): Free a previously allocated register map
448 */ 452 */
449 void regmap_exit(struct regmap *map) 453 void regmap_exit(struct regmap *map)
450 { 454 {
451 regcache_exit(map); 455 regcache_exit(map);
452 regmap_debugfs_exit(map); 456 regmap_debugfs_exit(map);
453 if (map->bus->free_context) 457 if (map->bus->free_context)
454 map->bus->free_context(map->bus_context); 458 map->bus->free_context(map->bus_context);
455 kfree(map->work_buf); 459 kfree(map->work_buf);
456 kfree(map); 460 kfree(map);
457 } 461 }
458 EXPORT_SYMBOL_GPL(regmap_exit); 462 EXPORT_SYMBOL_GPL(regmap_exit);
459 463
460 static int _regmap_raw_write(struct regmap *map, unsigned int reg, 464 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
461 const void *val, size_t val_len) 465 const void *val, size_t val_len)
462 { 466 {
463 u8 *u8 = map->work_buf; 467 u8 *u8 = map->work_buf;
464 void *buf; 468 void *buf;
465 int ret = -ENOTSUPP; 469 int ret = -ENOTSUPP;
466 size_t len; 470 size_t len;
467 int i; 471 int i;
468 472
469 /* Check for unwritable registers before we start */ 473 /* Check for unwritable registers before we start */
470 if (map->writeable_reg) 474 if (map->writeable_reg)
471 for (i = 0; i < val_len / map->format.val_bytes; i++) 475 for (i = 0; i < val_len / map->format.val_bytes; i++)
472 if (!map->writeable_reg(map->dev, reg + i)) 476 if (!map->writeable_reg(map->dev,
477 reg + (i * map->reg_stride)))
473 return -EINVAL; 478 return -EINVAL;
474 479
475 if (!map->cache_bypass && map->format.parse_val) { 480 if (!map->cache_bypass && map->format.parse_val) {
476 unsigned int ival; 481 unsigned int ival;
477 int val_bytes = map->format.val_bytes; 482 int val_bytes = map->format.val_bytes;
478 for (i = 0; i < val_len / val_bytes; i++) { 483 for (i = 0; i < val_len / val_bytes; i++) {
479 memcpy(map->work_buf, val + (i * val_bytes), val_bytes); 484 memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
480 ival = map->format.parse_val(map->work_buf); 485 ival = map->format.parse_val(map->work_buf);
481 ret = regcache_write(map, reg + i, ival); 486 ret = regcache_write(map, reg + (i * map->reg_stride),
487 ival);
482 if (ret) { 488 if (ret) {
483 dev_err(map->dev, 489 dev_err(map->dev,
484 "Error in caching of register: %u ret: %d\n", 490 "Error in caching of register: %u ret: %d\n",
485 reg + i, ret); 491 reg + i, ret);
486 return ret; 492 return ret;
487 } 493 }
488 } 494 }
489 if (map->cache_only) { 495 if (map->cache_only) {
490 map->cache_dirty = true; 496 map->cache_dirty = true;
491 return 0; 497 return 0;
492 } 498 }
493 } 499 }
494 500
495 map->format.format_reg(map->work_buf, reg, map->reg_shift); 501 map->format.format_reg(map->work_buf, reg, map->reg_shift);
496 502
497 u8[0] |= map->write_flag_mask; 503 u8[0] |= map->write_flag_mask;
498 504
499 trace_regmap_hw_write_start(map->dev, reg, 505 trace_regmap_hw_write_start(map->dev, reg,
500 val_len / map->format.val_bytes); 506 val_len / map->format.val_bytes);
501 507
502 /* If we're doing a single register write we can probably just 508 /* If we're doing a single register write we can probably just
503 * send the work_buf directly, otherwise try to do a gather 509 * send the work_buf directly, otherwise try to do a gather
504 * write. 510 * write.
505 */ 511 */
506 if (val == (map->work_buf + map->format.pad_bytes + 512 if (val == (map->work_buf + map->format.pad_bytes +
507 map->format.reg_bytes)) 513 map->format.reg_bytes))
508 ret = map->bus->write(map->bus_context, map->work_buf, 514 ret = map->bus->write(map->bus_context, map->work_buf,
509 map->format.reg_bytes + 515 map->format.reg_bytes +
510 map->format.pad_bytes + 516 map->format.pad_bytes +
511 val_len); 517 val_len);
512 else if (map->bus->gather_write) 518 else if (map->bus->gather_write)
513 ret = map->bus->gather_write(map->bus_context, map->work_buf, 519 ret = map->bus->gather_write(map->bus_context, map->work_buf,
514 map->format.reg_bytes + 520 map->format.reg_bytes +
515 map->format.pad_bytes, 521 map->format.pad_bytes,
516 val, val_len); 522 val, val_len);
517 523
518 /* If that didn't work fall back on linearising by hand. */ 524 /* If that didn't work fall back on linearising by hand. */
519 if (ret == -ENOTSUPP) { 525 if (ret == -ENOTSUPP) {
520 len = map->format.reg_bytes + map->format.pad_bytes + val_len; 526 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
521 buf = kzalloc(len, GFP_KERNEL); 527 buf = kzalloc(len, GFP_KERNEL);
522 if (!buf) 528 if (!buf)
523 return -ENOMEM; 529 return -ENOMEM;
524 530
525 memcpy(buf, map->work_buf, map->format.reg_bytes); 531 memcpy(buf, map->work_buf, map->format.reg_bytes);
526 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, 532 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
527 val, val_len); 533 val, val_len);
528 ret = map->bus->write(map->bus_context, buf, len); 534 ret = map->bus->write(map->bus_context, buf, len);
529 535
530 kfree(buf); 536 kfree(buf);
531 } 537 }
532 538
533 trace_regmap_hw_write_done(map->dev, reg, 539 trace_regmap_hw_write_done(map->dev, reg,
534 val_len / map->format.val_bytes); 540 val_len / map->format.val_bytes);
535 541
536 return ret; 542 return ret;
537 } 543 }
538 544
539 int _regmap_write(struct regmap *map, unsigned int reg, 545 int _regmap_write(struct regmap *map, unsigned int reg,
540 unsigned int val) 546 unsigned int val)
541 { 547 {
542 int ret; 548 int ret;
543 BUG_ON(!map->format.format_write && !map->format.format_val); 549 BUG_ON(!map->format.format_write && !map->format.format_val);
544 550
545 if (!map->cache_bypass && map->format.format_write) { 551 if (!map->cache_bypass && map->format.format_write) {
546 ret = regcache_write(map, reg, val); 552 ret = regcache_write(map, reg, val);
547 if (ret != 0) 553 if (ret != 0)
548 return ret; 554 return ret;
549 if (map->cache_only) { 555 if (map->cache_only) {
550 map->cache_dirty = true; 556 map->cache_dirty = true;
551 return 0; 557 return 0;
552 } 558 }
553 } 559 }
554 560
555 trace_regmap_reg_write(map->dev, reg, val); 561 trace_regmap_reg_write(map->dev, reg, val);
556 562
557 if (map->format.format_write) { 563 if (map->format.format_write) {
558 map->format.format_write(map, reg, val); 564 map->format.format_write(map, reg, val);
559 565
560 trace_regmap_hw_write_start(map->dev, reg, 1); 566 trace_regmap_hw_write_start(map->dev, reg, 1);
561 567
562 ret = map->bus->write(map->bus_context, map->work_buf, 568 ret = map->bus->write(map->bus_context, map->work_buf,
563 map->format.buf_size); 569 map->format.buf_size);
564 570
565 trace_regmap_hw_write_done(map->dev, reg, 1); 571 trace_regmap_hw_write_done(map->dev, reg, 1);
566 572
567 return ret; 573 return ret;
568 } else { 574 } else {
569 map->format.format_val(map->work_buf + map->format.reg_bytes 575 map->format.format_val(map->work_buf + map->format.reg_bytes
570 + map->format.pad_bytes, val, 0); 576 + map->format.pad_bytes, val, 0);
571 return _regmap_raw_write(map, reg, 577 return _regmap_raw_write(map, reg,
572 map->work_buf + 578 map->work_buf +
573 map->format.reg_bytes + 579 map->format.reg_bytes +
574 map->format.pad_bytes, 580 map->format.pad_bytes,
575 map->format.val_bytes); 581 map->format.val_bytes);
576 } 582 }
577 } 583 }
578 584
579 /** 585 /**
580 * regmap_write(): Write a value to a single register 586 * regmap_write(): Write a value to a single register
581 * 587 *
582 * @map: Register map to write to 588 * @map: Register map to write to
583 * @reg: Register to write to 589 * @reg: Register to write to
584 * @val: Value to be written 590 * @val: Value to be written
585 * 591 *
586 * A value of zero will be returned on success, a negative errno will 592 * A value of zero will be returned on success, a negative errno will
587 * be returned in error cases. 593 * be returned in error cases.
588 */ 594 */
589 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) 595 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
590 { 596 {
591 int ret; 597 int ret;
592 598
599 if (reg % map->reg_stride)
600 return -EINVAL;
601
593 map->lock(map); 602 map->lock(map);
594 603
595 ret = _regmap_write(map, reg, val); 604 ret = _regmap_write(map, reg, val);
596 605
597 map->unlock(map); 606 map->unlock(map);
598 607
599 return ret; 608 return ret;
600 } 609 }
601 EXPORT_SYMBOL_GPL(regmap_write); 610 EXPORT_SYMBOL_GPL(regmap_write);
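To illustrate the new alignment check (register numbers invented): with a map created using .reg_stride = 4, only stride-aligned addresses are accepted.

/* Sketch only: assumes "map" was created with .reg_stride = 4. */
#include <linux/regmap.h>

static int mychip_write_example(struct regmap *map)
{
	int ret;

	ret = regmap_write(map, 0x10, 0x1);	/* 0x10 % 4 == 0: written */
	if (ret)
		return ret;

	return regmap_write(map, 0x11, 0x1);	/* 0x11 % 4 != 0: -EINVAL */
}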
602 611
603 /** 612 /**
604 * regmap_raw_write(): Write raw values to one or more registers 613 * regmap_raw_write(): Write raw values to one or more registers
605 * 614 *
606 * @map: Register map to write to 615 * @map: Register map to write to
607 * @reg: Initial register to write to 616 * @reg: Initial register to write to
608 * @val: Block of data to be written, laid out for direct transmission to the 617 * @val: Block of data to be written, laid out for direct transmission to the
609 * device 618 * device
610 * @val_len: Length of data pointed to by val. 619 * @val_len: Length of data pointed to by val.
611 * 620 *
612 * This function is intended to be used for things like firmware 621 * This function is intended to be used for things like firmware
613 * download where a large block of data needs to be transferred to the 622 * download where a large block of data needs to be transferred to the
614 * device. No formatting will be done on the data provided. 623 * device. No formatting will be done on the data provided.
615 * 624 *
616 * A value of zero will be returned on success, a negative errno will 625 * A value of zero will be returned on success, a negative errno will
617 * be returned in error cases. 626 * be returned in error cases.
618 */ 627 */
619 int regmap_raw_write(struct regmap *map, unsigned int reg, 628 int regmap_raw_write(struct regmap *map, unsigned int reg,
620 const void *val, size_t val_len) 629 const void *val, size_t val_len)
621 { 630 {
622 int ret; 631 int ret;
623 632
624 if (val_len % map->format.val_bytes) 633 if (val_len % map->format.val_bytes)
625 return -EINVAL; 634 return -EINVAL;
635 if (reg % map->reg_stride)
636 return -EINVAL;
626 637
627 map->lock(map); 638 map->lock(map);
628 639
629 ret = _regmap_raw_write(map, reg, val, val_len); 640 ret = _regmap_raw_write(map, reg, val, val_len);
630 641
631 map->unlock(map); 642 map->unlock(map);
632 643
633 return ret; 644 return ret;
634 } 645 }
635 EXPORT_SYMBOL_GPL(regmap_raw_write); 646 EXPORT_SYMBOL_GPL(regmap_raw_write);
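A sketch of the firmware-download style usage described above (the buffer, length and base register are placeholders); val_len must be a multiple of the value size and reg must sit on the configured stride.

/* Sketch only: stream an already-formatted blob into a register window. */
#include <linux/regmap.h>

static int mychip_download_fw(struct regmap *map, const void *fw, size_t len)
{
	/* 0x40 is an invented, stride-aligned base register */
	return regmap_raw_write(map, 0x40, fw, len);
}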
636 647
637 /* 648 /*
638 * regmap_bulk_write(): Write multiple registers to the device 649 * regmap_bulk_write(): Write multiple registers to the device
639 * 650 *
640 * @map: Register map to write to 651 * @map: Register map to write to
641 * @reg: First register to write to 652 * @reg: First register to write to
642 * @val: Block of data to be written, in native register size for device 653 * @val: Block of data to be written, in native register size for device
643 * @val_count: Number of registers to write 654 * @val_count: Number of registers to write
644 * 655 *
645 * This function is intended to be used for writing a large block of 656 * This function is intended to be used for writing a large block of
646 * data to the device either in a single transfer or multiple transfers. 657 * data to the device either in a single transfer or multiple transfers.
647 * 658 *
648 * A value of zero will be returned on success, a negative errno will 659 * A value of zero will be returned on success, a negative errno will
649 * be returned in error cases. 660 * be returned in error cases.
650 */ 661 */
651 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 662 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
652 size_t val_count) 663 size_t val_count)
653 { 664 {
654 int ret = 0, i; 665 int ret = 0, i;
655 size_t val_bytes = map->format.val_bytes; 666 size_t val_bytes = map->format.val_bytes;
656 void *wval; 667 void *wval;
657 668
658 if (!map->format.parse_val) 669 if (!map->format.parse_val)
659 return -EINVAL; 670 return -EINVAL;
671 if (reg % map->reg_stride)
672 return -EINVAL;
660 673
661 map->lock(map); 674 map->lock(map);
662 675
663 /* No formatting is required if val_bytes is 1 */ 676 /* No formatting is required if val_bytes is 1 */
664 if (val_bytes == 1) { 677 if (val_bytes == 1) {
665 wval = (void *)val; 678 wval = (void *)val;
666 } else { 679 } else {
667 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); 680 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
668 if (!wval) { 681 if (!wval) {
669 ret = -ENOMEM; 682 ret = -ENOMEM;
670 dev_err(map->dev, "Error in memory allocation\n"); 683 dev_err(map->dev, "Error in memory allocation\n");
671 goto out; 684 goto out;
672 } 685 }
673 for (i = 0; i < val_count * val_bytes; i += val_bytes) 686 for (i = 0; i < val_count * val_bytes; i += val_bytes)
674 map->format.parse_val(wval + i); 687 map->format.parse_val(wval + i);
675 } 688 }
676 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 689 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
677 690
678 if (val_bytes != 1) 691 if (val_bytes != 1)
679 kfree(wval); 692 kfree(wval);
680 693
681 out: 694 out:
682 map->unlock(map); 695 map->unlock(map);
683 return ret; 696 return ret;
684 } 697 }
685 EXPORT_SYMBOL_GPL(regmap_bulk_write); 698 EXPORT_SYMBOL_GPL(regmap_bulk_write);
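An illustrative bulk write (registers and values invented): with a 32-bit map and .reg_stride = 4, the four values are cached at 0x80, 0x84, 0x88 and 0x8c and pushed to the device in a single raw transfer where the bus allows it.

/* Sketch only: program four consecutive coefficient registers. */
#include <linux/regmap.h>
#include <linux/types.h>

static int mychip_set_coeffs(struct regmap *map, const u32 *coeffs)
{
	return regmap_bulk_write(map, 0x80, coeffs, 4);
}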
686 699
687 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 700 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
688 unsigned int val_len) 701 unsigned int val_len)
689 { 702 {
690 u8 *u8 = map->work_buf; 703 u8 *u8 = map->work_buf;
691 int ret; 704 int ret;
692 705
693 map->format.format_reg(map->work_buf, reg, map->reg_shift); 706 map->format.format_reg(map->work_buf, reg, map->reg_shift);
694 707
695 /* 708 /*
696 * Some buses or devices flag reads by setting the high bits in the 709 * Some buses or devices flag reads by setting the high bits in the
697 * register address; since it's always the high bits for all 710 * register address; since it's always the high bits for all
698 * current formats we can do this here rather than in 711 * current formats we can do this here rather than in
699 * formatting. This may break if we get interesting formats. 712 * formatting. This may break if we get interesting formats.
700 */ 713 */
701 u8[0] |= map->read_flag_mask; 714 u8[0] |= map->read_flag_mask;
702 715
703 trace_regmap_hw_read_start(map->dev, reg, 716 trace_regmap_hw_read_start(map->dev, reg,
704 val_len / map->format.val_bytes); 717 val_len / map->format.val_bytes);
705 718
706 ret = map->bus->read(map->bus_context, map->work_buf, 719 ret = map->bus->read(map->bus_context, map->work_buf,
707 map->format.reg_bytes + map->format.pad_bytes, 720 map->format.reg_bytes + map->format.pad_bytes,
708 val, val_len); 721 val, val_len);
709 722
710 trace_regmap_hw_read_done(map->dev, reg, 723 trace_regmap_hw_read_done(map->dev, reg,
711 val_len / map->format.val_bytes); 724 val_len / map->format.val_bytes);
712 725
713 return ret; 726 return ret;
714 } 727 }
715 728
716 static int _regmap_read(struct regmap *map, unsigned int reg, 729 static int _regmap_read(struct regmap *map, unsigned int reg,
717 unsigned int *val) 730 unsigned int *val)
718 { 731 {
719 int ret; 732 int ret;
720 733
721 if (!map->cache_bypass) { 734 if (!map->cache_bypass) {
722 ret = regcache_read(map, reg, val); 735 ret = regcache_read(map, reg, val);
723 if (ret == 0) 736 if (ret == 0)
724 return 0; 737 return 0;
725 } 738 }
726 739
727 if (!map->format.parse_val) 740 if (!map->format.parse_val)
728 return -EINVAL; 741 return -EINVAL;
729 742
730 if (map->cache_only) 743 if (map->cache_only)
731 return -EBUSY; 744 return -EBUSY;
732 745
733 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 746 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
734 if (ret == 0) { 747 if (ret == 0) {
735 *val = map->format.parse_val(map->work_buf); 748 *val = map->format.parse_val(map->work_buf);
736 trace_regmap_reg_read(map->dev, reg, *val); 749 trace_regmap_reg_read(map->dev, reg, *val);
737 } 750 }
738 751
739 return ret; 752 return ret;
740 } 753 }
741 754
742 /** 755 /**
743 * regmap_read(): Read a value from a single register 756 * regmap_read(): Read a value from a single register
744 * 757 *
745 * @map: Register map to read from 758 * @map: Register map to read from
746 * @reg: Register to be read from 759 * @reg: Register to be read from
747 * @val: Pointer to store read value 760 * @val: Pointer to store read value
748 * 761 *
749 * A value of zero will be returned on success, a negative errno will 762 * A value of zero will be returned on success, a negative errno will
750 * be returned in error cases. 763 * be returned in error cases.
751 */ 764 */
752 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) 765 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
753 { 766 {
754 int ret; 767 int ret;
755 768
769 if (reg % map->reg_stride)
770 return -EINVAL;
771
756 map->lock(map); 772 map->lock(map);
757 773
758 ret = _regmap_read(map, reg, val); 774 ret = _regmap_read(map, reg, val);
759 775
760 map->unlock(map); 776 map->unlock(map);
761 777
762 return ret; 778 return ret;
763 } 779 }
764 EXPORT_SYMBOL_GPL(regmap_read); 780 EXPORT_SYMBOL_GPL(regmap_read);
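A matching single-register read sketch (the register is invented); the same stride check applies on the read side.

/* Sketch only: read back an invented, stride-aligned ID register. */
#include <linux/regmap.h>

static int mychip_get_rev(struct regmap *map, unsigned int *rev)
{
	return regmap_read(map, 0x00, rev);
}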
765 781
766 /** 782 /**
767 * regmap_raw_read(): Read raw data from the device 783 * regmap_raw_read(): Read raw data from the device
768 * 784 *
769 * @map: Register map to read from 785 * @map: Register map to read from
770 * @reg: First register to be read from 786 * @reg: First register to be read from
771 * @val: Pointer to store read value 787 * @val: Pointer to store read value
772 * @val_len: Size of data to read 788 * @val_len: Size of data to read
773 * 789 *
774 * A value of zero will be returned on success, a negative errno will 790 * A value of zero will be returned on success, a negative errno will
775 * be returned in error cases. 791 * be returned in error cases.
776 */ 792 */
777 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 793 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
778 size_t val_len) 794 size_t val_len)
779 { 795 {
780 size_t val_bytes = map->format.val_bytes; 796 size_t val_bytes = map->format.val_bytes;
781 size_t val_count = val_len / val_bytes; 797 size_t val_count = val_len / val_bytes;
782 unsigned int v; 798 unsigned int v;
783 int ret, i; 799 int ret, i;
784 800
785 if (val_len % map->format.val_bytes) 801 if (val_len % map->format.val_bytes)
786 return -EINVAL; 802 return -EINVAL;
803 if (reg % map->reg_stride)
804 return -EINVAL;
787 805
788 map->lock(map); 806 map->lock(map);
789 807
790 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 808 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
791 map->cache_type == REGCACHE_NONE) { 809 map->cache_type == REGCACHE_NONE) {
792 /* Physical block read if there's no cache involved */ 810 /* Physical block read if there's no cache involved */
793 ret = _regmap_raw_read(map, reg, val, val_len); 811 ret = _regmap_raw_read(map, reg, val, val_len);
794 812
795 } else { 813 } else {
796 /* Otherwise go word by word for the cache; should be low 814 /* Otherwise go word by word for the cache; should be low
797 * cost as we expect to hit the cache. 815 * cost as we expect to hit the cache.
798 */ 816 */
799 for (i = 0; i < val_count; i++) { 817 for (i = 0; i < val_count; i++) {
800 ret = _regmap_read(map, reg + i, &v); 818 ret = _regmap_read(map, reg + (i * map->reg_stride),
819 &v);
801 if (ret != 0) 820 if (ret != 0)
802 goto out; 821 goto out;
803 822
804 map->format.format_val(val + (i * val_bytes), v, 0); 823 map->format.format_val(val + (i * val_bytes), v, 0);
805 } 824 }
806 } 825 }
807 826
808 out: 827 out:
809 map->unlock(map); 828 map->unlock(map);
810 829
811 return ret; 830 return ret;
812 } 831 }
813 EXPORT_SYMBOL_GPL(regmap_raw_read); 832 EXPORT_SYMBOL_GPL(regmap_raw_read);
814 833
815 /** 834 /**
816 * regmap_bulk_read(): Read multiple registers from the device 835 * regmap_bulk_read(): Read multiple registers from the device
817 * 836 *
818 * @map: Register map to read from 837 * @map: Register map to read from
819 * @reg: First register to be read from 838 * @reg: First register to be read from
820 * @val: Pointer to store read value, in native register size for device 839 * @val: Pointer to store read value, in native register size for device
821 * @val_count: Number of registers to read 840 * @val_count: Number of registers to read
822 * 841 *
823 * A value of zero will be returned on success, a negative errno will 842 * A value of zero will be returned on success, a negative errno will
824 * be returned in error cases. 843 * be returned in error cases.
825 */ 844 */
826 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 845 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
827 size_t val_count) 846 size_t val_count)
828 { 847 {
829 int ret, i; 848 int ret, i;
830 size_t val_bytes = map->format.val_bytes; 849 size_t val_bytes = map->format.val_bytes;
831 bool vol = regmap_volatile_range(map, reg, val_count); 850 bool vol = regmap_volatile_range(map, reg, val_count);
832 851
833 if (!map->format.parse_val) 852 if (!map->format.parse_val)
834 return -EINVAL; 853 return -EINVAL;
854 if (reg % map->reg_stride)
855 return -EINVAL;
835 856
836 if (vol || map->cache_type == REGCACHE_NONE) { 857 if (vol || map->cache_type == REGCACHE_NONE) {
837 ret = regmap_raw_read(map, reg, val, val_bytes * val_count); 858 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
838 if (ret != 0) 859 if (ret != 0)
839 return ret; 860 return ret;
840 861
841 for (i = 0; i < val_count * val_bytes; i += val_bytes) 862 for (i = 0; i < val_count * val_bytes; i += val_bytes)
842 map->format.parse_val(val + i); 863 map->format.parse_val(val + i);
843 } else { 864 } else {
844 for (i = 0; i < val_count; i++) { 865 for (i = 0; i < val_count; i++) {
845 ret = regmap_read(map, reg + i, val + (i * val_bytes)); 866 ret = regmap_read(map, reg + (i * map->reg_stride),
867 val + (i * val_bytes));
846 if (ret != 0) 868 if (ret != 0)
847 return ret; 869 return ret;
848 } 870 }
849 } 871 }
850 872
851 return 0; 873 return 0;
852 } 874 }
853 EXPORT_SYMBOL_GPL(regmap_bulk_read); 875 EXPORT_SYMBOL_GPL(regmap_bulk_read);
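And the matching bulk read sketch (again with an invented register), filling a caller-provided buffer of native-sized values.

/* Sketch only: read four status words starting at an invented register. */
#include <linux/regmap.h>
#include <linux/types.h>

static int mychip_get_status(struct regmap *map, u32 *status)
{
	return regmap_bulk_read(map, 0x80, status, 4);
}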
854 876
855 static int _regmap_update_bits(struct regmap *map, unsigned int reg, 877 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
856 unsigned int mask, unsigned int val, 878 unsigned int mask, unsigned int val,
857 bool *change) 879 bool *change)
858 { 880 {
859 int ret; 881 int ret;
860 unsigned int tmp, orig; 882 unsigned int tmp, orig;
861 883
862 map->lock(map); 884 map->lock(map);
863 885
864 ret = _regmap_read(map, reg, &orig); 886 ret = _regmap_read(map, reg, &orig);
865 if (ret != 0) 887 if (ret != 0)
866 goto out; 888 goto out;
867 889
868 tmp = orig & ~mask; 890 tmp = orig & ~mask;
869 tmp |= val & mask; 891 tmp |= val & mask;
870 892
871 if (tmp != orig) { 893 if (tmp != orig) {
872 ret = _regmap_write(map, reg, tmp); 894 ret = _regmap_write(map, reg, tmp);
873 *change = true; 895 *change = true;
874 } else { 896 } else {
875 *change = false; 897 *change = false;
876 } 898 }
877 899
878 out: 900 out:
879 map->unlock(map); 901 map->unlock(map);
880 902
881 return ret; 903 return ret;
882 } 904 }
883 905
884 /** 906 /**
885 * regmap_update_bits: Perform a read/modify/write cycle on the register map 907 * regmap_update_bits: Perform a read/modify/write cycle on the register map
886 * 908 *
887 * @map: Register map to update 909 * @map: Register map to update
888 * @reg: Register to update 910 * @reg: Register to update
889 * @mask: Bitmask to change 911 * @mask: Bitmask to change
890 * @val: New value for bitmask 912 * @val: New value for bitmask
891 * 913 *
892 * Returns zero for success, a negative number on error. 914 * Returns zero for success, a negative number on error.
893 */ 915 */
894 int regmap_update_bits(struct regmap *map, unsigned int reg, 916 int regmap_update_bits(struct regmap *map, unsigned int reg,
895 unsigned int mask, unsigned int val) 917 unsigned int mask, unsigned int val)
896 { 918 {
897 bool change; 919 bool change;
898 return _regmap_update_bits(map, reg, mask, val, &change); 920 return _regmap_update_bits(map, reg, mask, val, &change);
899 } 921 }
900 EXPORT_SYMBOL_GPL(regmap_update_bits); 922 EXPORT_SYMBOL_GPL(regmap_update_bits);
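For example (register and bit-field invented), updating a two-bit field without touching the rest of the register:

/* Sketch only: set bits [5:4] of an invented control register to 0b10. */
#include <linux/regmap.h>

static int mychip_set_mode(struct regmap *map)
{
	return regmap_update_bits(map, 0x04, 0x30, 0x20);
}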
901 923
902 /** 924 /**
903 * regmap_update_bits_check: Perform a read/modify/write cycle on the 925 * regmap_update_bits_check: Perform a read/modify/write cycle on the
904 * register map and report if updated 926 * register map and report if updated
905 * 927 *
906 * @map: Register map to update 928 * @map: Register map to update
907 * @reg: Register to update 929 * @reg: Register to update
908 * @mask: Bitmask to change 930 * @mask: Bitmask to change
909 * @val: New value for bitmask 931 * @val: New value for bitmask
910 * @change: Boolean indicating if a write was done 932 * @change: Boolean indicating if a write was done
911 * 933 *
912 * Returns zero for success, a negative number on error. 934 * Returns zero for success, a negative number on error.
913 */ 935 */
914 int regmap_update_bits_check(struct regmap *map, unsigned int reg, 936 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
915 unsigned int mask, unsigned int val, 937 unsigned int mask, unsigned int val,
916 bool *change) 938 bool *change)
917 { 939 {
918 return _regmap_update_bits(map, reg, mask, val, change); 940 return _regmap_update_bits(map, reg, mask, val, change);
919 } 941 }
920 EXPORT_SYMBOL_GPL(regmap_update_bits_check); 942 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
921 943
922 /** 944 /**
923 * regmap_register_patch: Register and apply register updates to be applied 945 * regmap_register_patch: Register and apply register updates to be applied
924 * on device initialisation 946 * on device initialisation
925 * 947 *
926 * @map: Register map to apply updates to. 948 * @map: Register map to apply updates to.
927 * @regs: Values to update. 949 * @regs: Values to update.
928 * @num_regs: Number of entries in regs. 950 * @num_regs: Number of entries in regs.
929 * 951 *
930 * Register a set of register updates to be applied to the device 952 * Register a set of register updates to be applied to the device
931 * whenever the device registers are synchronised with the cache and 953 * whenever the device registers are synchronised with the cache and
932 * apply them immediately. Typically this is used to apply 954 * apply them immediately. Typically this is used to apply
933  * corrections to the device defaults on startup, such 955  * corrections to the device defaults on startup, such
934 * as the updates some vendors provide to undocumented registers. 956 * as the updates some vendors provide to undocumented registers.
935 */ 957 */
936 int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 958 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
937 int num_regs) 959 int num_regs)
938 { 960 {
939 int i, ret; 961 int i, ret;
940 bool bypass; 962 bool bypass;
941 963
942 /* If needed the implementation can be extended to support this */ 964 /* If needed the implementation can be extended to support this */
943 if (map->patch) 965 if (map->patch)
944 return -EBUSY; 966 return -EBUSY;
945 967
946 map->lock(map); 968 map->lock(map);
947 969
948 bypass = map->cache_bypass; 970 bypass = map->cache_bypass;
949 971
950 map->cache_bypass = true; 972 map->cache_bypass = true;
951 973
952 /* Write out first; it's useful to apply even if we fail later. */ 974 /* Write out first; it's useful to apply even if we fail later. */
953 for (i = 0; i < num_regs; i++) { 975 for (i = 0; i < num_regs; i++) {
954 ret = _regmap_write(map, regs[i].reg, regs[i].def); 976 ret = _regmap_write(map, regs[i].reg, regs[i].def);
955 if (ret != 0) { 977 if (ret != 0) {
956 dev_err(map->dev, "Failed to write %x = %x: %d\n", 978 dev_err(map->dev, "Failed to write %x = %x: %d\n",
957 regs[i].reg, regs[i].def, ret); 979 regs[i].reg, regs[i].def, ret);
958 goto out; 980 goto out;
959 } 981 }
960 } 982 }
961 983
962 map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL); 984 map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
963 if (map->patch != NULL) { 985 if (map->patch != NULL) {
964 memcpy(map->patch, regs, 986 memcpy(map->patch, regs,
965 num_regs * sizeof(struct reg_default)); 987 num_regs * sizeof(struct reg_default));
966 map->patch_regs = num_regs; 988 map->patch_regs = num_regs;
967 } else { 989 } else {
968 ret = -ENOMEM; 990 ret = -ENOMEM;
969 } 991 }
970 992
971 out: 993 out:
972 map->cache_bypass = bypass; 994 map->cache_bypass = bypass;
973 995
974 map->unlock(map); 996 map->unlock(map);
975 997
976 return ret; 998 return ret;
977 } 999 }
978 EXPORT_SYMBOL_GPL(regmap_register_patch); 1000 EXPORT_SYMBOL_GPL(regmap_register_patch);
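A typical caller builds a small reg_default table from vendor-supplied magic values and registers it once at probe time. The addresses and data below are placeholders, not real errata values:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Placeholder errata sequence; real values would come from the vendor */
static const struct reg_default hyp_errata[] = {
        { 0x7a, 0x0001 },
        { 0x7b, 0x1300 },
        { 0x7c, 0x0080 },
};

static int hyp_apply_errata(struct device *dev, struct regmap *map)
{
        int ret;

        ret = regmap_register_patch(map, hyp_errata, ARRAY_SIZE(hyp_errata));
        if (ret != 0)
                dev_err(dev, "Failed to register errata patch: %d\n", ret);

        return ret;
}

As the function body above shows, the patch is written out immediately with the cache bypassed and saved so it can be replayed whenever the cache is synchronised.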
979 1001
980 /* 1002 /*
981 * regmap_get_val_bytes(): Report the size of a register value 1003 * regmap_get_val_bytes(): Report the size of a register value
982 * 1004 *
983  * Report the size of a register value, mainly intended for use by 1005  * Report the size of a register value, mainly intended for use by
984 * generic infrastructure built on top of regmap. 1006 * generic infrastructure built on top of regmap.
985 */ 1007 */
986 int regmap_get_val_bytes(struct regmap *map) 1008 int regmap_get_val_bytes(struct regmap *map)
987 { 1009 {
988 if (map->format.format_write) 1010 if (map->format.format_write)
989 return -EINVAL; 1011 return -EINVAL;
990 1012
991 return map->format.val_bytes; 1013 return map->format.val_bytes;
992 } 1014 }
993 EXPORT_SYMBOL_GPL(regmap_get_val_bytes); 1015 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
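Generic code layered on top of regmap can use this to size raw buffers without hard-coding the device's word size. A small sketch, assuming a simple kmalloc-based allocation:

#include <linux/regmap.h>
#include <linux/slab.h>

/* Allocate a buffer large enough for a raw read of "count" registers */
static void *hyp_alloc_raw_buf(struct regmap *map, size_t count)
{
        int val_bytes = regmap_get_val_bytes(map);

        if (val_bytes < 0)
                return NULL;    /* map uses format_write, raw I/O unsupported */

        return kmalloc(count * val_bytes, GFP_KERNEL);
}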
994 1016
995 static int __init regmap_initcall(void) 1017 static int __init regmap_initcall(void)
996 { 1018 {
997 regmap_debugfs_initcall(); 1019 regmap_debugfs_initcall();
998 1020
999 return 0; 1021 return 0;
1000 } 1022 }
1001 postcore_initcall(regmap_initcall); 1023 postcore_initcall(regmap_initcall);
1002 1024
include/linux/regmap.h
1 #ifndef __LINUX_REGMAP_H 1 #ifndef __LINUX_REGMAP_H
2 #define __LINUX_REGMAP_H 2 #define __LINUX_REGMAP_H
3 3
4 /* 4 /*
5 * Register map access API 5 * Register map access API
6 * 6 *
7 * Copyright 2011 Wolfson Microelectronics plc 7 * Copyright 2011 Wolfson Microelectronics plc
8 * 8 *
9 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> 9 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16 #include <linux/list.h> 16 #include <linux/list.h>
17 17
18 struct module; 18 struct module;
19 struct device; 19 struct device;
20 struct i2c_client; 20 struct i2c_client;
21 struct spi_device; 21 struct spi_device;
22 struct regmap; 22 struct regmap;
23 23
24 /* An enum of all the supported cache types */ 24 /* An enum of all the supported cache types */
25 enum regcache_type { 25 enum regcache_type {
26 REGCACHE_NONE, 26 REGCACHE_NONE,
27 REGCACHE_RBTREE, 27 REGCACHE_RBTREE,
28 REGCACHE_COMPRESSED 28 REGCACHE_COMPRESSED
29 }; 29 };
30 30
31 /** 31 /**
32 * Default value for a register. We use an array of structs rather 32 * Default value for a register. We use an array of structs rather
33 * than a simple array as many modern devices have very sparse 33 * than a simple array as many modern devices have very sparse
34 * register maps. 34 * register maps.
35 * 35 *
36 * @reg: Register address. 36 * @reg: Register address.
37 * @def: Register default value. 37 * @def: Register default value.
38 */ 38 */
39 struct reg_default { 39 struct reg_default {
40 unsigned int reg; 40 unsigned int reg;
41 unsigned int def; 41 unsigned int def;
42 }; 42 };
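For example, a sparse defaults table for a hypothetical device would describe only the handful of registers whose reset values matter (addresses and values here are invented):

#include <linux/regmap.h>

/* Hypothetical power-on defaults; only the interesting registers appear */
static const struct reg_default hyp_reg_defaults[] = {
        { .reg = 0x00, .def = 0x0000 },
        { .reg = 0x04, .def = 0x00ff },
        { .reg = 0x40, .def = 0x8000 },
};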
43 43
44 #ifdef CONFIG_REGMAP 44 #ifdef CONFIG_REGMAP
45 45
46 /** 46 /**
47 * Configuration for the register map of a device. 47 * Configuration for the register map of a device.
48 * 48 *
49 * @name: Optional name of the regmap. Useful when a device has multiple 49 * @name: Optional name of the regmap. Useful when a device has multiple
50 * register regions. 50 * register regions.
51 * 51 *
52 * @reg_bits: Number of bits in a register address, mandatory. 52 * @reg_bits: Number of bits in a register address, mandatory.
53 * @reg_stride: The register address stride. Valid register addresses are a
54 * multiple of this value. If set to 0, a value of 1 will be
55 * used.
53 * @pad_bits: Number of bits of padding between register and value. 56 * @pad_bits: Number of bits of padding between register and value.
54 * @val_bits: Number of bits in a register value, mandatory. 57 * @val_bits: Number of bits in a register value, mandatory.
55 * 58 *
56 * @writeable_reg: Optional callback returning true if the register 59 * @writeable_reg: Optional callback returning true if the register
57 * can be written to. 60 * can be written to.
58 * @readable_reg: Optional callback returning true if the register 61 * @readable_reg: Optional callback returning true if the register
59 * can be read from. 62 * can be read from.
60 * @volatile_reg: Optional callback returning true if the register 63 * @volatile_reg: Optional callback returning true if the register
61 * value can't be cached. 64 * value can't be cached.
62  * @precious_reg: Optional callback returning true if the register 65  * @precious_reg: Optional callback returning true if the register
63 * should not be read outside of a call from the driver 66 * should not be read outside of a call from the driver
64 * (eg, a clear on read interrupt status register). 67 * (eg, a clear on read interrupt status register).
65 * 68 *
66 * @max_register: Optional, specifies the maximum valid register index. 69 * @max_register: Optional, specifies the maximum valid register index.
67 * @reg_defaults: Power on reset values for registers (for use with 70 * @reg_defaults: Power on reset values for registers (for use with
68 * register cache support). 71 * register cache support).
69 * @num_reg_defaults: Number of elements in reg_defaults. 72 * @num_reg_defaults: Number of elements in reg_defaults.
70 * 73 *
71 * @read_flag_mask: Mask to be set in the top byte of the register when doing 74 * @read_flag_mask: Mask to be set in the top byte of the register when doing
72 * a read. 75 * a read.
73 * @write_flag_mask: Mask to be set in the top byte of the register when doing 76 * @write_flag_mask: Mask to be set in the top byte of the register when doing
74 * a write. If both read_flag_mask and write_flag_mask are 77 * a write. If both read_flag_mask and write_flag_mask are
75 * empty the regmap_bus default masks are used. 78 * empty the regmap_bus default masks are used.
76 * 79 *
77 * @cache_type: The actual cache type. 80 * @cache_type: The actual cache type.
78 * @reg_defaults_raw: Power on reset values for registers (for use with 81 * @reg_defaults_raw: Power on reset values for registers (for use with
79 * register cache support). 82 * register cache support).
80 * @num_reg_defaults_raw: Number of elements in reg_defaults_raw. 83 * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
81 */ 84 */
82 struct regmap_config { 85 struct regmap_config {
83 const char *name; 86 const char *name;
84 87
85 int reg_bits; 88 int reg_bits;
89 int reg_stride;
86 int pad_bits; 90 int pad_bits;
87 int val_bits; 91 int val_bits;
88 92
89 bool (*writeable_reg)(struct device *dev, unsigned int reg); 93 bool (*writeable_reg)(struct device *dev, unsigned int reg);
90 bool (*readable_reg)(struct device *dev, unsigned int reg); 94 bool (*readable_reg)(struct device *dev, unsigned int reg);
91 bool (*volatile_reg)(struct device *dev, unsigned int reg); 95 bool (*volatile_reg)(struct device *dev, unsigned int reg);
92 bool (*precious_reg)(struct device *dev, unsigned int reg); 96 bool (*precious_reg)(struct device *dev, unsigned int reg);
93 97
94 unsigned int max_register; 98 unsigned int max_register;
95 const struct reg_default *reg_defaults; 99 const struct reg_default *reg_defaults;
96 unsigned int num_reg_defaults; 100 unsigned int num_reg_defaults;
97 enum regcache_type cache_type; 101 enum regcache_type cache_type;
98 const void *reg_defaults_raw; 102 const void *reg_defaults_raw;
99 unsigned int num_reg_defaults_raw; 103 unsigned int num_reg_defaults_raw;
100 104
101 u8 read_flag_mask; 105 u8 read_flag_mask;
102 u8 write_flag_mask; 106 u8 write_flag_mask;
103 }; 107 };
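A hedged example of how the new reg_stride field might be filled in for a block of 32-bit MMIO registers spaced 4 bytes apart, reusing the hypothetical hyp_reg_defaults table sketched above; a serial (I2C/SPI) device would normally leave reg_stride at 0, which the core treats as 1:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_config hyp_mmio_regmap_config = {
        .reg_bits         = 32,
        .reg_stride       = 4,  /* 32-bit registers at 4-byte intervals */
        .val_bits         = 32,
        .max_register     = 0x1fc,
        .reg_defaults     = hyp_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(hyp_reg_defaults),
        .cache_type       = REGCACHE_RBTREE,
};

Note that every address in reg_defaults (and any address later passed to the regmap API) must be a multiple of reg_stride, which is exactly the error checking this commit adds at the entry points.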
104 108
105 typedef int (*regmap_hw_write)(void *context, const void *data, 109 typedef int (*regmap_hw_write)(void *context, const void *data,
106 size_t count); 110 size_t count);
107 typedef int (*regmap_hw_gather_write)(void *context, 111 typedef int (*regmap_hw_gather_write)(void *context,
108 const void *reg, size_t reg_len, 112 const void *reg, size_t reg_len,
109 const void *val, size_t val_len); 113 const void *val, size_t val_len);
110 typedef int (*regmap_hw_read)(void *context, 114 typedef int (*regmap_hw_read)(void *context,
111 const void *reg_buf, size_t reg_size, 115 const void *reg_buf, size_t reg_size,
112 void *val_buf, size_t val_size); 116 void *val_buf, size_t val_size);
113 typedef void (*regmap_hw_free_context)(void *context); 117 typedef void (*regmap_hw_free_context)(void *context);
114 118
115 /** 119 /**
116 * Description of a hardware bus for the register map infrastructure. 120 * Description of a hardware bus for the register map infrastructure.
117 * 121 *
118 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex 122 * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
119 * to perform locking. 123 * to perform locking.
120 * @write: Write operation. 124 * @write: Write operation.
121 * @gather_write: Write operation with split register/value, return -ENOTSUPP 125 * @gather_write: Write operation with split register/value, return -ENOTSUPP
122 * if not implemented on a given device. 126 * if not implemented on a given device.
123 * @read: Read operation. Data is returned in the buffer used to transmit 127 * @read: Read operation. Data is returned in the buffer used to transmit
124 * data. 128 * data.
125 * @read_flag_mask: Mask to be set in the top byte of the register when doing 129 * @read_flag_mask: Mask to be set in the top byte of the register when doing
126 * a read. 130 * a read.
127 */ 131 */
128 struct regmap_bus { 132 struct regmap_bus {
129 bool fast_io; 133 bool fast_io;
130 regmap_hw_write write; 134 regmap_hw_write write;
131 regmap_hw_gather_write gather_write; 135 regmap_hw_gather_write gather_write;
132 regmap_hw_read read; 136 regmap_hw_read read;
133 regmap_hw_free_context free_context; 137 regmap_hw_free_context free_context;
134 u8 read_flag_mask; 138 u8 read_flag_mask;
135 }; 139 };
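To make the callback shapes concrete, here is a toy bus that simply stores values in a context-supplied array. It assumes a single 8-bit register address followed directly by the value bytes in the write buffer (i.e. reg_bits = 8 with no padding); a real bus would drive actual hardware instead:

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/string.h>

struct hyp_bus_ctx {
        u8 regs[256];           /* backing store standing in for hardware */
};

/* Buffer layout assumed: one address byte, then the value bytes */
static int hyp_bus_write(void *context, const void *data, size_t count)
{
        struct hyp_bus_ctx *ctx = context;
        const u8 *buf = data;

        if (count < 2 || buf[0] + (count - 1) > sizeof(ctx->regs))
                return -EINVAL;

        memcpy(&ctx->regs[buf[0]], buf + 1, count - 1);
        return 0;
}

static int hyp_bus_read(void *context, const void *reg_buf, size_t reg_size,
                        void *val_buf, size_t val_size)
{
        struct hyp_bus_ctx *ctx = context;
        u8 reg = *(const u8 *)reg_buf;

        if (reg_size != 1 || reg + val_size > sizeof(ctx->regs))
                return -EINVAL;

        memcpy(val_buf, &ctx->regs[reg], val_size);
        return 0;
}

static const struct regmap_bus hyp_bus = {
        .write = hyp_bus_write,
        .read  = hyp_bus_read,
};

gather_write is left unset here, which is allowed; the core falls back to a single combined write buffer.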
136 140
137 struct regmap *regmap_init(struct device *dev, 141 struct regmap *regmap_init(struct device *dev,
138 const struct regmap_bus *bus, 142 const struct regmap_bus *bus,
139 void *bus_context, 143 void *bus_context,
140 const struct regmap_config *config); 144 const struct regmap_config *config);
141 struct regmap *regmap_init_i2c(struct i2c_client *i2c, 145 struct regmap *regmap_init_i2c(struct i2c_client *i2c,
142 const struct regmap_config *config); 146 const struct regmap_config *config);
143 struct regmap *regmap_init_spi(struct spi_device *dev, 147 struct regmap *regmap_init_spi(struct spi_device *dev,
144 const struct regmap_config *config); 148 const struct regmap_config *config);
145 struct regmap *regmap_init_mmio(struct device *dev, 149 struct regmap *regmap_init_mmio(struct device *dev,
146 void __iomem *regs, 150 void __iomem *regs,
147 const struct regmap_config *config); 151 const struct regmap_config *config);
148 152
149 struct regmap *devm_regmap_init(struct device *dev, 153 struct regmap *devm_regmap_init(struct device *dev,
150 const struct regmap_bus *bus, 154 const struct regmap_bus *bus,
151 void *bus_context, 155 void *bus_context,
152 const struct regmap_config *config); 156 const struct regmap_config *config);
153 struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, 157 struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
154 const struct regmap_config *config); 158 const struct regmap_config *config);
155 struct regmap *devm_regmap_init_spi(struct spi_device *dev, 159 struct regmap *devm_regmap_init_spi(struct spi_device *dev,
156 const struct regmap_config *config); 160 const struct regmap_config *config);
157 struct regmap *devm_regmap_init_mmio(struct device *dev, 161 struct regmap *devm_regmap_init_mmio(struct device *dev,
158 void __iomem *regs, 162 void __iomem *regs,
159 const struct regmap_config *config); 163 const struct regmap_config *config);
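A sketch of a probe path using the MMIO variant with the hypothetical config shown earlier; "regs" stands for a pointer obtained by ioremap()ing the device's memory resource, and the final write is just a smoke test of the stride rule:

#include <linux/err.h>
#include <linux/regmap.h>

static int hyp_mmio_probe(struct device *dev, void __iomem *regs)
{
        struct regmap *map;

        map = devm_regmap_init_mmio(dev, regs, &hyp_mmio_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        /* With reg_stride = 4, only 4-byte-aligned addresses are accepted */
        return regmap_write(map, 0x10, 0x12345678);
}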
160 164
161 void regmap_exit(struct regmap *map); 165 void regmap_exit(struct regmap *map);
162 int regmap_reinit_cache(struct regmap *map, 166 int regmap_reinit_cache(struct regmap *map,
163 const struct regmap_config *config); 167 const struct regmap_config *config);
164 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); 168 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
165 int regmap_raw_write(struct regmap *map, unsigned int reg, 169 int regmap_raw_write(struct regmap *map, unsigned int reg,
166 const void *val, size_t val_len); 170 const void *val, size_t val_len);
167 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 171 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
168 size_t val_count); 172 size_t val_count);
169 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); 173 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
170 int regmap_raw_read(struct regmap *map, unsigned int reg, 174 int regmap_raw_read(struct regmap *map, unsigned int reg,
171 void *val, size_t val_len); 175 void *val, size_t val_len);
172 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, 176 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
173 size_t val_count); 177 size_t val_count);
174 int regmap_update_bits(struct regmap *map, unsigned int reg, 178 int regmap_update_bits(struct regmap *map, unsigned int reg,
175 unsigned int mask, unsigned int val); 179 unsigned int mask, unsigned int val);
176 int regmap_update_bits_check(struct regmap *map, unsigned int reg, 180 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
177 unsigned int mask, unsigned int val, 181 unsigned int mask, unsigned int val,
178 bool *change); 182 bool *change);
179 int regmap_get_val_bytes(struct regmap *map); 183 int regmap_get_val_bytes(struct regmap *map);
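To round off the I/O helpers above, a hedged sketch of a block read: the start address and count are arbitrary, and the buffer element size is assumed to match a device with 16-bit values, since regmap_bulk_read() returns values in the device's native register width:

#include <linux/regmap.h>
#include <linux/types.h>

/* Read a block of 8 register values starting at hypothetical address 0x40;
 * for a device with val_bits = 16 each element is a native-endian u16.
 */
static int hyp_read_block(struct regmap *map, u16 *vals)
{
        return regmap_bulk_read(map, 0x40, vals, 8);
}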
180 184
181 int regcache_sync(struct regmap *map); 185 int regcache_sync(struct regmap *map);
182 int regcache_sync_region(struct regmap *map, unsigned int min, 186 int regcache_sync_region(struct regmap *map, unsigned int min,
183 unsigned int max); 187 unsigned int max);
184 void regcache_cache_only(struct regmap *map, bool enable); 188 void regcache_cache_only(struct regmap *map, bool enable);
185 void regcache_cache_bypass(struct regmap *map, bool enable); 189 void regcache_cache_bypass(struct regmap *map, bool enable);
186 void regcache_mark_dirty(struct regmap *map); 190 void regcache_mark_dirty(struct regmap *map);
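These cache controls are commonly combined in a suspend/resume pattern: switch to cache-only while the device is powered down, mark the cache dirty, and replay it once power returns. A minimal sketch:

#include <linux/regmap.h>

static void hyp_power_off(struct regmap *map)
{
        regcache_cache_only(map, true); /* writes go to the cache only */
        regcache_mark_dirty(map);       /* everything must be rewritten later */
}

static int hyp_power_on(struct regmap *map)
{
        regcache_cache_only(map, false);
        return regcache_sync(map);      /* replay the cached values */
}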
187 191
188 int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 192 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
189 int num_regs); 193 int num_regs);
190 194
191 /** 195 /**
192 * Description of an IRQ for the generic regmap irq_chip. 196 * Description of an IRQ for the generic regmap irq_chip.
193 * 197 *
194 * @reg_offset: Offset of the status/mask register within the bank 198 * @reg_offset: Offset of the status/mask register within the bank
195 * @mask: Mask used to flag/control the register. 199 * @mask: Mask used to flag/control the register.
196 */ 200 */
197 struct regmap_irq { 201 struct regmap_irq {
198 unsigned int reg_offset; 202 unsigned int reg_offset;
199 unsigned int mask; 203 unsigned int mask;
200 }; 204 };
201 205
202 /** 206 /**
203 * Description of a generic regmap irq_chip. This is not intended to 207 * Description of a generic regmap irq_chip. This is not intended to
204 * handle every possible interrupt controller, but it should handle a 208 * handle every possible interrupt controller, but it should handle a
205 * substantial proportion of those that are found in the wild. 209 * substantial proportion of those that are found in the wild.
206 * 210 *
207 * @name: Descriptive name for IRQ controller. 211 * @name: Descriptive name for IRQ controller.
208 * 212 *
209 * @status_base: Base status register address. 213 * @status_base: Base status register address.
210 * @mask_base: Base mask register address. 214 * @mask_base: Base mask register address.
211 * @ack_base: Base ack address. If zero then the chip is clear on read. 215 * @ack_base: Base ack address. If zero then the chip is clear on read.
212 * 216 *
213 * @num_regs: Number of registers in each control bank. 217 * @num_regs: Number of registers in each control bank.
214 * @irqs: Descriptors for individual IRQs. Interrupt numbers are 218 * @irqs: Descriptors for individual IRQs. Interrupt numbers are
215 * assigned based on the index in the array of the interrupt. 219 * assigned based on the index in the array of the interrupt.
216 * @num_irqs: Number of descriptors. 220 * @num_irqs: Number of descriptors.
217 */ 221 */
218 struct regmap_irq_chip { 222 struct regmap_irq_chip {
219 const char *name; 223 const char *name;
220 224
221 unsigned int status_base; 225 unsigned int status_base;
222 unsigned int mask_base; 226 unsigned int mask_base;
223 unsigned int ack_base; 227 unsigned int ack_base;
224 228
225 int num_regs; 229 int num_regs;
226 230
227 const struct regmap_irq *irqs; 231 const struct regmap_irq *irqs;
228 int num_irqs; 232 int num_irqs;
229 }; 233 };
230 234
231 struct regmap_irq_chip_data; 235 struct regmap_irq_chip_data;
232 236
233 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, 237 int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
234 int irq_base, struct regmap_irq_chip *chip, 238 int irq_base, struct regmap_irq_chip *chip,
235 struct regmap_irq_chip_data **data); 239 struct regmap_irq_chip_data **data);
236 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); 240 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
237 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data); 241 int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
238 242
239 #else 243 #else
240 244
241 /* 245 /*
242 * These stubs should only ever be called by generic code which has 246 * These stubs should only ever be called by generic code which has
243 * regmap based facilities, if they ever get called at runtime 247 * regmap based facilities, if they ever get called at runtime
244 * something is going wrong and something probably needs to select 248 * something is going wrong and something probably needs to select
245 * REGMAP. 249 * REGMAP.
246 */ 250 */
247 251
248 static inline int regmap_write(struct regmap *map, unsigned int reg, 252 static inline int regmap_write(struct regmap *map, unsigned int reg,
249 unsigned int val) 253 unsigned int val)
250 { 254 {
251 WARN_ONCE(1, "regmap API is disabled"); 255 WARN_ONCE(1, "regmap API is disabled");
252 return -EINVAL; 256 return -EINVAL;
253 } 257 }
254 258
255 static inline int regmap_raw_write(struct regmap *map, unsigned int reg, 259 static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
256 const void *val, size_t val_len) 260 const void *val, size_t val_len)
257 { 261 {
258 WARN_ONCE(1, "regmap API is disabled"); 262 WARN_ONCE(1, "regmap API is disabled");
259 return -EINVAL; 263 return -EINVAL;
260 } 264 }
261 265
262 static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, 266 static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
263 const void *val, size_t val_count) 267 const void *val, size_t val_count)
264 { 268 {
265 WARN_ONCE(1, "regmap API is disabled"); 269 WARN_ONCE(1, "regmap API is disabled");
266 return -EINVAL; 270 return -EINVAL;
267 } 271 }
268 272
269 static inline int regmap_read(struct regmap *map, unsigned int reg, 273 static inline int regmap_read(struct regmap *map, unsigned int reg,
270 unsigned int *val) 274 unsigned int *val)
271 { 275 {
272 WARN_ONCE(1, "regmap API is disabled"); 276 WARN_ONCE(1, "regmap API is disabled");
273 return -EINVAL; 277 return -EINVAL;
274 } 278 }
275 279
276 static inline int regmap_raw_read(struct regmap *map, unsigned int reg, 280 static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
277 void *val, size_t val_len) 281 void *val, size_t val_len)
278 { 282 {
279 WARN_ONCE(1, "regmap API is disabled"); 283 WARN_ONCE(1, "regmap API is disabled");
280 return -EINVAL; 284 return -EINVAL;
281 } 285 }
282 286
283 static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, 287 static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
284 void *val, size_t val_count) 288 void *val, size_t val_count)
285 { 289 {
286 WARN_ONCE(1, "regmap API is disabled"); 290 WARN_ONCE(1, "regmap API is disabled");
287 return -EINVAL; 291 return -EINVAL;
288 } 292 }
289 293
290 static inline int regmap_update_bits(struct regmap *map, unsigned int reg, 294 static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
291 unsigned int mask, unsigned int val) 295 unsigned int mask, unsigned int val)
292 { 296 {
293 WARN_ONCE(1, "regmap API is disabled"); 297 WARN_ONCE(1, "regmap API is disabled");
294 return -EINVAL; 298 return -EINVAL;
295 } 299 }
296 300
297 static inline int regmap_update_bits_check(struct regmap *map, 301 static inline int regmap_update_bits_check(struct regmap *map,
298 unsigned int reg, 302 unsigned int reg,
299 unsigned int mask, unsigned int val, 303 unsigned int mask, unsigned int val,
300 bool *change) 304 bool *change)
301 { 305 {
302 WARN_ONCE(1, "regmap API is disabled"); 306 WARN_ONCE(1, "regmap API is disabled");
303 return -EINVAL; 307 return -EINVAL;
304 } 308 }
305 309
306 static inline int regmap_get_val_bytes(struct regmap *map) 310 static inline int regmap_get_val_bytes(struct regmap *map)
307 { 311 {
308 WARN_ONCE(1, "regmap API is disabled"); 312 WARN_ONCE(1, "regmap API is disabled");
309 return -EINVAL; 313 return -EINVAL;
310 } 314 }
311 315
312 static inline int regcache_sync(struct regmap *map) 316 static inline int regcache_sync(struct regmap *map)
313 { 317 {
314 WARN_ONCE(1, "regmap API is disabled"); 318 WARN_ONCE(1, "regmap API is disabled");
315 return -EINVAL; 319 return -EINVAL;
316 } 320 }
317 321
318 static inline int regcache_sync_region(struct regmap *map, unsigned int min, 322 static inline int regcache_sync_region(struct regmap *map, unsigned int min,
319 unsigned int max) 323 unsigned int max)
320 { 324 {
321 WARN_ONCE(1, "regmap API is disabled"); 325 WARN_ONCE(1, "regmap API is disabled");
322 return -EINVAL; 326 return -EINVAL;
323 } 327 }
324 328
325 static inline void regcache_cache_only(struct regmap *map, bool enable) 329 static inline void regcache_cache_only(struct regmap *map, bool enable)
326 { 330 {
327 WARN_ONCE(1, "regmap API is disabled"); 331 WARN_ONCE(1, "regmap API is disabled");
328 } 332 }
329 333
330 static inline void regcache_cache_bypass(struct regmap *map, bool enable) 334 static inline void regcache_cache_bypass(struct regmap *map, bool enable)
331 { 335 {
332 WARN_ONCE(1, "regmap API is disabled"); 336 WARN_ONCE(1, "regmap API is disabled");
333 } 337 }
334 338
335 static inline void regcache_mark_dirty(struct regmap *map) 339 static inline void regcache_mark_dirty(struct regmap *map)
336 { 340 {
337 WARN_ONCE(1, "regmap API is disabled"); 341 WARN_ONCE(1, "regmap API is disabled");
338 } 342 }
339 343
340 static inline int regmap_register_patch(struct regmap *map, 344 static inline int regmap_register_patch(struct regmap *map,
341 const struct reg_default *regs, 345 const struct reg_default *regs,
342 int num_regs) 346 int num_regs)
343 { 347 {
344 WARN_ONCE(1, "regmap API is disabled"); 348 WARN_ONCE(1, "regmap API is disabled");
345 return -EINVAL; 349 return -EINVAL;
346 } 350 }
347 351
348 #endif 352 #endif
349 353
350 #endif 354 #endif
351 355