drivers/iio/industrialio-buffer.c
  /* The industrial I/O core
   *
   * Copyright (c) 2008 Jonathan Cameron
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 as published by
   * the Free Software Foundation.
   *
   * Handling of buffer allocation / resizing.
   *
   *
   * Things to look at here.
   * - Better memory allocation techniques?
   * - Alternative access techniques?
   */
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/device.h>
  #include <linux/fs.h>
  #include <linux/cdev.h>
  #include <linux/slab.h>
  #include <linux/poll.h>
  #include <linux/sched/signal.h>

  #include <linux/iio/iio.h>
  #include "iio_core.h"
  #include <linux/iio/sysfs.h>
  #include <linux/iio/buffer.h>
  #include <linux/iio/buffer_impl.h>

  static const char * const iio_endian_prefix[] = {
  	[IIO_BE] = "be",
  	[IIO_LE] = "le",
  };

  static bool iio_buffer_is_active(struct iio_buffer *buf)
  {
  	return !list_empty(&buf->buffer_list);
  }
  static size_t iio_buffer_data_available(struct iio_buffer *buf)
  {
  	return buf->access->data_available(buf);
  }
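  /*
   * Ask the driver to push any samples still held in its hardware FIFO
   * into the software buffer. Returns -ENODEV if the driver does not
   * implement the hwfifo_flush_to_buffer callback; otherwise it returns
   * whatever the callback returns (the number of flushed scans, or a
   * negative errno).
   */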
  static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
  				   struct iio_buffer *buf, size_t required)
  {
  	if (!indio_dev->info->hwfifo_flush_to_buffer)
  		return -ENODEV;
  
  	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
  }
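  /*
   * A buffer counts as ready once at least @to_wait scan elements can be
   * read. For non-blocking reads (@to_wait == 0) a flush of up to
   * @to_flush scans is forced so data parked in a hardware FIFO can still
   * be returned, and for buffers that are no longer active at most a
   * single datum is waited for so readers can drain what is left.
   */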
  static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
  			     size_t to_wait, int to_flush)
  {
  	size_t avail;
  	int flushed = 0;
  	/* wakeup if the device was unregistered */
  	if (!indio_dev->info)
  		return true;
  
  	/* drain the buffer if it was disabled */
  	if (!iio_buffer_is_active(buf)) {
  		to_wait = min_t(size_t, to_wait, 1);
  		to_flush = 0;
  	}
  
  	avail = iio_buffer_data_available(buf);

  	if (avail >= to_wait) {
  		/* force a flush for non-blocking reads */
  		if (!to_wait && avail < to_flush)
  			iio_buffer_flush_hwfifo(indio_dev, buf,
  						to_flush - avail);
  		return true;
  	}
  
  	if (to_flush)
  		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
  						  to_wait - avail);
  	if (flushed <= 0)
  		return false;
  
  	if (avail + flushed >= to_wait)
  		return true;
  
  	return false;
  }
  /**
   * iio_buffer_read_first_n_outer() - chrdev read for buffer access
   * @filp:	File structure pointer for the char device
   * @buf:	Destination buffer for iio buffer read
   * @n:		First n bytes to read
   * @f_ps:	Long offset provided by the user as a seek position
   *
   * This function relies on all buffer implementations having an
   * iio_buffer as their first element.
   *
   * Return: negative values corresponding to error codes or ret != 0
   *	   for ending the reading activity
   **/
  ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
  				      size_t n, loff_t *f_ps)
  {
  	struct iio_dev *indio_dev = filp->private_data;
  	struct iio_buffer *rb = indio_dev->buffer;
  	DEFINE_WAIT_FUNC(wait, woken_wake_function);
  	size_t datum_size;
  	size_t to_wait;
  	int ret = 0;

  	if (!indio_dev->info)
  		return -ENODEV;
  	if (!rb || !rb->access->read_first_n)
  		return -EINVAL;

  	datum_size = rb->bytes_per_datum;
  
  	/*
  	 * If datum_size is 0 there will never be anything to read from the
  	 * buffer, so signal end of file now.
  	 */
  	if (!datum_size)
  		return 0;
  	if (filp->f_flags & O_NONBLOCK)
  		to_wait = 0;
  	else
  		to_wait = min_t(size_t, n / datum_size, rb->watermark);

  	add_wait_queue(&rb->pollq, &wait);
  	do {
  		if (!indio_dev->info) {
  			ret = -ENODEV;
  			break;
  		}
  
  		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
  			if (signal_pending(current)) {
  				ret = -ERESTARTSYS;
  				break;
  			}
  
  			wait_woken(&wait, TASK_INTERRUPTIBLE,
  				   MAX_SCHEDULE_TIMEOUT);
  			continue;
  		}
  
  		ret = rb->access->read_first_n(rb, n, buf);
  		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
  			ret = -EAGAIN;
  	} while (ret == 0);
  	remove_wait_queue(&rb->pollq, &wait);
  
  	return ret;
  }
  /**
   * iio_buffer_poll() - poll the buffer to find out if it has data
   * @filp:	File structure pointer for device access
   * @wait:	Poll table structure pointer for which the driver adds
   *		a wait queue
   *
   * Return: (POLLIN | POLLRDNORM) if data is available for reading
   *	   or 0 for other cases
   */
  unsigned int iio_buffer_poll(struct file *filp,
  			     struct poll_table_struct *wait)
  {
  	struct iio_dev *indio_dev = filp->private_data;
  	struct iio_buffer *rb = indio_dev->buffer;

  	if (!indio_dev->info || rb == NULL)
  		return 0;

  	poll_wait(filp, &rb->pollq, wait);
  	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
  		return POLLIN | POLLRDNORM;
  	return 0;
  }
  /**
   * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
   * @indio_dev: The IIO device
   *
   * Wakes up the event waitqueue used for poll(). Should usually
   * be called when the device is unregistered.
   */
  void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
  {
  	if (!indio_dev->buffer)
  		return;
  
  	wake_up(&indio_dev->buffer->pollq);
  }
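  /**
   * iio_buffer_init() - initialize the fields common to all IIO buffers
   * @buffer: The buffer to initialize
   *
   * Sets up the demux and buffer lists, the poll waitqueue and the
   * reference count, and makes sure the watermark is at least one datum.
   */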
  void iio_buffer_init(struct iio_buffer *buffer)
  {
  	INIT_LIST_HEAD(&buffer->demux_list);
  	INIT_LIST_HEAD(&buffer->buffer_list);
  	init_waitqueue_head(&buffer->pollq);
  	kref_init(&buffer->ref);
  	if (!buffer->watermark)
  		buffer->watermark = 1;
  }
  EXPORT_SYMBOL(iio_buffer_init);

  /**
   * iio_buffer_set_attrs - Set buffer specific attributes
   * @buffer: The buffer for which we are setting attributes
   * @attrs: Pointer to a null terminated list of pointers to attributes
   */
  void iio_buffer_set_attrs(struct iio_buffer *buffer,
  			 const struct attribute **attrs)
  {
  	buffer->attrs = attrs;
  }
  EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
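  /*
   * Usage sketch (hypothetical driver code, names are illustrative only):
   * hand over a NULL terminated array of extra attributes before the
   * device is registered.
   *
   *	static const struct attribute *my_buffer_attrs[] = {
   *		&my_extra_dev_attr.attr,
   *		NULL,
   *	};
   *	iio_buffer_set_attrs(indio_dev->buffer, my_buffer_attrs);
   *
   * The core only stores the pointer; the attributes are exposed next to
   * the buffer's own sysfs files when the buffer sysfs group is built.
   */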
  static ssize_t iio_show_scan_index(struct device *dev,
  				   struct device_attribute *attr,
  				   char *buf)
  {
  	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
  }
  
  static ssize_t iio_show_fixed_type(struct device *dev,
  				   struct device_attribute *attr,
  				   char *buf)
  {
  	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  	u8 type = this_attr->c->scan_type.endianness;
  
  	if (type == IIO_CPU) {
  #ifdef __LITTLE_ENDIAN
  		type = IIO_LE;
  #else
  		type = IIO_BE;
  #endif
  	}
  	if (this_attr->c->scan_type.repeat > 1)
  		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
  		       iio_endian_prefix[type],
  		       this_attr->c->scan_type.sign,
  		       this_attr->c->scan_type.realbits,
  		       this_attr->c->scan_type.storagebits,
  		       this_attr->c->scan_type.repeat,
  		       this_attr->c->scan_type.shift);
  	else
  		return sprintf(buf, "%s:%c%d/%d>>%u\n",
  		       iio_endian_prefix[type],
  		       this_attr->c->scan_type.sign,
  		       this_attr->c->scan_type.realbits,
  		       this_attr->c->scan_type.storagebits,
  		       this_attr->c->scan_type.shift);
  }
  static ssize_t iio_scan_el_show(struct device *dev,
  				struct device_attribute *attr,
  				char *buf)
  {
  	int ret;
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

  	/* Ensure ret is 0 or 1. */
  	ret = !!test_bit(to_iio_dev_attr(attr)->address,
  		       indio_dev->buffer->scan_mask);
  	return sprintf(buf, "%d\n", ret);
  }
  /* Note NULL used as error indicator as it doesn't make sense. */
  static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
  					  unsigned int masklength,
  					  const unsigned long *mask,
  					  bool strict)
  {
  	if (bitmap_empty(mask, masklength))
  		return NULL;
  	while (*av_masks) {
  		if (strict) {
  			if (bitmap_equal(mask, av_masks, masklength))
  				return av_masks;
  		} else {
  			if (bitmap_subset(mask, av_masks, masklength))
  				return av_masks;
  		}
  		av_masks += BITS_TO_LONGS(masklength);
  	}
  	return NULL;
  }
  
  static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
  	const unsigned long *mask)
  {
  	if (!indio_dev->setup_ops->validate_scan_mask)
  		return true;
  
  	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
  }
  
  /**
   * iio_scan_mask_set() - set particular bit in the scan mask
   * @indio_dev: the iio device
   * @buffer: the buffer whose scan mask we are interested in
   * @bit: the bit to be set.
   *
   * Note that at this point we have no way of knowing what other
   * buffers might request, hence this code only verifies that the
   * individual buffers request is plausible.
   */
  static int iio_scan_mask_set(struct iio_dev *indio_dev,
  		      struct iio_buffer *buffer, int bit)
  {
  	const unsigned long *mask;
  	unsigned long *trialmask;
  	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
  				  sizeof(*trialmask),
  				  GFP_KERNEL);
  	if (trialmask == NULL)
  		return -ENOMEM;
  	if (!indio_dev->masklength) {
  		WARN(1, "Trying to set scanmask prior to registering buffer\n");
  		goto err_invalid_mask;
  	}
  	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
  	set_bit(bit, trialmask);
  
  	if (!iio_validate_scan_mask(indio_dev, trialmask))
  		goto err_invalid_mask;
  
  	if (indio_dev->available_scan_masks) {
  		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
  					   indio_dev->masklength,
  					   trialmask, false);
  		if (!mask)
  			goto err_invalid_mask;
  	}
  	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
  
  	kfree(trialmask);
  
  	return 0;
  
  err_invalid_mask:
  	kfree(trialmask);
  	return -EINVAL;
  }
  static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
  {
  	clear_bit(bit, buffer->scan_mask);
  	return 0;
  }
  static int iio_scan_mask_query(struct iio_dev *indio_dev,
  			       struct iio_buffer *buffer, int bit)
  {
  	if (bit > indio_dev->masklength)
  		return -EINVAL;
  
  	if (!buffer->scan_mask)
  		return 0;
  
  	/* Ensure return value is 0 or 1. */
  	return !!test_bit(bit, buffer->scan_mask);
  };
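  /*
   * Store handler for the per-channel "en" files under scan_elements:
   * parses a boolean, refuses changes while the buffer is active, and
   * then sets or clears the matching bit in the buffer's scan mask.
   */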
  static ssize_t iio_scan_el_store(struct device *dev,
  				 struct device_attribute *attr,
  				 const char *buf,
  				 size_t len)
  {
  	int ret;
  	bool state;
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	struct iio_buffer *buffer = indio_dev->buffer;
  	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
  	ret = strtobool(buf, &state);
  	if (ret < 0)
  		return ret;
  	mutex_lock(&indio_dev->mlock);
  	if (iio_buffer_is_active(indio_dev->buffer)) {
  		ret = -EBUSY;
  		goto error_ret;
  	}
  	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
  	if (ret < 0)
  		goto error_ret;
  	if (!state && ret) {
  		ret = iio_scan_mask_clear(buffer, this_attr->address);
  		if (ret)
  			goto error_ret;
  	} else if (state && !ret) {
  		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
  		if (ret)
  			goto error_ret;
  	}
  
  error_ret:
  	mutex_unlock(&indio_dev->mlock);
  	return ret < 0 ? ret : len;
  
  }
  
  static ssize_t iio_scan_el_ts_show(struct device *dev,
  				   struct device_attribute *attr,
  				   char *buf)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
  }
  
  static ssize_t iio_scan_el_ts_store(struct device *dev,
  				    struct device_attribute *attr,
  				    const char *buf,
  				    size_t len)
  {
  	int ret;
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	bool state;

  	ret = strtobool(buf, &state);
  	if (ret < 0)
  		return ret;
  	mutex_lock(&indio_dev->mlock);
  	if (iio_buffer_is_active(indio_dev->buffer)) {
  		ret = -EBUSY;
  		goto error_ret;
  	}
  	indio_dev->buffer->scan_timestamp = state;
  error_ret:
  	mutex_unlock(&indio_dev->mlock);
  
  	return ret ? ret : len;
  }
  static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
  					const struct iio_chan_spec *chan)
  {
  	int ret, attrcount = 0;
  	struct iio_buffer *buffer = indio_dev->buffer;

  	ret = __iio_add_chan_devattr("index",
  				     chan,
  				     &iio_show_scan_index,
  				     NULL,
  				     0,
  				     IIO_SEPARATE,
  				     &indio_dev->dev,
  				     &buffer->scan_el_dev_attr_list);
  	if (ret)
  		return ret;
  	attrcount++;
  	ret = __iio_add_chan_devattr("type",
  				     chan,
  				     &iio_show_fixed_type,
  				     NULL,
  				     0,
  				     0,
  				     &indio_dev->dev,
  				     &buffer->scan_el_dev_attr_list);
  	if (ret)
  		return ret;
  	attrcount++;
  	if (chan->type != IIO_TIMESTAMP)
  		ret = __iio_add_chan_devattr("en",
  					     chan,
  					     &iio_scan_el_show,
  					     &iio_scan_el_store,
  					     chan->scan_index,
  					     0,
  					     &indio_dev->dev,
  					     &buffer->scan_el_dev_attr_list);
  	else
  		ret = __iio_add_chan_devattr("en",
  					     chan,
  					     &iio_scan_el_ts_show,
  					     &iio_scan_el_ts_store,
  					     chan->scan_index,
  					     0,
  					     &indio_dev->dev,
  					     &buffer->scan_el_dev_attr_list);
  	if (ret)
  		return ret;
  	attrcount++;
  	ret = attrcount;
  	return ret;
  }
  static ssize_t iio_buffer_read_length(struct device *dev,
  				      struct device_attribute *attr,
  				      char *buf)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	struct iio_buffer *buffer = indio_dev->buffer;

  	return sprintf(buf, "%d\n", buffer->length);
  }

  static ssize_t iio_buffer_write_length(struct device *dev,
  				       struct device_attribute *attr,
  				       const char *buf, size_t len)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	struct iio_buffer *buffer = indio_dev->buffer;
  	unsigned int val;
  	int ret;

  	ret = kstrtouint(buf, 10, &val);
  	if (ret)
  		return ret;
  	if (val == buffer->length)
  		return len;

  	mutex_lock(&indio_dev->mlock);
  	if (iio_buffer_is_active(indio_dev->buffer)) {
  		ret = -EBUSY;
  	} else {
  		buffer->access->set_length(buffer, val);
  		ret = 0;
  	}
  	if (ret)
  		goto out;
  	if (buffer->length && buffer->length < buffer->watermark)
  		buffer->watermark = buffer->length;
  out:
  	mutex_unlock(&indio_dev->mlock);

  	return ret ? ret : len;
  }

  static ssize_t iio_buffer_show_enable(struct device *dev,
  				      struct device_attribute *attr,
  				      char *buf)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
  }

  static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
  					     unsigned int scan_index)
  {
  	const struct iio_chan_spec *ch;
  	unsigned int bytes;
  
  	ch = iio_find_channel_from_si(indio_dev, scan_index);
  	bytes = ch->scan_type.storagebits / 8;
  	if (ch->scan_type.repeat > 1)
  		bytes *= ch->scan_type.repeat;
  	return bytes;
  }
  
  static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
  {
  	return iio_storage_bytes_for_si(indio_dev,
  					indio_dev->scan_index_timestamp);
  }
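  /*
   * Scan elements are packed in scan index order, each naturally aligned
   * to its own storage size, with the optional timestamp last. As a
   * worked example: one 16 bit channel plus a 64 bit timestamp packs as
   * bytes 0-1 for the sample, padding up to offset 8, and the timestamp
   * in bytes 8-15, i.e. 16 bytes per scan.
   */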
  static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
  				const unsigned long *mask, bool timestamp)
  {
  	unsigned bytes = 0;
  	int length, i;
  
  	/* How much space will the demuxed element take? */
  	for_each_set_bit(i, mask,
  			 indio_dev->masklength) {
  		length = iio_storage_bytes_for_si(indio_dev, i);
  		bytes = ALIGN(bytes, length);
  		bytes += length;
  	}

  	if (timestamp) {
  		length = iio_storage_bytes_for_timestamp(indio_dev);
  		bytes = ALIGN(bytes, length);
  		bytes += length;
  	}
  	return bytes;
  }
  static void iio_buffer_activate(struct iio_dev *indio_dev,
  	struct iio_buffer *buffer)
  {
  	iio_buffer_get(buffer);
  	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
  }
  
  static void iio_buffer_deactivate(struct iio_buffer *buffer)
  {
  	list_del_init(&buffer->buffer_list);
  	wake_up_interruptible(&buffer->pollq);
  	iio_buffer_put(buffer);
  }
  static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
  {
  	struct iio_buffer *buffer, *_buffer;
  
  	list_for_each_entry_safe(buffer, _buffer,
  			&indio_dev->buffer_list, buffer_list)
  		iio_buffer_deactivate(buffer);
  }
  static int iio_buffer_enable(struct iio_buffer *buffer,
  	struct iio_dev *indio_dev)
  {
  	if (!buffer->access->enable)
  		return 0;
  	return buffer->access->enable(buffer, indio_dev);
  }
  
  static int iio_buffer_disable(struct iio_buffer *buffer,
  	struct iio_dev *indio_dev)
  {
  	if (!buffer->access->disable)
  		return 0;
  	return buffer->access->disable(buffer, indio_dev);
  }
  static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
  	struct iio_buffer *buffer)
  {
  	unsigned int bytes;
  
  	if (!buffer->access->set_bytes_per_datum)
  		return;
  
  	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
  		buffer->scan_timestamp);
  
  	buffer->access->set_bytes_per_datum(buffer, bytes);
  }
  static int iio_buffer_request_update(struct iio_dev *indio_dev,
  	struct iio_buffer *buffer)
  {
  	int ret;
  
  	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
  	if (buffer->access->request_update) {
  		ret = buffer->access->request_update(buffer);
  		if (ret) {
  			dev_dbg(&indio_dev->dev,
  			       "Buffer not started: buffer parameter update failed (%d)\n",
  				ret);
  			return ret;
  		}
  	}
  
  	return 0;
  }
  static void iio_free_scan_mask(struct iio_dev *indio_dev,
  	const unsigned long *mask)
  {
  	/* If the mask is dynamically allocated free it, otherwise do nothing */
  	if (!indio_dev->available_scan_masks)
  		kfree(mask);
  }
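  /*
   * Snapshot of the combined configuration that iio_verify_update()
   * computes for the set of attached buffers and that iio_enable_buffers()
   * later applies: operating mode, device watermark, composite scan mask,
   * bytes per scan and whether a timestamp is included.
   */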
  struct iio_device_config {
  	unsigned int mode;
  	unsigned int watermark;
  	const unsigned long *scan_mask;
  	unsigned int scan_bytes;
  	bool scan_timestamp;
  };
  
  static int iio_verify_update(struct iio_dev *indio_dev,
  	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
  	struct iio_device_config *config)
  {
  	unsigned long *compound_mask;
  	const unsigned long *scan_mask;
  	bool strict_scanmask = false;
  	struct iio_buffer *buffer;
  	bool scan_timestamp;
  	unsigned int modes;
  
  	memset(config, 0, sizeof(*config));
  	config->watermark = ~0;
  
  	/*
  	 * If there is just one buffer and we are removing it there is nothing
  	 * to verify.
  	 */
  	if (remove_buffer && !insert_buffer &&
  		list_is_singular(&indio_dev->buffer_list))
  			return 0;
  	modes = indio_dev->modes;
  
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  		if (buffer == remove_buffer)
  			continue;
  		modes &= buffer->access->modes;
  		config->watermark = min(config->watermark, buffer->watermark);
  	}
  	if (insert_buffer) {
  		modes &= insert_buffer->access->modes;
  		config->watermark = min(config->watermark,
  			insert_buffer->watermark);
  	}

  	/* Definitely possible for devices to support both of these. */
  	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
  		config->mode = INDIO_BUFFER_TRIGGERED;
  	} else if (modes & INDIO_BUFFER_HARDWARE) {
  		/*
  		 * Keep things simple for now and only allow a single buffer to
  		 * be connected in hardware mode.
  		 */
  		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
  			return -EINVAL;
  		config->mode = INDIO_BUFFER_HARDWARE;
  		strict_scanmask = true;
  	} else if (modes & INDIO_BUFFER_SOFTWARE) {
  		config->mode = INDIO_BUFFER_SOFTWARE;
  	} else {
  		/* Can only occur on first buffer */
  		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
  			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
  		return -EINVAL;
  	}
  
  	/* What scan mask do we actually have? */
  	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
  				sizeof(long), GFP_KERNEL);
  	if (compound_mask == NULL)
  		return -ENOMEM;
  
  	scan_timestamp = false;
  
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  		if (buffer == remove_buffer)
  			continue;
  		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
  			  indio_dev->masklength);
  		scan_timestamp |= buffer->scan_timestamp;
  	}
  
  	if (insert_buffer) {
  		bitmap_or(compound_mask, compound_mask,
  			  insert_buffer->scan_mask, indio_dev->masklength);
  		scan_timestamp |= insert_buffer->scan_timestamp;
  	}
  
  	if (indio_dev->available_scan_masks) {
  		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
  				    indio_dev->masklength,
  				    compound_mask,
  				    strict_scanmask);
  		kfree(compound_mask);
  		if (scan_mask == NULL)
  			return -EINVAL;
  	} else {
  	    scan_mask = compound_mask;
  	}
  
  	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
  				    scan_mask, scan_timestamp);
  	config->scan_mask = scan_mask;
  	config->scan_timestamp = scan_timestamp;
  
  	return 0;
  }
  /**
   * struct iio_demux_table - table describing demux memcpy ops
   * @from:	index to copy from
   * @to:		index to copy to
   * @length:	how many bytes to copy
   * @l:		list head used for management
   */
  struct iio_demux_table {
  	unsigned from;
  	unsigned to;
  	unsigned length;
  	struct list_head l;
  };
  
  static void iio_buffer_demux_free(struct iio_buffer *buffer)
  {
  	struct iio_demux_table *p, *q;
  	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
  		list_del(&p->l);
  		kfree(p);
  	}
  }
  
  static int iio_buffer_add_demux(struct iio_buffer *buffer,
  	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
  	unsigned int length)
  {
  
  	if (*p && (*p)->from + (*p)->length == in_loc &&
  		(*p)->to + (*p)->length == out_loc) {
  		(*p)->length += length;
  	} else {
  		*p = kmalloc(sizeof(**p), GFP_KERNEL);
  		if (*p == NULL)
  			return -ENOMEM;
  		(*p)->from = in_loc;
  		(*p)->to = out_loc;
  		(*p)->length = length;
  		list_add_tail(&(*p)->l, &buffer->demux_list);
  	}
  
  	return 0;
  }
  
  static int iio_buffer_update_demux(struct iio_dev *indio_dev,
  				   struct iio_buffer *buffer)
  {
  	int ret, in_ind = -1, out_ind, length;
  	unsigned in_loc = 0, out_loc = 0;
  	struct iio_demux_table *p = NULL;
  
  	/* Clear out any old demux */
  	iio_buffer_demux_free(buffer);
  	kfree(buffer->demux_bounce);
  	buffer->demux_bounce = NULL;
  
  	/* First work out which scan mode we will actually have */
  	if (bitmap_equal(indio_dev->active_scan_mask,
  			 buffer->scan_mask,
  			 indio_dev->masklength))
  		return 0;
  
  	/* Now we have the two masks, work from least sig and build up sizes */
  	for_each_set_bit(out_ind,
  			 buffer->scan_mask,
  			 indio_dev->masklength) {
  		in_ind = find_next_bit(indio_dev->active_scan_mask,
  				       indio_dev->masklength,
  				       in_ind + 1);
  		while (in_ind != out_ind) {
  			in_ind = find_next_bit(indio_dev->active_scan_mask,
  					       indio_dev->masklength,
  					       in_ind + 1);
  			length = iio_storage_bytes_for_si(indio_dev, in_ind);
  			/* Make sure we are aligned */
  			in_loc = roundup(in_loc, length) + length;
  		}
  		length = iio_storage_bytes_for_si(indio_dev, in_ind);
  		out_loc = roundup(out_loc, length);
  		in_loc = roundup(in_loc, length);
  		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  		if (ret)
  			goto error_clear_mux_table;
  		out_loc += length;
  		in_loc += length;
  	}
  	/* Relies on scan_timestamp being last */
  	if (buffer->scan_timestamp) {
  		length = iio_storage_bytes_for_timestamp(indio_dev);
  		out_loc = roundup(out_loc, length);
  		in_loc = roundup(in_loc, length);
  		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
  		if (ret)
  			goto error_clear_mux_table;
  		out_loc += length;
  		in_loc += length;
  	}
  	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
  	if (buffer->demux_bounce == NULL) {
  		ret = -ENOMEM;
  		goto error_clear_mux_table;
  	}
  	return 0;
  
  error_clear_mux_table:
  	iio_buffer_demux_free(buffer);
  
  	return ret;
  }
  
  static int iio_update_demux(struct iio_dev *indio_dev)
  {
  	struct iio_buffer *buffer;
  	int ret;
  
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  		ret = iio_buffer_update_demux(indio_dev, buffer);
  		if (ret < 0)
  			goto error_clear_mux_table;
  	}
  	return 0;
  
  error_clear_mux_table:
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
  		iio_buffer_demux_free(buffer);
  
  	return ret;
  }
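  /*
   * Bring the device up with a previously verified configuration. The
   * ordering matters: demux tables and preenable first, then the driver's
   * update_scan_mode and hardware FIFO watermark, then each attached
   * buffer's enable callback, and only then postenable, with the device
   * already switched into its buffered mode.
   */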
  static int iio_enable_buffers(struct iio_dev *indio_dev,
  	struct iio_device_config *config)
  {
  	struct iio_buffer *buffer;
  	int ret;

  	indio_dev->active_scan_mask = config->scan_mask;
  	indio_dev->scan_timestamp = config->scan_timestamp;
  	indio_dev->scan_bytes = config->scan_bytes;

  	iio_update_demux(indio_dev);
  	/* Wind up again */
  	if (indio_dev->setup_ops->preenable) {
  		ret = indio_dev->setup_ops->preenable(indio_dev);
  		if (ret) {
  			dev_dbg(&indio_dev->dev,
  			       "Buffer not started: buffer preenable failed (%d)\n", ret);
  			goto err_undo_config;
  		}
  	}

  	if (indio_dev->info->update_scan_mode) {
  		ret = indio_dev->info
  			->update_scan_mode(indio_dev,
  					   indio_dev->active_scan_mask);
  		if (ret < 0) {
  			dev_dbg(&indio_dev->dev,
  				"Buffer not started: update scan mode failed (%d)\n",
  				ret);
  			goto err_run_postdisable;
  		}
  	}

  	if (indio_dev->info->hwfifo_set_watermark)
  		indio_dev->info->hwfifo_set_watermark(indio_dev,
  			config->watermark);
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  		ret = iio_buffer_enable(buffer, indio_dev);
  		if (ret)
  			goto err_disable_buffers;
  	}
  	indio_dev->currentmode = config->mode;
  
  	if (indio_dev->setup_ops->postenable) {
  		ret = indio_dev->setup_ops->postenable(indio_dev);
  		if (ret) {
  			dev_dbg(&indio_dev->dev,
  			       "Buffer not started: postenable failed (%d)\n", ret);
  			goto err_disable_buffers;
  		}
  	}
  	return 0;

  err_disable_buffers:
  	list_for_each_entry_continue_reverse(buffer, &indio_dev->buffer_list,
  					     buffer_list)
  		iio_buffer_disable(buffer, indio_dev);
  err_run_postdisable:
  	indio_dev->currentmode = INDIO_DIRECT_MODE;
  	if (indio_dev->setup_ops->postdisable)
  		indio_dev->setup_ops->postdisable(indio_dev);
  err_undo_config:
  	indio_dev->active_scan_mask = NULL;
  	return ret;
  }
  
  static int iio_disable_buffers(struct iio_dev *indio_dev)
  {
  	struct iio_buffer *buffer;
  	int ret = 0;
  	int ret2;
  
  	/* Wind down existing buffers - iff there are any */
  	if (list_empty(&indio_dev->buffer_list))
  		return 0;
  	/*
  	 * If things go wrong at some step in disable we still need to continue
  	 * to perform the other steps, otherwise we leave the device in an
  	 * inconsistent state. We return the error code for the first error we
  	 * encountered.
  	 */
  	if (indio_dev->setup_ops->predisable) {
  		ret2 = indio_dev->setup_ops->predisable(indio_dev);
  		if (ret2 && !ret)
  			ret = ret2;
  	}
  
  	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
  		ret2 = iio_buffer_disable(buffer, indio_dev);
  		if (ret2 && !ret)
  			ret = ret2;
  	}
  
  	indio_dev->currentmode = INDIO_DIRECT_MODE;
  
  	if (indio_dev->setup_ops->postdisable) {
  		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
  		if (ret2 && !ret)
  			ret = ret2;
  	}
  	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
  	indio_dev->active_scan_mask = NULL;
  
  	return ret;
  }
  
  static int __iio_update_buffers(struct iio_dev *indio_dev,
  		       struct iio_buffer *insert_buffer,
  		       struct iio_buffer *remove_buffer)
  {
  	struct iio_device_config new_config;
  	int ret;
  
  	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
  		&new_config);
  	if (ret)
  		return ret;
  
  	if (insert_buffer) {
  		ret = iio_buffer_request_update(indio_dev, insert_buffer);
  		if (ret)
  			goto err_free_config;
  	}
  	ret = iio_disable_buffers(indio_dev);
  	if (ret)
  		goto err_deactivate_all;
  
  	if (remove_buffer)
  		iio_buffer_deactivate(remove_buffer);
  	if (insert_buffer)
  		iio_buffer_activate(indio_dev, insert_buffer);
  
  	/* If no buffers in list, we are done */
  	if (list_empty(&indio_dev->buffer_list))
  		return 0;
  
  	ret = iio_enable_buffers(indio_dev, &new_config);
  	if (ret)
  		goto err_deactivate_all;

  	return 0;

  err_deactivate_all:
  	/*
  	 * We've already verified that the config is valid earlier. If things go
  	 * wrong in either enable or disable the most likely reason is an IO
  	 * error from the device. In this case there is no good recovery
  	 * strategy. Just make sure to disable everything and leave the device
  	 * in a sane state.  With a bit of luck the device might come back to
  	 * life again later and userspace can try again.
  	 */
  	iio_buffer_deactivate_all(indio_dev);
  err_free_config:
  	iio_free_scan_mask(indio_dev, new_config.scan_mask);
  	return ret;
  }
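  /*
   * iio_update_buffers() - add and/or remove a buffer on a device
   * @indio_dev:		the IIO device the buffers are attached to
   * @insert_buffer:	buffer to attach, or NULL
   * @remove_buffer:	buffer to detach, or NULL
   *
   * Locked wrapper around __iio_update_buffers(). Requests that already
   * hold (inserting an active buffer, removing an inactive one) are
   * ignored, and -ENODEV is returned once the device has been
   * unregistered.
   */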
  
  int iio_update_buffers(struct iio_dev *indio_dev,
  		       struct iio_buffer *insert_buffer,
  		       struct iio_buffer *remove_buffer)
  {
  	int ret;
  	if (insert_buffer == remove_buffer)
  		return 0;
  	mutex_lock(&indio_dev->info_exist_lock);
  	mutex_lock(&indio_dev->mlock);
  	if (insert_buffer && iio_buffer_is_active(insert_buffer))
  		insert_buffer = NULL;
  
  	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
  		remove_buffer = NULL;
  
  	if (!insert_buffer && !remove_buffer) {
  		ret = 0;
  		goto out_unlock;
  	}
  	if (indio_dev->info == NULL) {
  		ret = -ENODEV;
  		goto out_unlock;
  	}
  
  	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
  
  out_unlock:
  	mutex_unlock(&indio_dev->mlock);
  	mutex_unlock(&indio_dev->info_exist_lock);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(iio_update_buffers);
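  /*
   * Minimal usage sketch (hypothetical in-kernel caller): attach a
   * previously allocated buffer and detach it again when done.
   *
   *	ret = iio_update_buffers(indio_dev, new_buf, NULL);
   *	...
   *	ret = iio_update_buffers(indio_dev, NULL, new_buf);
   *
   * Enabling or disabling through the sysfs "enable" file below takes the
   * same __iio_update_buffers() path.
   */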
  void iio_disable_all_buffers(struct iio_dev *indio_dev)
  {
  	iio_disable_buffers(indio_dev);
  	iio_buffer_deactivate_all(indio_dev);
  }
  static ssize_t iio_buffer_store_enable(struct device *dev,
  				       struct device_attribute *attr,
  				       const char *buf,
  				       size_t len)
  {
  	int ret;
  	bool requested_state;
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	bool inlist;
  
  	ret = strtobool(buf, &requested_state);
  	if (ret < 0)
  		return ret;
  
  	mutex_lock(&indio_dev->mlock);
  
  	/* Find out if it is in the list */
  	inlist = iio_buffer_is_active(indio_dev->buffer);
  	/* Already in desired state */
  	if (inlist == requested_state)
  		goto done;
  
  	if (requested_state)
  		ret = __iio_update_buffers(indio_dev,
  					 indio_dev->buffer, NULL);
  	else
  		ret = __iio_update_buffers(indio_dev,
  					 NULL, indio_dev->buffer);
  done:
  	mutex_unlock(&indio_dev->mlock);
  	return (ret < 0) ? ret : len;
  }

  static const char * const iio_scan_elements_group_name = "scan_elements";
  static ssize_t iio_buffer_show_watermark(struct device *dev,
  					 struct device_attribute *attr,
  					 char *buf)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	struct iio_buffer *buffer = indio_dev->buffer;
  
  	return sprintf(buf, "%u\n", buffer->watermark);
  }
  
  static ssize_t iio_buffer_store_watermark(struct device *dev,
  					  struct device_attribute *attr,
  					  const char *buf,
  					  size_t len)
  {
  	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
  	struct iio_buffer *buffer = indio_dev->buffer;
  	unsigned int val;
  	int ret;
  
  	ret = kstrtouint(buf, 10, &val);
  	if (ret)
  		return ret;
  	if (!val)
  		return -EINVAL;
  
  	mutex_lock(&indio_dev->mlock);
  
  	if (val > buffer->length) {
  		ret = -EINVAL;
  		goto out;
  	}
  
  	if (iio_buffer_is_active(indio_dev->buffer)) {
  		ret = -EBUSY;
  		goto out;
  	}
  
  	buffer->watermark = val;
  out:
  	mutex_unlock(&indio_dev->mlock);
  
  	return ret ? ret : len;
  }
  static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
  		   iio_buffer_write_length);
  static struct device_attribute dev_attr_length_ro = __ATTR(length,
  	S_IRUGO, iio_buffer_read_length, NULL);
  static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
  		   iio_buffer_show_enable, iio_buffer_store_enable);
  static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
  		   iio_buffer_show_watermark, iio_buffer_store_watermark);
  static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
  	S_IRUGO, iio_buffer_show_watermark, NULL);

  static struct attribute *iio_buffer_attrs[] = {
  	&dev_attr_length.attr,
  	&dev_attr_enable.attr,
  	&dev_attr_watermark.attr,
  };
  int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
  {
  	struct iio_dev_attr *p;
  	struct attribute **attr;
  	struct iio_buffer *buffer = indio_dev->buffer;
  	int ret, i, attrn, attrcount, attrcount_orig = 0;
  	const struct iio_chan_spec *channels;
  	channels = indio_dev->channels;
  	if (channels) {
  		int ml = indio_dev->masklength;
  
  		for (i = 0; i < indio_dev->num_channels; i++)
  			ml = max(ml, channels[i].scan_index + 1);
  		indio_dev->masklength = ml;
  	}
  	if (!buffer)
  		return 0;
  	attrcount = 0;
  	if (buffer->attrs) {
  		while (buffer->attrs[attrcount] != NULL)
  			attrcount++;
  	}
  	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
  		       sizeof(struct attribute *), GFP_KERNEL);
  	if (!attr)
  		return -ENOMEM;
  	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
  	if (!buffer->access->set_length)
  		attr[0] = &dev_attr_length_ro.attr;
  	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
  		attr[2] = &dev_attr_watermark_ro.attr;
  	if (buffer->attrs)
  		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
  		       sizeof(struct attribute *) * attrcount);
  
  	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;
  
  	buffer->buffer_group.name = "buffer";
  	buffer->buffer_group.attrs = attr;
  
  	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
  	if (buffer->scan_el_attrs != NULL) {
  		attr = buffer->scan_el_attrs->attrs;
  		while (*attr++ != NULL)
  			attrcount_orig++;
  	}
  	attrcount = attrcount_orig;
  	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
  	channels = indio_dev->channels;
  	if (channels) {
  		/* Add scan element attributes for each channel with a valid scan_index */
  		for (i = 0; i < indio_dev->num_channels; i++) {
  			if (channels[i].scan_index < 0)
  				continue;
  			ret = iio_buffer_add_channel_sysfs(indio_dev,
  							 &channels[i]);
  			if (ret < 0)
  				goto error_cleanup_dynamic;
  			attrcount += ret;
  			if (channels[i].type == IIO_TIMESTAMP)
  				indio_dev->scan_index_timestamp =
  					channels[i].scan_index;
  		}
  		if (indio_dev->masklength && buffer->scan_mask == NULL) {
  			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
  						    sizeof(*buffer->scan_mask),
  						    GFP_KERNEL);
  			if (buffer->scan_mask == NULL) {
  				ret = -ENOMEM;
  				goto error_cleanup_dynamic;
  			}
  		}
  	}
  
  	buffer->scan_el_group.name = iio_scan_elements_group_name;
  
  	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
  					      sizeof(buffer->scan_el_group.attrs[0]),
  					      GFP_KERNEL);
  	if (buffer->scan_el_group.attrs == NULL) {
  		ret = -ENOMEM;
  		goto error_free_scan_mask;
  	}
  	if (buffer->scan_el_attrs)
  		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
  		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
  	attrn = attrcount_orig;
  
  	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
  		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
  	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;
  
  	return 0;
  
  error_free_scan_mask:
  	kfree(buffer->scan_mask);
  error_cleanup_dynamic:
  	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
  	kfree(indio_dev->buffer->buffer_group.attrs);
  
  	return ret;
  }
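
  /*
   * Illustrative sketch (driver side, not part of this file): the groups built
   * above are derived from each channel's scan_index and scan_type.  A
   * hypothetical channel description such as
   *
   *	static const struct iio_chan_spec example_channels[] = {
   *		{
   *			.type = IIO_VOLTAGE,
   *			.indexed = 1,
   *			.channel = 0,
   *			.scan_index = 0,
   *			.scan_type = {
   *				.sign = 's',
   *				.realbits = 12,
   *				.storagebits = 16,
   *				.shift = 4,
   *				.endianness = IIO_BE,
   *			},
   *		},
   *	};
   *
   * would yield scan_elements/in_voltage0_en, in_voltage0_index and
   * in_voltage0_type (reading "be:s12/16>>4"), and masklength would be sized
   * from the largest scan_index + 1.
   */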
  
  void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
  {
  	if (!indio_dev->buffer)
  		return;
  
  	kfree(indio_dev->buffer->scan_mask);
  	kfree(indio_dev->buffer->buffer_group.attrs);
  	kfree(indio_dev->buffer->scan_el_group.attrs);
  	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
  }
  /**
   * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
   * @indio_dev: the iio device
   * @mask: scan mask to be checked
   *
   * Return true if exactly one bit is set in the scan mask, false otherwise. It
   * can be used for devices where only one channel can be active for sampling at
   * a time.
   */
  bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
  	const unsigned long *mask)
  {
  	return bitmap_weight(mask, indio_dev->masklength) == 1;
  }
  EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
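
  /*
   * Illustrative sketch (driver side, not part of this file): a device that can
   * only stream a single channel at a time would typically plug this helper
   * into its buffer setup ops; the surrounding names are assumptions.
   *
   *	static const struct iio_buffer_setup_ops example_setup_ops = {
   *		.postenable = example_buffer_postenable,
   *		.predisable = example_buffer_predisable,
   *		.validate_scan_mask = iio_validate_scan_mask_onehot,
   *	};
   *
   *	indio_dev->setup_ops = &example_setup_ops;
   */
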
  static const void *iio_demux(struct iio_buffer *buffer,
  				 const void *datain)
  {
  	struct iio_demux_table *t;
  
  	if (list_empty(&buffer->demux_list))
  		return datain;
  	list_for_each_entry(t, &buffer->demux_list, l)
  		memcpy(buffer->demux_bounce + t->to,
  		       datain + t->from, t->length);
  
  	return buffer->demux_bounce;
  }
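
  /*
   * Worked example (illustrative): if the device ends up capturing three 2-byte
   * channels A, B and C at offsets 0, 2 and 4 (e.g. forced by
   * available_scan_masks), a buffer that only requested A and C gets a
   * demux_list roughly equivalent to
   *
   *	{ .from = 0, .to = 0, .length = 2 }
   *	{ .from = 4, .to = 2, .length = 2 }
   *
   * so demux_bounce ends up holding a packed copy of just the requested
   * channels.
   */
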
  static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
  {
  	const void *dataout = iio_demux(buffer, data);
  	int ret;
  
  	ret = buffer->access->store_to(buffer, dataout);
  	if (ret)
  		return ret;

  	/*
  	 * We can't just test for watermark to decide if we wake the poll queue
  	 * because read may request fewer samples than the watermark.
  	 */
  	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
  	return 0;
  }

  /**
   * iio_push_to_buffers() - push a full scan to all registered buffers.
   * @indio_dev:		iio_dev structure for device.
   * @data:		Full scan.
   */
  int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
  {
  	int ret;
  	struct iio_buffer *buf;
  
  	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
  		ret = iio_push_to_buffer(buf, data);
  		if (ret < 0)
  			return ret;
  	}
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(iio_push_to_buffers);
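
  /*
   * Illustrative sketch (driver side, not part of this file): a pollfunc bottom
   * half typically fills one full scan and pushes it from here; the handler and
   * hardware-access names, and the fixed scan size, are assumptions.
   *
   *	static irqreturn_t example_trigger_handler(int irq, void *p)
   *	{
   *		struct iio_poll_func *pf = p;
   *		struct iio_dev *indio_dev = pf->indio_dev;
   *		u16 scan[8];
   *
   *		example_read_scan(indio_dev, scan);
   *		iio_push_to_buffers(indio_dev, scan);
   *
   *		iio_trigger_notify_done(indio_dev->trig);
   *		return IRQ_HANDLED;
   *	}
   *
   * Drivers that also want a timestamp appended usually go through the
   * iio_push_to_buffers_with_timestamp() wrapper instead.
   */
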
  /**
   * iio_buffer_release() - Free a buffer's resources
   * @ref: Pointer to the kref embedded in the iio_buffer struct
   *
   * This function is called when the last reference to the buffer has been
   * dropped. It will typically free all resources allocated by the buffer. Do not
   * call this function manually; always use iio_buffer_put() when done using a
   * buffer.
   */
  static void iio_buffer_release(struct kref *ref)
  {
  	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
  
  	buffer->access->release(buffer);
  }
  
  /**
   * iio_buffer_get() - Grab a reference to the buffer
   * @buffer: The buffer to grab a reference for, may be NULL
   *
   * Returns the pointer to the buffer that was passed into the function.
   */
  struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
  {
  	if (buffer)
  		kref_get(&buffer->ref);
  
  	return buffer;
  }
  EXPORT_SYMBOL_GPL(iio_buffer_get);
  
  /**
   * iio_buffer_put() - Release the reference to the buffer
   * @buffer: The buffer to release the reference for, may be NULL
   */
  void iio_buffer_put(struct iio_buffer *buffer)
  {
  	if (buffer)
  		kref_put(&buffer->ref, iio_buffer_release);
  }
  EXPORT_SYMBOL_GPL(iio_buffer_put);
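
  /*
   * Illustrative sketch (not part of this file): code that stores its own
   * pointer to a buffer pairs these two calls so the buffer cannot be released
   * underneath it; the surrounding context is an assumption.
   *
   *	struct iio_buffer *buf = iio_buffer_get(indio_dev->buffer);
   *
   *	... use buf while holding the reference ...
   *
   *	iio_buffer_put(buf);
   */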
  
  /**
   * iio_device_attach_buffer - Attach a buffer to an IIO device
   * @indio_dev: The device the buffer should be attached to
   * @buffer: The buffer to attach to the device
   *
   * This function attaches a buffer to an IIO device. The buffer stays attached
   * to the device until the device is freed. The function should be called at
   * most once per device.
   */
  void iio_device_attach_buffer(struct iio_dev *indio_dev,
  			      struct iio_buffer *buffer)
  {
  	indio_dev->buffer = iio_buffer_get(buffer);
  }
  EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
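
  /*
   * Illustrative sketch (driver side, not part of this file): a driver using
   * the kfifo buffer helpers would typically allocate and attach its buffer in
   * probe; names and the use of devm helpers here are assumptions.
   *
   *	static int example_probe(struct platform_device *pdev)
   *	{
   *		struct iio_dev *indio_dev;
   *		struct iio_buffer *buffer;
   *
   *		indio_dev = devm_iio_device_alloc(&pdev->dev,
   *						  sizeof(struct example_state));
   *		if (!indio_dev)
   *			return -ENOMEM;
   *
   *		buffer = devm_iio_kfifo_allocate(&pdev->dev);
   *		if (!buffer)
   *			return -ENOMEM;
   *
   *		iio_device_attach_buffer(indio_dev, buffer);
   *		indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
   *
   *		return devm_iio_device_register(&pdev->dev, indio_dev);
   *	}
   */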