blob: 36d48776bcdddd3eee9fddcfc8fa1a278bfa1370 [file] [log] [blame]
Paul Cercueil10d722a2015-06-30 13:50:43 +02001/*
2 * libiio - Library for interfacing industrial I/O (IIO) devices
3 *
4 * Copyright (C) 2014-2015 Analog Devices, Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * */
18
Paul Cercueilcb98f892016-09-20 17:15:15 +020019#include "iio-config.h"
Paul Cercueila689cd92014-03-20 16:37:25 +010020#include "iio-private.h"
21
Paul Cercueil66ba1712014-04-01 16:11:48 +020022#include <errno.h>
Paul Cercueil9efeb062014-04-29 13:12:45 +020023#include <string.h>
Paul Cercueil66ba1712014-04-01 16:11:48 +020024
/* Bundles a per-sample client callback with its opaque user pointer and
 * the channel mask of the buffer being iterated.
 * NOTE(review): not referenced anywhere in this chunk — presumably used
 * by code elsewhere in the file; confirm before removing. */
struct callback_wrapper_data {
	ssize_t (*callback)(const struct iio_channel *, void *, size_t, void *);
	void *data;
	uint32_t *mask;
};
30
Paul Cercueil2a74bd72014-04-28 13:18:48 +020031static bool device_is_high_speed(const struct iio_device *dev)
32{
33 /* Little trick: We call the backend's get_buffer() function, which is
34 * for now only implemented in the Local backend, with a NULL pointer.
35 * It will return -ENOSYS if the device is not high speed, and either
36 * -EBADF or -EINVAL otherwise. */
37 const struct iio_backend_ops *ops = dev->ctx->ops;
38 return !!ops->get_buffer &&
Paul Cercueil76ca8842015-03-05 11:16:16 +010039 (ops->get_buffer(dev, NULL, 0, NULL, 0) != -ENOSYS);
Paul Cercueil2a74bd72014-04-28 13:18:48 +020040}
41
/* Allocate and initialize an iio_buffer for the given device.
 *
 * samples_count: capacity of the buffer, in samples.
 * cyclic: whether the device should be opened in cyclic mode.
 *
 * Returns the new buffer, or NULL with errno set on failure.
 * Ownership: the caller must release it with iio_buffer_destroy(). */
struct iio_buffer * iio_device_create_buffer(const struct iio_device *dev,
		size_t samples_count, bool cyclic)
{
	int ret = -EINVAL;
	struct iio_buffer *buf;
	/* NOTE(review): if iio_device_get_sample_size() can return a negative
	 * error code, storing it into an unsigned int would defeat the zero
	 * check below — confirm against its declaration. */
	unsigned int sample_size = iio_device_get_sample_size(dev);

	if (!sample_size || !samples_count)
		goto err_set_errno;

	buf = malloc(sizeof(*buf));
	if (!buf) {
		ret = -ENOMEM;
		goto err_set_errno;
	}

	buf->dev_sample_size = sample_size;
	buf->length = sample_size * samples_count;
	buf->dev = dev;
	buf->mask = calloc(dev->words, sizeof(*buf->mask));
	if (!buf->mask) {
		ret = -ENOMEM;
		goto err_free_buf;
	}

	/* Set the default channel mask to the one used by the device.
	 * While input buffers will erase this as soon as the refill function
	 * is used, it is useful for output buffers, as it permits
	 * iio_buffer_foreach_sample to be used. */
	memcpy(buf->mask, dev->mask, dev->words * sizeof(*buf->mask));

	ret = iio_device_open(dev, samples_count, cyclic);
	if (ret < 0)
		goto err_free_mask;

	buf->dev_is_high_speed = device_is_high_speed(dev);
	if (buf->dev_is_high_speed) {
		/* Dequeue the first buffer, so that buf->buffer is correctly
		 * initialized */
		buf->buffer = NULL;
		if (iio_device_is_tx(dev)) {
			ret = dev->ctx->ops->get_buffer(dev, &buf->buffer,
					buf->length, buf->mask, dev->words);
			if (ret < 0)
				goto err_close_device;
		}
	} else {
		/* Non-high-speed path: we own the data area. */
		buf->buffer = malloc(buf->length);
		if (!buf->buffer) {
			ret = -ENOMEM;
			goto err_close_device;
		}
	}

	/* The effective per-sample size depends on the channel mask, which
	 * may be narrower than the device's full sample size. */
	buf->sample_size = iio_device_get_sample_size_mask(dev,
			buf->mask, dev->words);
	buf->data_length = buf->length;
	return buf;

err_close_device:
	iio_device_close(dev);
err_free_mask:
	free(buf->mask);
err_free_buf:
	free(buf);
err_set_errno:
	errno = -ret;
	return NULL;
}
111
112void iio_buffer_destroy(struct iio_buffer *buffer)
113{
Paul Cercueilde5a3472014-04-01 14:09:56 +0200114 iio_device_close(buffer->dev);
Paul Cercueil2a74bd72014-04-28 13:18:48 +0200115 if (!buffer->dev_is_high_speed)
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200116 free(buffer->buffer);
Paul Cercueila689cd92014-03-20 16:37:25 +0100117 free(buffer->mask);
118 free(buffer);
119}
120
Romain Roffé6a881702015-06-30 16:25:43 +0200121int iio_buffer_get_poll_fd(struct iio_buffer *buffer)
122{
123 return iio_device_get_poll_fd(buffer->dev);
124}
125
Romain Roffé0ea038d2015-06-30 13:35:38 +0200126int iio_buffer_set_blocking_mode(struct iio_buffer *buffer, bool blocking)
127{
128 return iio_device_set_blocking_mode(buffer->dev, blocking);
129}
130
Paul Cercueilcbe78562014-04-01 14:42:20 +0200131ssize_t iio_buffer_refill(struct iio_buffer *buffer)
Paul Cercueila689cd92014-03-20 16:37:25 +0100132{
Paul Cercueil66ba1712014-04-01 16:11:48 +0200133 ssize_t read;
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200134 const struct iio_device *dev = buffer->dev;
Paul Cercueil66ba1712014-04-01 16:11:48 +0200135
Paul Cercueil2a74bd72014-04-28 13:18:48 +0200136 if (buffer->dev_is_high_speed) {
Paul Cercueil21b9dab2015-04-20 11:51:35 +0200137 read = dev->ctx->ops->get_buffer(dev, &buffer->buffer,
138 buffer->length, buffer->mask, dev->words);
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200139 } else {
140 read = iio_device_read_raw(dev, buffer->buffer, buffer->length,
141 buffer->mask, dev->words);
142 }
143
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100144 if (read >= 0) {
Paul Cercueilcbe78562014-04-01 14:42:20 +0200145 buffer->data_length = read;
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100146 buffer->sample_size = iio_device_get_sample_size_mask(dev,
147 buffer->mask, dev->words);
148 }
Paul Cercueilcbe78562014-04-01 14:42:20 +0200149 return read;
Paul Cercueila689cd92014-03-20 16:37:25 +0100150}
151
/* Submit the buffer's samples to the hardware.
 *
 * Returns the number of bytes pushed (buffer->data_length on success) or
 * a negative error code. In all cases data_length is reset to the full
 * buffer length before returning (both branches fall through to
 * out_reset_data_length — the goto is only for the error path). */
ssize_t iio_buffer_push(struct iio_buffer *buffer)
{
	const struct iio_device *dev = buffer->dev;
	ssize_t ret;

	if (buffer->dev_is_high_speed) {
		void *buf;
		/* Exchange the current block for the next one to fill. */
		ret = dev->ctx->ops->get_buffer(dev, &buf,
				buffer->data_length, buffer->mask, dev->words);
		if (ret >= 0) {
			buffer->buffer = buf;
			ret = (ssize_t) buffer->data_length;
		}
	} else {
		void *ptr = buffer->buffer;
		size_t tmp_len;

		/* iio_device_write_raw doesn't guarantee that all bytes are
		 * written */
		for (tmp_len = buffer->data_length; tmp_len; ) {
			ret = iio_device_write_raw(dev, ptr, tmp_len);
			if (ret < 0)
				goto out_reset_data_length;

			/* Advance past the bytes that were accepted. */
			tmp_len -= ret;
			ptr = (void *) ((uintptr_t) ptr + ret);
		}

		ret = (ssize_t) buffer->data_length;
	}

out_reset_data_length:
	buffer->data_length = buffer->length;
	return ret;
}
187
188ssize_t iio_buffer_push_partial(struct iio_buffer *buffer, size_t samples_count)
189{
190 size_t new_len = samples_count * buffer->dev_sample_size;
191
192 if (new_len == 0 || new_len > buffer->length)
193 return -EINVAL;
194
195 buffer->data_length = new_len;
196 return iio_buffer_push(buffer);
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200197}
198
/* Iterate over every sample in the buffer, invoking the callback once per
 * enabled channel per sample.
 *
 * callback receives: the channel, a pointer to the sample element, the
 * size in bytes of one element of that channel, and the opaque pointer d.
 * Iteration stops at the first negative callback return, which is
 * propagated; otherwise the sum of the callback return values is
 * returned. Returns -EINVAL if the buffer's sample size is not valid. */
ssize_t iio_buffer_foreach_sample(struct iio_buffer *buffer,
		ssize_t (*callback)(const struct iio_channel *,
			void *, size_t, void *), void *d)
{
	uintptr_t ptr = (uintptr_t) buffer->buffer,
		  end = ptr + buffer->data_length;
	const struct iio_device *dev = buffer->dev;
	ssize_t processed = 0;

	if (buffer->sample_size <= 0)
		return -EINVAL;

	/* Not even one full sample in the buffer: nothing to do. */
	if (buffer->data_length < buffer->dev_sample_size)
		return 0;

	while (end - ptr >= (size_t) buffer->sample_size) {
		unsigned int i;

		for (i = 0; i < dev->nb_channels; i++) {
			const struct iio_channel *chn = dev->channels[i];
			unsigned int length = chn->format.length / 8;

			/* Channels with a negative index are not scan
			 * elements; dev->channels is ordered by index, so
			 * we can stop scanning here. */
			if (chn->index < 0)
				break;

			/* Test if the buffer has samples for this channel */
			if (!TEST_BIT(buffer->mask, chn->index))
				continue;

			/* Align on the channel's element size. */
			if (ptr % length)
				ptr += length - (ptr % length);

			/* Test if the client wants samples from this channel */
			if (TEST_BIT(dev->mask, chn->index)) {
				ssize_t ret = callback(chn,
						(void *) ptr, length, d);
				if (ret < 0)
					return ret;
				else
					processed += ret;
			}

			/* Skip every repeat element of this channel, whether
			 * or not the callback was invoked for it. */
			ptr += length * chn->format.repeat;
		}
	}
	return processed;
}
Paul Cercueil95347b92014-03-21 09:50:17 +0100246
Paul Cercueil6d927162014-04-16 15:53:22 +0200247void * iio_buffer_start(const struct iio_buffer *buffer)
248{
249 return buffer->buffer;
250}
251
/* Return a pointer to the first sample of the given channel inside the
 * buffer, or iio_buffer_end() when the channel is not enabled.
 * Walks the channels preceding chn in index order, skipping the space
 * their sample elements occupy. */
void * iio_buffer_first(const struct iio_buffer *buffer,
		const struct iio_channel *chn)
{
	size_t len;
	unsigned int i;
	uintptr_t ptr = (uintptr_t) buffer->buffer;

	if (!iio_channel_is_enabled(chn))
		return iio_buffer_end(buffer);

	for (i = 0; i < buffer->dev->nb_channels; i++) {
		struct iio_channel *cur = buffer->dev->channels[i];
		len = cur->format.length / 8 * cur->format.repeat;

		/* NOTE: dev->channels are ordered by index */
		if (cur->index < 0 || cur->index == chn->index)
			break;

		/* Test if the buffer has samples for this channel */
		if (!TEST_BIT(buffer->mask, cur->index))
			continue;

		/* NOTE(review): alignment here uses the full len (element
		 * size * repeat), whereas iio_buffer_foreach_sample aligns
		 * on the element size alone — confirm which is intended. */
		if (ptr % len)
			ptr += len - (ptr % len);
		ptr += len;
	}

	/* Finally, align on the requested channel's element size. */
	len = chn->format.length / 8;
	if (ptr % len)
		ptr += len - (ptr % len);
	return (void *) ptr;
}
284
Paul Cercueil10682b32014-04-04 12:34:37 +0200285ptrdiff_t iio_buffer_step(const struct iio_buffer *buffer)
Paul Cercueil95347b92014-03-21 09:50:17 +0100286{
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100287 return (ptrdiff_t) buffer->sample_size;
Paul Cercueil95347b92014-03-21 09:50:17 +0100288}
289
290void * iio_buffer_end(const struct iio_buffer *buffer)
291{
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200292 return (void *) ((uintptr_t) buffer->buffer + buffer->data_length);
Paul Cercueil95347b92014-03-21 09:50:17 +0100293}
Paul Cercueila2d4bad2014-05-27 10:15:29 +0200294
295void iio_buffer_set_data(struct iio_buffer *buf, void *data)
296{
297 buf->userdata = data;
298}
299
300void * iio_buffer_get_data(const struct iio_buffer *buf)
301{
302 return buf->userdata;
303}
Paul Cercueil03b6c812015-04-14 16:49:06 +0200304
305const struct iio_device * iio_buffer_get_device(const struct iio_buffer *buf)
306{
307 return buf->dev;
308}
Lars-Peter Clausen48c01602016-04-20 13:10:05 +0200309
310void iio_buffer_cancel(struct iio_buffer *buf)
311{
312 const struct iio_backend_ops *ops = buf->dev->ctx->ops;
313
314 if (ops->cancel)
315 ops->cancel(buf->dev);
316}