blob: a91b6e9ed87817d14f36886c6573bc1c08464d6c [file] [log] [blame]
/*
 * libiio - Library for interfacing industrial I/O (IIO) devices
 *
 * Copyright (C) 2014-2015 Analog Devices, Inc.
 * Author: Paul Cercueil <paul.cercueil@analog.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * */
18
Paul Cercueila689cd92014-03-20 16:37:25 +010019#include "iio-private.h"
20
Paul Cercueil66ba1712014-04-01 16:11:48 +020021#include <errno.h>
Paul Cercueil9efeb062014-04-29 13:12:45 +020022#include <string.h>
Paul Cercueil66ba1712014-04-01 16:11:48 +020023
/* Bundles a per-sample user callback with its opaque argument and the
 * channel mask it should be applied against.
 * NOTE(review): not referenced anywhere in this chunk — presumably used
 * by callback-based read/write helpers elsewhere in the file; confirm
 * before removing. */
struct callback_wrapper_data {
	ssize_t (*callback)(const struct iio_channel *, void *, size_t, void *);
	void *data;     /* opaque pointer forwarded to the callback */
	uint32_t *mask; /* channel-enable bitmask (dev->words words) */
};
29
Paul Cercueil2a74bd72014-04-28 13:18:48 +020030static bool device_is_high_speed(const struct iio_device *dev)
31{
32 /* Little trick: We call the backend's get_buffer() function, which is
33 * for now only implemented in the Local backend, with a NULL pointer.
34 * It will return -ENOSYS if the device is not high speed, and either
35 * -EBADF or -EINVAL otherwise. */
36 const struct iio_backend_ops *ops = dev->ctx->ops;
37 return !!ops->get_buffer &&
Paul Cercueil76ca8842015-03-05 11:16:16 +010038 (ops->get_buffer(dev, NULL, 0, NULL, 0) != -ENOSYS);
Paul Cercueil2a74bd72014-04-28 13:18:48 +020039}
40
/* Create and return a buffer of @samples_count samples attached to @dev,
 * opening the device in the process. @cyclic requests a cyclic (repeating)
 * output buffer, forwarded verbatim to iio_device_open().
 *
 * Returns the new buffer, or NULL with errno set on failure. On failure
 * every resource acquired so far is released via the goto-cleanup chain
 * below (device closed, mask and buffer freed). */
struct iio_buffer * iio_device_create_buffer(const struct iio_device *dev,
		size_t samples_count, bool cyclic)
{
	int ret = -EINVAL;
	struct iio_buffer *buf;
	/* NOTE(review): if iio_device_get_sample_size() can return a negative
	 * error code, it would be truncated to a large unsigned value here and
	 * slip past the !sample_size check — confirm it only returns 0 on
	 * failure (e.g. no channels enabled). */
	unsigned int sample_size = iio_device_get_sample_size(dev);
	if (!sample_size)
		goto err_set_errno;

	buf = malloc(sizeof(*buf));
	if (!buf) {
		ret = -ENOMEM;
		goto err_set_errno;
	}

	/* Size the data area from the sample size of the currently enabled
	 * channel set. */
	buf->dev_sample_size = sample_size;
	buf->length = sample_size * samples_count;
	buf->dev = dev;
	buf->mask = calloc(dev->words, sizeof(*buf->mask));
	if (!buf->mask) {
		ret = -ENOMEM;
		goto err_free_buf;
	}

	/* Set the default channel mask to the one used by the device.
	 * While input buffers will erase this as soon as the refill function
	 * is used, it is useful for output buffers, as it permits
	 * iio_buffer_foreach_sample to be used. */
	memcpy(buf->mask, dev->mask, dev->words * sizeof(*buf->mask));

	ret = iio_device_open(dev, samples_count, cyclic);
	if (ret < 0)
		goto err_free_mask;

	buf->dev_is_high_speed = device_is_high_speed(dev);
	if (buf->dev_is_high_speed) {
		/* Dequeue the first buffer, so that buf->buffer is correctly
		 * initialized */
		buf->buffer = NULL;
		if (iio_device_is_tx(dev)) {
			ret = dev->ctx->ops->get_buffer(dev, &buf->buffer,
					buf->length, buf->mask, dev->words);
			if (ret < 0)
				goto err_close_device;
		}
	} else {
		/* Low-speed path: the library owns the data area. */
		buf->buffer = malloc(buf->length);
		if (!buf->buffer) {
			ret = -ENOMEM;
			goto err_close_device;
		}
	}

	/* Cache the sample size for the mask actually granted by the kernel,
	 * which may differ from the device-level sample size. */
	buf->sample_size = iio_device_get_sample_size_mask(dev,
			buf->mask, dev->words);
	buf->data_length = buf->length;
	return buf;

err_close_device:
	iio_device_close(dev);
err_free_mask:
	free(buf->mask);
err_free_buf:
	free(buf);
err_set_errno:
	/* ret holds a negative errno code at this point. */
	errno = -ret;
	return NULL;
}
109
110void iio_buffer_destroy(struct iio_buffer *buffer)
111{
Paul Cercueilde5a3472014-04-01 14:09:56 +0200112 iio_device_close(buffer->dev);
Paul Cercueil2a74bd72014-04-28 13:18:48 +0200113 if (!buffer->dev_is_high_speed)
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200114 free(buffer->buffer);
Paul Cercueila689cd92014-03-20 16:37:25 +0100115 free(buffer->mask);
116 free(buffer);
117}
118
Romain Roffé6a881702015-06-30 16:25:43 +0200119int iio_buffer_get_poll_fd(struct iio_buffer *buffer)
120{
121 return iio_device_get_poll_fd(buffer->dev);
122}
123
Romain Roffé0ea038d2015-06-30 13:35:38 +0200124int iio_buffer_set_blocking_mode(struct iio_buffer *buffer, bool blocking)
125{
126 return iio_device_set_blocking_mode(buffer->dev, blocking);
127}
128
Paul Cercueilcbe78562014-04-01 14:42:20 +0200129ssize_t iio_buffer_refill(struct iio_buffer *buffer)
Paul Cercueila689cd92014-03-20 16:37:25 +0100130{
Paul Cercueil66ba1712014-04-01 16:11:48 +0200131 ssize_t read;
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200132 const struct iio_device *dev = buffer->dev;
Paul Cercueil66ba1712014-04-01 16:11:48 +0200133
Paul Cercueil2a74bd72014-04-28 13:18:48 +0200134 if (buffer->dev_is_high_speed) {
Paul Cercueil21b9dab2015-04-20 11:51:35 +0200135 read = dev->ctx->ops->get_buffer(dev, &buffer->buffer,
136 buffer->length, buffer->mask, dev->words);
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200137 } else {
138 read = iio_device_read_raw(dev, buffer->buffer, buffer->length,
139 buffer->mask, dev->words);
140 }
141
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100142 if (read >= 0) {
Paul Cercueilcbe78562014-04-01 14:42:20 +0200143 buffer->data_length = read;
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100144 buffer->sample_size = iio_device_get_sample_size_mask(dev,
145 buffer->mask, dev->words);
146 }
Paul Cercueilcbe78562014-04-01 14:42:20 +0200147 return read;
Paul Cercueila689cd92014-03-20 16:37:25 +0100148}
149
Paul Cercueil497af142014-05-13 10:32:46 +0200150ssize_t iio_buffer_push(struct iio_buffer *buffer)
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200151{
Paul Cercueil497af142014-05-13 10:32:46 +0200152 const struct iio_device *dev = buffer->dev;
153
154 if (buffer->dev_is_high_speed) {
155 void *buf;
156 ssize_t ret = dev->ctx->ops->get_buffer(dev,
Paul Cercueil76ca8842015-03-05 11:16:16 +0100157 &buf, buffer->length, buffer->mask, dev->words);
Paul Cercueil497af142014-05-13 10:32:46 +0200158 if (ret >= 0)
159 buffer->buffer = buf;
160 return ret;
161 } else {
Paul Cercueil90496342014-09-02 16:44:25 +0200162 size_t length = buffer->length;
163 void *ptr = buffer->buffer;
164
165 /* iio_device_write_raw doesn't guarantee that all bytes are
166 * written */
167 while (length) {
168 ssize_t ret = iio_device_write_raw(dev, ptr, length);
169 if (ret < 0)
170 return ret;
171
172 length -= ret;
173 ptr = (void *) ((uintptr_t) ptr + ret);
174 }
175
Paul Cercueil4012cff2015-05-11 10:47:40 +0200176 return (ssize_t) buffer->length;
Paul Cercueil497af142014-05-13 10:32:46 +0200177 }
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200178}
179
/* Walk every sample in the buffer, invoking @callback once per enabled
 * channel per sample frame, with @d forwarded as the callback's last
 * argument.
 *
 * Returns the sum of the callback's non-negative return values, the
 * first negative value the callback returns (aborting the walk), or
 * -EINVAL if the buffer's sample size is not yet known (e.g. refill has
 * not been called). Returns 0 if the buffer holds less than one full
 * device sample. */
ssize_t iio_buffer_foreach_sample(struct iio_buffer *buffer,
		ssize_t (*callback)(const struct iio_channel *,
			void *, size_t, void *), void *d)
{
	uintptr_t ptr = (uintptr_t) buffer->buffer,
		  end = ptr + buffer->data_length;
	const struct iio_device *dev = buffer->dev;
	ssize_t processed = 0;

	if (buffer->sample_size <= 0)
		return -EINVAL;

	if (buffer->data_length < buffer->dev_sample_size)
		return 0;

	/* One iteration of the outer loop consumes one sample frame. */
	while (end - ptr >= (size_t) buffer->sample_size) {
		unsigned int i;

		for (i = 0; i < dev->nb_channels; i++) {
			const struct iio_channel *chn = dev->channels[i];
			unsigned int length = chn->format.length / 8;

			/* Channels are ordered by index; a negative index
			 * marks the end of the scan elements. */
			if (chn->index < 0)
				break;

			/* Test if the buffer has samples for this channel */
			if (!TEST_BIT(buffer->mask, chn->index))
				continue;

			/* Align to this channel's natural storage size. */
			if (ptr % length)
				ptr += length - (ptr % length);

			/* Test if the client wants samples from this channel */
			if (TEST_BIT(dev->mask, chn->index)) {
				ssize_t ret = callback(chn,
						(void *) ptr, length, d);
				if (ret < 0)
					return ret;
				else
					processed += ret;
			}

			/* Skip the sample even when the client ignored it,
			 * to stay in step with the buffer layout. */
			ptr += length;
		}
	}
	return processed;
}
Paul Cercueil95347b92014-03-21 09:50:17 +0100227
Paul Cercueil6d927162014-04-16 15:53:22 +0200228void * iio_buffer_start(const struct iio_buffer *buffer)
229{
230 return buffer->buffer;
231}
232
/* Return a pointer to the first sample of channel @chn inside the
 * buffer, accounting for the alignment and sizes of all lower-indexed
 * channels present in the buffer's mask. If @chn is not enabled, the
 * end of the buffer is returned so that a first/step/end iteration
 * degenerates to zero iterations. */
void * iio_buffer_first(const struct iio_buffer *buffer,
		const struct iio_channel *chn)
{
	size_t len;
	unsigned int i;
	uintptr_t ptr = (uintptr_t) buffer->buffer;

	if (!iio_channel_is_enabled(chn))
		return iio_buffer_end(buffer);

	/* Accumulate the aligned sizes of every channel stored before
	 * @chn in a sample frame. */
	for (i = 0; i < buffer->dev->nb_channels; i++) {
		struct iio_channel *cur = buffer->dev->channels[i];
		len = cur->format.length / 8;

		/* NOTE: dev->channels are ordered by index */
		if (cur->index < 0 || cur->index == chn->index)
			break;

		/* Test if the buffer has samples for this channel */
		if (!TEST_BIT(buffer->mask, cur->index))
			continue;

		/* Align to this channel's natural storage size, then skip
		 * its sample. */
		if (ptr % len)
			ptr += len - (ptr % len);
		ptr += len;
	}

	/* Finally align for @chn itself. */
	len = chn->format.length / 8;
	if (ptr % len)
		ptr += len - (ptr % len);
	return (void *) ptr;
}
265
Paul Cercueil10682b32014-04-04 12:34:37 +0200266ptrdiff_t iio_buffer_step(const struct iio_buffer *buffer)
Paul Cercueil95347b92014-03-21 09:50:17 +0100267{
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100268 return (ptrdiff_t) buffer->sample_size;
Paul Cercueil95347b92014-03-21 09:50:17 +0100269}
270
271void * iio_buffer_end(const struct iio_buffer *buffer)
272{
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200273 return (void *) ((uintptr_t) buffer->buffer + buffer->data_length);
Paul Cercueil95347b92014-03-21 09:50:17 +0100274}
Paul Cercueila2d4bad2014-05-27 10:15:29 +0200275
276void iio_buffer_set_data(struct iio_buffer *buf, void *data)
277{
278 buf->userdata = data;
279}
280
281void * iio_buffer_get_data(const struct iio_buffer *buf)
282{
283 return buf->userdata;
284}
Paul Cercueil03b6c812015-04-14 16:49:06 +0200285
286const struct iio_device * iio_buffer_get_device(const struct iio_buffer *buf)
287{
288 return buf->dev;
289}