blob: 95a80c3f187f9d597c63e005def679ca836060c9 [file] [log] [blame]
Paul Cercueila689cd92014-03-20 16:37:25 +01001#include "iio-private.h"
2
Paul Cercueil66ba1712014-04-01 16:11:48 +02003#include <errno.h>
Paul Cercueil9efeb062014-04-29 13:12:45 +02004#include <string.h>
Paul Cercueil66ba1712014-04-01 16:11:48 +02005
/* Bundles a user-supplied per-sample callback together with its opaque
 * user-data pointer and a channel mask, so all three can travel through
 * an API taking a single void * argument.
 * NOTE(review): not referenced in this part of the file — presumably
 * consumed by a callback-wrapping helper elsewhere; confirm before
 * removing. */
struct callback_wrapper_data {
	/* Called once per sample; receives channel, sample pointer,
	 * sample length in bytes, and the user data below. */
	ssize_t (*callback)(const struct iio_channel *, void *, size_t, void *);
	void *data;     /* opaque pointer forwarded to the callback */
	uint32_t *mask; /* channel-enable bitmask to apply */
};
11
Paul Cercueil2a74bd72014-04-28 13:18:48 +020012static bool device_is_high_speed(const struct iio_device *dev)
13{
14 /* Little trick: We call the backend's get_buffer() function, which is
15 * for now only implemented in the Local backend, with a NULL pointer.
16 * It will return -ENOSYS if the device is not high speed, and either
17 * -EBADF or -EINVAL otherwise. */
18 const struct iio_backend_ops *ops = dev->ctx->ops;
19 return !!ops->get_buffer &&
Paul Cercueil76ca8842015-03-05 11:16:16 +010020 (ops->get_buffer(dev, NULL, 0, NULL, 0) != -ENOSYS);
Paul Cercueil2a74bd72014-04-28 13:18:48 +020021}
22
/* Allocate and initialize a buffer of samples_count samples for the given
 * device, opening the device in the process. On failure, returns NULL with
 * errno set to the positive error code. The returned buffer must be
 * released with iio_buffer_destroy(). */
struct iio_buffer * iio_device_create_buffer(const struct iio_device *dev,
		size_t samples_count, bool cyclic)
{
	int ret = -EINVAL;
	struct iio_buffer *buf;
	/* Sample size with the device's current channel mask; zero means
	 * no channel is enabled, so a buffer would be useless. */
	unsigned int sample_size = iio_device_get_sample_size(dev);
	if (!sample_size)
		goto err_set_errno;

	buf = malloc(sizeof(*buf));
	if (!buf) {
		ret = -ENOMEM;
		goto err_set_errno;
	}

	buf->dev_sample_size = sample_size;
	buf->length = sample_size * samples_count;
	buf->dev = dev;
	buf->mask = calloc(dev->words, sizeof(*buf->mask));
	if (!buf->mask) {
		ret = -ENOMEM;
		goto err_free_buf;
	}

	/* Set the default channel mask to the one used by the device.
	 * While input buffers will erase this as soon as the refill function
	 * is used, it is useful for output buffers, as it permits
	 * iio_buffer_foreach_sample to be used. */
	memcpy(buf->mask, dev->mask, dev->words * sizeof(*buf->mask));

	ret = iio_device_open(dev, samples_count, cyclic);
	if (ret < 0)
		goto err_free_mask;

	buf->dev_is_high_speed = device_is_high_speed(dev);
	if (buf->dev_is_high_speed) {
		/* Dequeue the first buffer, so that buf->buffer is correctly
		 * initialized */
		buf->buffer = NULL;
		if (iio_device_is_tx(dev)) {
			ret = dev->ctx->ops->get_buffer(dev, &buf->buffer,
					buf->length, buf->mask, dev->words);
			if (ret < 0)
				goto err_close_device;
		}
		/* NOTE(review): for a non-TX high-speed device, buf->buffer
		 * stays NULL here — presumably iio_buffer_refill() fetches
		 * the first backend buffer; confirm callers never dereference
		 * iio_buffer_start() before the first refill. */
	} else {
		buf->buffer = malloc(buf->length);
		if (!buf->buffer) {
			ret = -ENOMEM;
			goto err_close_device;
		}
	}

	/* Recompute the per-sample step for the (possibly updated) mask. */
	buf->sample_size = iio_device_get_sample_size_mask(dev,
			buf->mask, dev->words);
	buf->data_length = buf->length;
	return buf;

	/* Unwind in strict reverse order of acquisition. */
err_close_device:
	iio_device_close(dev);
err_free_mask:
	free(buf->mask);
err_free_buf:
	free(buf);
err_set_errno:
	/* ret holds a negative error code; expose it via errno. */
	errno = -ret;
	return NULL;
}
91
92void iio_buffer_destroy(struct iio_buffer *buffer)
93{
Paul Cercueilde5a3472014-04-01 14:09:56 +020094 iio_device_close(buffer->dev);
Paul Cercueil2a74bd72014-04-28 13:18:48 +020095 if (!buffer->dev_is_high_speed)
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020096 free(buffer->buffer);
Paul Cercueila689cd92014-03-20 16:37:25 +010097 free(buffer->mask);
98 free(buffer);
99}
100
Paul Cercueilcbe78562014-04-01 14:42:20 +0200101ssize_t iio_buffer_refill(struct iio_buffer *buffer)
Paul Cercueila689cd92014-03-20 16:37:25 +0100102{
Paul Cercueil66ba1712014-04-01 16:11:48 +0200103 ssize_t read;
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200104 const struct iio_device *dev = buffer->dev;
Paul Cercueil66ba1712014-04-01 16:11:48 +0200105
Paul Cercueil2a74bd72014-04-28 13:18:48 +0200106 if (buffer->dev_is_high_speed) {
Paul Cercueil21b9dab2015-04-20 11:51:35 +0200107 read = dev->ctx->ops->get_buffer(dev, &buffer->buffer,
108 buffer->length, buffer->mask, dev->words);
Paul Cercueil93f7e1f2014-04-23 16:49:56 +0200109 } else {
110 read = iio_device_read_raw(dev, buffer->buffer, buffer->length,
111 buffer->mask, dev->words);
112 }
113
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100114 if (read >= 0) {
Paul Cercueilcbe78562014-04-01 14:42:20 +0200115 buffer->data_length = read;
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100116 buffer->sample_size = iio_device_get_sample_size_mask(dev,
117 buffer->mask, dev->words);
118 }
Paul Cercueilcbe78562014-04-01 14:42:20 +0200119 return read;
Paul Cercueila689cd92014-03-20 16:37:25 +0100120}
121
Paul Cercueil497af142014-05-13 10:32:46 +0200122ssize_t iio_buffer_push(struct iio_buffer *buffer)
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200123{
Paul Cercueil497af142014-05-13 10:32:46 +0200124 const struct iio_device *dev = buffer->dev;
125
126 if (buffer->dev_is_high_speed) {
127 void *buf;
128 ssize_t ret = dev->ctx->ops->get_buffer(dev,
Paul Cercueil76ca8842015-03-05 11:16:16 +0100129 &buf, buffer->length, buffer->mask, dev->words);
Paul Cercueil497af142014-05-13 10:32:46 +0200130 if (ret >= 0)
131 buffer->buffer = buf;
132 return ret;
133 } else {
Paul Cercueil90496342014-09-02 16:44:25 +0200134 size_t length = buffer->length;
135 void *ptr = buffer->buffer;
136
137 /* iio_device_write_raw doesn't guarantee that all bytes are
138 * written */
139 while (length) {
140 ssize_t ret = iio_device_write_raw(dev, ptr, length);
141 if (ret < 0)
142 return ret;
143
144 length -= ret;
145 ptr = (void *) ((uintptr_t) ptr + ret);
146 }
147
Paul Cercueil4012cff2015-05-11 10:47:40 +0200148 return (ssize_t) buffer->length;
Paul Cercueil497af142014-05-13 10:32:46 +0200149 }
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200150}
151
/* Walk every sample in the buffer, invoking the user callback once per
 * (enabled channel, sample) pair. Returns the sum of the callback's
 * non-negative return values, the callback's error code if it fails,
 * -EINVAL if the sample size is not yet known, or 0 if the buffer does
 * not hold even one full device sample. */
ssize_t iio_buffer_foreach_sample(struct iio_buffer *buffer,
		ssize_t (*callback)(const struct iio_channel *,
			void *, size_t, void *), void *d)
{
	uintptr_t ptr = (uintptr_t) buffer->buffer,
		  end = ptr + buffer->data_length;
	const struct iio_device *dev = buffer->dev;
	ssize_t processed = 0;

	/* sample_size is set by refill/create; non-positive means the
	 * buffer is in no state to be iterated. */
	if (buffer->sample_size <= 0)
		return -EINVAL;

	if (buffer->data_length < buffer->dev_sample_size)
		return 0;

	/* One outer iteration per complete sample set present. */
	while (end - ptr >= (size_t) buffer->sample_size) {
		unsigned int i;

		for (i = 0; i < dev->nb_channels; i++) {
			const struct iio_channel *chn = dev->channels[i];
			unsigned int length = chn->format.length / 8;

			/* Channels are index-ordered; a negative index
			 * marks the end of scan elements. */
			if (chn->index < 0)
				break;

			/* Test if the buffer has samples for this channel */
			if (!TEST_BIT(buffer->mask, chn->index))
				continue;

			/* Samples are aligned on their own size within the
			 * stream; skip padding before this channel's slot. */
			if (ptr % length)
				ptr += length - (ptr % length);

			/* Test if the client wants samples from this channel */
			if (TEST_BIT(dev->mask, chn->index)) {
				ssize_t ret = callback(chn,
						(void *) ptr, length, d);
				if (ret < 0)
					return ret;
				else
					processed += ret;
			}

			/* Advance past this channel's sample even when the
			 * client skipped it — it still occupies the stream. */
			ptr += length;
		}
	}
	return processed;
}
Paul Cercueil95347b92014-03-21 09:50:17 +0100199
Paul Cercueil6d927162014-04-16 15:53:22 +0200200void * iio_buffer_start(const struct iio_buffer *buffer)
201{
202 return buffer->buffer;
203}
204
/* Return a pointer to the first sample of the given channel inside the
 * buffer, accounting for the per-channel alignment padding of every
 * preceding enabled channel. If the channel is disabled, returns
 * iio_buffer_end() so that a first..end walk does zero iterations. */
void * iio_buffer_first(const struct iio_buffer *buffer,
		const struct iio_channel *chn)
{
	size_t len;
	unsigned int i;
	uintptr_t ptr = (uintptr_t) buffer->buffer;

	if (!iio_channel_is_enabled(chn))
		return iio_buffer_end(buffer);

	/* Sum the (aligned) sizes of all enabled channels that precede
	 * chn in the scan order. */
	for (i = 0; i < buffer->dev->nb_channels; i++) {
		struct iio_channel *cur = buffer->dev->channels[i];
		len = cur->format.length / 8;

		/* NOTE: dev->channels are ordered by index */
		if (cur->index < 0 || cur->index == chn->index)
			break;

		/* Test if the buffer has samples for this channel */
		if (!TEST_BIT(buffer->mask, cur->index))
			continue;

		/* Each sample is aligned on its own size in the stream. */
		if (ptr % len)
			ptr += len - (ptr % len);
		ptr += len;
	}

	/* Finally align to chn's own sample size. */
	len = chn->format.length / 8;
	if (ptr % len)
		ptr += len - (ptr % len);
	return (void *) ptr;
}
237
Paul Cercueil10682b32014-04-04 12:34:37 +0200238ptrdiff_t iio_buffer_step(const struct iio_buffer *buffer)
Paul Cercueil95347b92014-03-21 09:50:17 +0100239{
Paul Cercueil9e5301d2014-11-19 15:34:28 +0100240 return (ptrdiff_t) buffer->sample_size;
Paul Cercueil95347b92014-03-21 09:50:17 +0100241}
242
243void * iio_buffer_end(const struct iio_buffer *buffer)
244{
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200245 return (void *) ((uintptr_t) buffer->buffer + buffer->data_length);
Paul Cercueil95347b92014-03-21 09:50:17 +0100246}
Paul Cercueila2d4bad2014-05-27 10:15:29 +0200247
248void iio_buffer_set_data(struct iio_buffer *buf, void *data)
249{
250 buf->userdata = data;
251}
252
253void * iio_buffer_get_data(const struct iio_buffer *buf)
254{
255 return buf->userdata;
256}
Paul Cercueil03b6c812015-04-14 16:49:06 +0200257
258const struct iio_device * iio_buffer_get_device(const struct iio_buffer *buf)
259{
260 return buf->dev;
261}