blob: c7a38bc4c8eec195697fe1ff560fa7f501e1725d [file] [log] [blame]
Paul Cercueila689cd92014-03-20 16:37:25 +01001#include "iio-private.h"
2
Paul Cercueil66ba1712014-04-01 16:11:48 +02003#include <errno.h>
Paul Cercueil9efeb062014-04-29 13:12:45 +02004#include <string.h>
Paul Cercueil66ba1712014-04-01 16:11:48 +02005
/* Bundles a user-supplied per-sample callback with its opaque context
 * pointer and the channel mask it should be evaluated against.
 * NOTE(review): not referenced anywhere in this part of the file —
 * presumably consumed by code outside this view; confirm before removing. */
struct callback_wrapper_data {
	ssize_t (*callback)(const struct iio_channel *, void *, size_t, void *);
	void *data;
	uint32_t *mask;
};
11
Paul Cercueil2a74bd72014-04-28 13:18:48 +020012static bool device_is_high_speed(const struct iio_device *dev)
13{
14 /* Little trick: We call the backend's get_buffer() function, which is
15 * for now only implemented in the Local backend, with a NULL pointer.
16 * It will return -ENOSYS if the device is not high speed, and either
17 * -EBADF or -EINVAL otherwise. */
18 const struct iio_backend_ops *ops = dev->ctx->ops;
19 return !!ops->get_buffer &&
20 (ops->get_buffer(dev, NULL, NULL, 0) != -ENOSYS);
21}
22
Paul Cercueila689cd92014-03-20 16:37:25 +010023struct iio_buffer * iio_device_create_buffer(const struct iio_device *dev,
Paul Cercueileb4fb3d2014-04-25 11:33:01 +020024 size_t samples_count)
Paul Cercueila689cd92014-03-20 16:37:25 +010025{
Paul Cercueil3106e892014-04-30 11:29:51 +020026 int ret = -EINVAL;
Paul Cercueilde5a3472014-04-01 14:09:56 +020027 struct iio_buffer *buf;
28 unsigned int sample_size = iio_device_get_sample_size(dev);
29 if (!sample_size)
Paul Cercueil3106e892014-04-30 11:29:51 +020030 goto err_set_errno;
Paul Cercueilde5a3472014-04-01 14:09:56 +020031
32 buf = malloc(sizeof(*buf));
Paul Cercueil3106e892014-04-30 11:29:51 +020033 if (!buf) {
34 ret = -ENOMEM;
35 goto err_set_errno;
36 }
Paul Cercueila689cd92014-03-20 16:37:25 +010037
Paul Cercueilde5a3472014-04-01 14:09:56 +020038 buf->sample_size = sample_size;
Paul Cercueilf88f8852014-04-01 11:50:59 +020039 buf->length = buf->sample_size * samples_count;
Paul Cercueila689cd92014-03-20 16:37:25 +010040 buf->dev = dev;
Paul Cercueil645ab972014-03-24 14:36:12 +010041 buf->mask = calloc(dev->words, sizeof(*buf->mask));
Paul Cercueil3106e892014-04-30 11:29:51 +020042 if (!buf->mask) {
43 ret = -ENOMEM;
Paul Cercueila689cd92014-03-20 16:37:25 +010044 goto err_free_buf;
Paul Cercueil3106e892014-04-30 11:29:51 +020045 }
Paul Cercueila689cd92014-03-20 16:37:25 +010046
Paul Cercueil9efeb062014-04-29 13:12:45 +020047 /* Set the default channel mask to the one used by the device.
48 * While input buffers will erase this as soon as the refill function
49 * is used, it is useful for output buffers, as it permits
50 * iio_buffer_foreach_sample to be used. */
51 memcpy(buf->mask, dev->mask, dev->words * sizeof(*buf->mask));
52
Paul Cercueil3106e892014-04-30 11:29:51 +020053 ret = iio_device_open(dev, samples_count);
54 if (ret < 0)
Paul Cercueilf17f2dc2014-04-29 15:38:57 +020055 goto err_free_mask;
56
Paul Cercueil2a74bd72014-04-28 13:18:48 +020057 buf->dev_is_high_speed = device_is_high_speed(dev);
58 if (buf->dev_is_high_speed) {
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020059 /* We will use the get_buffer backend function is available.
60 * In that case, we don't need our own buffer. */
61 buf->buffer = NULL;
62 } else {
63 buf->buffer = malloc(buf->length);
Paul Cercueil3106e892014-04-30 11:29:51 +020064 if (!buf->buffer) {
65 ret = -ENOMEM;
Paul Cercueilf17f2dc2014-04-29 15:38:57 +020066 goto err_close_device;
Paul Cercueil3106e892014-04-30 11:29:51 +020067 }
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020068 }
Paul Cercueila689cd92014-03-20 16:37:25 +010069
Paul Cercueileb4fb3d2014-04-25 11:33:01 +020070 buf->data_length = buf->length;
Paul Cercueilf17f2dc2014-04-29 15:38:57 +020071 return buf;
Paul Cercueil66ba1712014-04-01 16:11:48 +020072
Paul Cercueilf17f2dc2014-04-29 15:38:57 +020073err_close_device:
74 iio_device_close(dev);
Paul Cercueilde5a3472014-04-01 14:09:56 +020075err_free_mask:
Paul Cercueila689cd92014-03-20 16:37:25 +010076 free(buf->mask);
77err_free_buf:
78 free(buf);
Paul Cercueil3106e892014-04-30 11:29:51 +020079err_set_errno:
80 errno = -ret;
Paul Cercueila689cd92014-03-20 16:37:25 +010081 return NULL;
82}
83
84void iio_buffer_destroy(struct iio_buffer *buffer)
85{
Paul Cercueilde5a3472014-04-01 14:09:56 +020086 iio_device_close(buffer->dev);
Paul Cercueil2a74bd72014-04-28 13:18:48 +020087 if (!buffer->dev_is_high_speed)
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020088 free(buffer->buffer);
Paul Cercueila689cd92014-03-20 16:37:25 +010089 free(buffer->mask);
90 free(buffer);
91}
92
Paul Cercueilcbe78562014-04-01 14:42:20 +020093ssize_t iio_buffer_refill(struct iio_buffer *buffer)
Paul Cercueila689cd92014-03-20 16:37:25 +010094{
Paul Cercueil66ba1712014-04-01 16:11:48 +020095 ssize_t read;
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020096 const struct iio_device *dev = buffer->dev;
Paul Cercueil66ba1712014-04-01 16:11:48 +020097
Paul Cercueil2a74bd72014-04-28 13:18:48 +020098 if (buffer->dev_is_high_speed) {
Paul Cercueil93f7e1f2014-04-23 16:49:56 +020099 void *buf;
100 read = dev->ctx->ops->get_buffer(dev, &buf,
101 buffer->mask, dev->words);
102 if (read >= 0)
103 buffer->buffer = buf;
104 } else {
105 read = iio_device_read_raw(dev, buffer->buffer, buffer->length,
106 buffer->mask, dev->words);
107 }
108
Paul Cercueilcbe78562014-04-01 14:42:20 +0200109 if (read >= 0)
110 buffer->data_length = read;
111 return read;
Paul Cercueila689cd92014-03-20 16:37:25 +0100112}
113
Paul Cercueilea32b042014-04-11 13:47:12 +0200114ssize_t iio_buffer_push(const struct iio_buffer *buffer)
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200115{
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200116 return iio_device_write_raw(buffer->dev,
Paul Cercueileb4fb3d2014-04-25 11:33:01 +0200117 buffer->buffer, buffer->data_length);
Paul Cercueilfa2185d2014-04-01 16:13:32 +0200118}
119
Paul Cercueila689cd92014-03-20 16:37:25 +0100120ssize_t iio_buffer_foreach_sample(struct iio_buffer *buffer,
121 ssize_t (*callback)(const struct iio_channel *,
122 void *, size_t, void *), void *d)
123{
Paul Cercueil0d099e72014-05-08 16:07:30 +0200124 uintptr_t ptr = (uintptr_t) buffer->buffer,
125 end = ptr + buffer->data_length;
126 const struct iio_device *dev = buffer->dev;
127 ssize_t processed = 0,
128 sample_size = iio_device_get_sample_size_mask(dev,
129 buffer->mask, buffer->dev->words);
130 if (sample_size <= 0)
131 return -EINVAL;
Paul Cercueila689cd92014-03-20 16:37:25 +0100132
133 if (buffer->data_length < buffer->sample_size)
134 return 0;
135
Paul Cercueil0d099e72014-05-08 16:07:30 +0200136 while (end - ptr >= (size_t) sample_size) {
137 unsigned int i;
138
139 for (i = 0; i < dev->nb_channels; i++) {
140 const struct iio_channel *chn = dev->channels[i];
141 unsigned int length = chn->format.length / 8;
142
143 if (chn->index < 0)
144 break;
145
146 /* Test if the buffer has samples for this channel */
147 if (!TEST_BIT(buffer->mask, chn->index))
148 continue;
149
150 if (ptr % length)
151 ptr += length - (ptr % length);
152
153 /* Test if the client wants samples from this channel */
154 if (TEST_BIT(dev->mask, chn->index)) {
155 ssize_t ret = callback(chn,
156 (void *) ptr, length, d);
157 if (ret < 0)
158 return ret;
159 else
160 processed += ret;
161 }
162
163 ptr += length;
164 }
165 }
166 return processed;
Paul Cercueila689cd92014-03-20 16:37:25 +0100167}
Paul Cercueil95347b92014-03-21 09:50:17 +0100168
Paul Cercueil6d927162014-04-16 15:53:22 +0200169void * iio_buffer_start(const struct iio_buffer *buffer)
170{
171 return buffer->buffer;
172}
173
Paul Cercueil95347b92014-03-21 09:50:17 +0100174void * iio_buffer_first(const struct iio_buffer *buffer,
175 const struct iio_channel *chn)
176{
177 size_t len;
178 unsigned int i;
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200179 uintptr_t ptr = (uintptr_t) buffer->buffer;
Paul Cercueil95347b92014-03-21 09:50:17 +0100180
Paul Cercueil645ab972014-03-24 14:36:12 +0100181 if (!iio_channel_is_enabled(chn))
Paul Cercueil95347b92014-03-21 09:50:17 +0100182 return iio_buffer_end(buffer);
183
184 for (i = 0; i < buffer->dev->nb_channels; i++) {
185 struct iio_channel *cur = buffer->dev->channels[i];
186 len = cur->format.length / 8;
187
188 /* NOTE: dev->channels are ordered by index */
189 if (cur->index < 0 || cur->index == chn->index)
190 break;
191
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200192 if (ptr % len)
193 ptr += len - (ptr % len);
Paul Cercueil95347b92014-03-21 09:50:17 +0100194 ptr += len;
195 }
196
197 len = chn->format.length / 8;
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200198 if (ptr % len)
199 ptr += len - (ptr % len);
200 return (void *) ptr;
Paul Cercueil95347b92014-03-21 09:50:17 +0100201}
202
Paul Cercueil10682b32014-04-04 12:34:37 +0200203ptrdiff_t iio_buffer_step(const struct iio_buffer *buffer)
Paul Cercueil95347b92014-03-21 09:50:17 +0100204{
Paul Cercueil645ab972014-03-24 14:36:12 +0100205 return (ptrdiff_t) iio_device_get_sample_size_mask(buffer->dev,
206 buffer->mask, buffer->dev->words);
Paul Cercueil95347b92014-03-21 09:50:17 +0100207}
208
209void * iio_buffer_end(const struct iio_buffer *buffer)
210{
Paul Cercueil6e7f79e2014-04-04 12:27:24 +0200211 return (void *) ((uintptr_t) buffer->buffer + buffer->data_length);
Paul Cercueil95347b92014-03-21 09:50:17 +0100212}