blob: e8030aafe4355cd04cb7ad8fea3f26d052931e19 [file] [log] [blame]
/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
23
Mark McLoughline1144d02009-10-23 17:52:16 +010024#include "net/queue.h"
Mark McLoughlinf7105842009-10-08 19:58:31 +010025#include "qemu-queue.h"
Zhi Yong Wu86a77c32012-07-24 16:35:17 +010026#include "net.h"
Mark McLoughlinf7105842009-10-08 19:58:31 +010027
/* The delivery handler may only return zero if it will call
 * qemu_net_queue_flush() when it determines that it is once again able
 * to deliver packets. It must also call qemu_net_queue_purge() in its
 * cleanup path.
 *
 * If a sent callback is provided to send(), the caller must handle a
 * zero return from the delivery handler by not sending any more packets
 * until we have invoked the callback. Only in that case will we queue
 * the packet.
 *
 * If a sent callback isn't provided, we just drop the packet to avoid
 * unbounded queueing.
 */

42struct NetPacket {
43 QTAILQ_ENTRY(NetPacket) entry;
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +010044 NetClientState *sender;
Mark McLoughlinc0b8e492009-10-22 17:43:40 +010045 unsigned flags;
Mark McLoughlinf7105842009-10-08 19:58:31 +010046 int size;
47 NetPacketSent *sent_cb;
48 uint8_t data[0];
49};
50
/* A FIFO of packets pending delivery, plus a re-entrancy guard.
 *
 * 'opaque' is stored at creation time and handed unchanged to the
 * delivery functions (see qemu_net_queue_deliver()).
 */
struct NetQueue {
    void *opaque;

    /* Packets waiting for delivery, oldest first. */
    QTAILQ_HEAD(packets, NetPacket) packets;

    /* Set while a delivery handler is running; a send() re-entered
     * during delivery appends to the queue instead of recursing. */
    unsigned delivering : 1;
};
58
Zhi Yong Wu86a77c32012-07-24 16:35:17 +010059NetQueue *qemu_new_net_queue(void *opaque)
Mark McLoughlinf7105842009-10-08 19:58:31 +010060{
61 NetQueue *queue;
62
Anthony Liguori7267c092011-08-20 22:09:37 -050063 queue = g_malloc0(sizeof(NetQueue));
Mark McLoughlinf7105842009-10-08 19:58:31 +010064
Mark McLoughlinf7105842009-10-08 19:58:31 +010065 queue->opaque = opaque;
66
67 QTAILQ_INIT(&queue->packets);
68
69 queue->delivering = 0;
70
71 return queue;
72}
73
74void qemu_del_net_queue(NetQueue *queue)
75{
76 NetPacket *packet, *next;
77
78 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
79 QTAILQ_REMOVE(&queue->packets, packet, entry);
Anthony Liguori7267c092011-08-20 22:09:37 -050080 g_free(packet);
Mark McLoughlinf7105842009-10-08 19:58:31 +010081 }
82
Anthony Liguori7267c092011-08-20 22:09:37 -050083 g_free(queue);
Mark McLoughlinf7105842009-10-08 19:58:31 +010084}
85
86static ssize_t qemu_net_queue_append(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +010087 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +010088 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +010089 const uint8_t *buf,
90 size_t size,
91 NetPacketSent *sent_cb)
92{
93 NetPacket *packet;
94
Anthony Liguori7267c092011-08-20 22:09:37 -050095 packet = g_malloc(sizeof(NetPacket) + size);
Mark McLoughlinf7105842009-10-08 19:58:31 +010096 packet->sender = sender;
Mark McLoughlinc0b8e492009-10-22 17:43:40 +010097 packet->flags = flags;
Mark McLoughlinf7105842009-10-08 19:58:31 +010098 packet->size = size;
99 packet->sent_cb = sent_cb;
100 memcpy(packet->data, buf, size);
101
102 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
103
104 return size;
105}
106
107static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100108 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100109 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100110 const struct iovec *iov,
111 int iovcnt,
112 NetPacketSent *sent_cb)
113{
114 NetPacket *packet;
115 size_t max_len = 0;
116 int i;
117
118 for (i = 0; i < iovcnt; i++) {
119 max_len += iov[i].iov_len;
120 }
121
Anthony Liguori7267c092011-08-20 22:09:37 -0500122 packet = g_malloc(sizeof(NetPacket) + max_len);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100123 packet->sender = sender;
124 packet->sent_cb = sent_cb;
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100125 packet->flags = flags;
Mark McLoughlinf7105842009-10-08 19:58:31 +0100126 packet->size = 0;
127
128 for (i = 0; i < iovcnt; i++) {
129 size_t len = iov[i].iov_len;
130
131 memcpy(packet->data + packet->size, iov[i].iov_base, len);
132 packet->size += len;
133 }
134
135 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
136
137 return packet->size;
138}
139
140static ssize_t qemu_net_queue_deliver(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100141 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100142 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100143 const uint8_t *data,
144 size_t size)
145{
146 ssize_t ret = -1;
147
148 queue->delivering = 1;
Zhi Yong Wu86a77c32012-07-24 16:35:17 +0100149 ret = qemu_deliver_packet(sender, flags, data, size, queue->opaque);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100150 queue->delivering = 0;
151
152 return ret;
153}
154
155static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100156 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100157 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100158 const struct iovec *iov,
159 int iovcnt)
160{
161 ssize_t ret = -1;
162
163 queue->delivering = 1;
Zhi Yong Wu86a77c32012-07-24 16:35:17 +0100164 ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->opaque);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100165 queue->delivering = 0;
166
167 return ret;
168}
169
170ssize_t qemu_net_queue_send(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100171 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100172 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100173 const uint8_t *data,
174 size_t size,
175 NetPacketSent *sent_cb)
176{
177 ssize_t ret;
178
Zhi Yong Wu691a4f32012-07-24 16:35:18 +0100179 if (queue->delivering || !qemu_can_send_packet(sender)) {
180 return qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100181 }
182
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100183 ret = qemu_net_queue_deliver(queue, sender, flags, data, size);
Mark McLoughlin839f3682009-10-27 18:16:37 +0000184 if (ret == 0) {
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100185 qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100186 return 0;
187 }
188
189 qemu_net_queue_flush(queue);
190
191 return ret;
192}
193
194ssize_t qemu_net_queue_send_iov(NetQueue *queue,
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100195 NetClientState *sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100196 unsigned flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100197 const struct iovec *iov,
198 int iovcnt,
199 NetPacketSent *sent_cb)
200{
201 ssize_t ret;
202
Zhi Yong Wu691a4f32012-07-24 16:35:18 +0100203 if (queue->delivering || !qemu_can_send_packet(sender)) {
204 return qemu_net_queue_append_iov(queue, sender, flags,
205 iov, iovcnt, sent_cb);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100206 }
207
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100208 ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);
Mark McLoughlin839f3682009-10-27 18:16:37 +0000209 if (ret == 0) {
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100210 qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, sent_cb);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100211 return 0;
212 }
213
214 qemu_net_queue_flush(queue);
215
216 return ret;
217}
218
Stefan Hajnoczi4e68f7a2012-07-24 16:35:13 +0100219void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
Mark McLoughlinf7105842009-10-08 19:58:31 +0100220{
221 NetPacket *packet, *next;
222
223 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
224 if (packet->sender == from) {
225 QTAILQ_REMOVE(&queue->packets, packet, entry);
Anthony Liguori7267c092011-08-20 22:09:37 -0500226 g_free(packet);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100227 }
228 }
229}
230
231void qemu_net_queue_flush(NetQueue *queue)
232{
233 while (!QTAILQ_EMPTY(&queue->packets)) {
234 NetPacket *packet;
235 int ret;
236
237 packet = QTAILQ_FIRST(&queue->packets);
238 QTAILQ_REMOVE(&queue->packets, packet, entry);
239
240 ret = qemu_net_queue_deliver(queue,
241 packet->sender,
Mark McLoughlinc0b8e492009-10-22 17:43:40 +0100242 packet->flags,
Mark McLoughlinf7105842009-10-08 19:58:31 +0100243 packet->data,
244 packet->size);
Mark McLoughlin839f3682009-10-27 18:16:37 +0000245 if (ret == 0) {
Mark McLoughlinf7105842009-10-08 19:58:31 +0100246 QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
247 break;
248 }
249
250 if (packet->sent_cb) {
251 packet->sent_cb(packet->sender, ret);
252 }
253
Anthony Liguori7267c092011-08-20 22:09:37 -0500254 g_free(packet);
Mark McLoughlinf7105842009-10-08 19:58:31 +0100255 }
256}