monitor: Convert do_physical_memory_save() to QObject
[armpft.git] / net-queue.c
blob75457f07e939529009e962ad494d58b336054c24
1 /*
2 * Copyright (c) 2003-2008 Fabrice Bellard
3 * Copyright (c) 2009 Red Hat, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 * THE SOFTWARE.
24 #include "net-queue.h"
25 #include "qemu-queue.h"
27 /* The delivery handler may only return zero if it will call
28 * qemu_net_queue_flush() when it determines that it is once again able
29 * to deliver packets. It must also call qemu_net_queue_purge() in its
30 * cleanup path.
32 * If a sent callback is provided to send(), the caller must handle a
33 * zero return from the delivery handler by not sending any more packets
34 * until we have invoked the callback. Only in that case will we queue
35 * the packet.
37 * If a sent callback isn't provided, we just drop the packet to avoid
38 * unbounded queueing.
41 struct NetPacket {
42 QTAILQ_ENTRY(NetPacket) entry;
43 VLANClientState *sender;
44 int size;
45 NetPacketSent *sent_cb;
46 uint8_t data[0];
/* Per-client packet queue: delivery handlers plus a FIFO of pending packets. */
struct NetQueue {
    NetPacketDeliver *deliver;          /* single-buffer delivery handler */
    NetPacketDeliverIOV *deliver_iov;   /* scatter/gather delivery handler */
    void *opaque;                       /* passed through to both handlers */

    QTAILQ_HEAD(packets, NetPacket) packets;  /* FIFO of queued packets */

    unsigned delivering : 1;  /* set while a handler runs; used to detect
                                 re-entrant sends (see qemu_net_queue_send) */
};
59 NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver,
60 NetPacketDeliverIOV *deliver_iov,
61 void *opaque)
63 NetQueue *queue;
65 queue = qemu_mallocz(sizeof(NetQueue));
67 queue->deliver = deliver;
68 queue->deliver_iov = deliver_iov;
69 queue->opaque = opaque;
71 QTAILQ_INIT(&queue->packets);
73 queue->delivering = 0;
75 return queue;
78 void qemu_del_net_queue(NetQueue *queue)
80 NetPacket *packet, *next;
82 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
83 QTAILQ_REMOVE(&queue->packets, packet, entry);
84 qemu_free(packet);
87 qemu_free(queue);
90 static ssize_t qemu_net_queue_append(NetQueue *queue,
91 VLANClientState *sender,
92 const uint8_t *buf,
93 size_t size,
94 NetPacketSent *sent_cb)
96 NetPacket *packet;
98 packet = qemu_malloc(sizeof(NetPacket) + size);
99 packet->sender = sender;
100 packet->size = size;
101 packet->sent_cb = sent_cb;
102 memcpy(packet->data, buf, size);
104 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
106 return size;
109 static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
110 VLANClientState *sender,
111 const struct iovec *iov,
112 int iovcnt,
113 NetPacketSent *sent_cb)
115 NetPacket *packet;
116 size_t max_len = 0;
117 int i;
119 for (i = 0; i < iovcnt; i++) {
120 max_len += iov[i].iov_len;
123 packet = qemu_malloc(sizeof(NetPacket) + max_len);
124 packet->sender = sender;
125 packet->sent_cb = sent_cb;
126 packet->size = 0;
128 for (i = 0; i < iovcnt; i++) {
129 size_t len = iov[i].iov_len;
131 memcpy(packet->data + packet->size, iov[i].iov_base, len);
132 packet->size += len;
135 QTAILQ_INSERT_TAIL(&queue->packets, packet, entry);
137 return packet->size;
140 static ssize_t qemu_net_queue_deliver(NetQueue *queue,
141 VLANClientState *sender,
142 const uint8_t *data,
143 size_t size)
145 ssize_t ret = -1;
147 queue->delivering = 1;
148 ret = queue->deliver(sender, data, size, queue->opaque);
149 queue->delivering = 0;
151 return ret;
154 static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
155 VLANClientState *sender,
156 const struct iovec *iov,
157 int iovcnt)
159 ssize_t ret = -1;
161 queue->delivering = 1;
162 ret = queue->deliver_iov(sender, iov, iovcnt, queue->opaque);
163 queue->delivering = 0;
165 return ret;
168 ssize_t qemu_net_queue_send(NetQueue *queue,
169 VLANClientState *sender,
170 const uint8_t *data,
171 size_t size,
172 NetPacketSent *sent_cb)
174 ssize_t ret;
176 if (queue->delivering) {
177 return qemu_net_queue_append(queue, sender, data, size, NULL);
180 ret = qemu_net_queue_deliver(queue, sender, data, size);
181 if (ret == 0 && sent_cb != NULL) {
182 qemu_net_queue_append(queue, sender, data, size, sent_cb);
183 return 0;
186 qemu_net_queue_flush(queue);
188 return ret;
191 ssize_t qemu_net_queue_send_iov(NetQueue *queue,
192 VLANClientState *sender,
193 const struct iovec *iov,
194 int iovcnt,
195 NetPacketSent *sent_cb)
197 ssize_t ret;
199 if (queue->delivering) {
200 return qemu_net_queue_append_iov(queue, sender, iov, iovcnt, NULL);
203 ret = qemu_net_queue_deliver_iov(queue, sender, iov, iovcnt);
204 if (ret == 0 && sent_cb != NULL) {
205 qemu_net_queue_append_iov(queue, sender, iov, iovcnt, sent_cb);
206 return 0;
209 qemu_net_queue_flush(queue);
211 return ret;
214 void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from)
216 NetPacket *packet, *next;
218 QTAILQ_FOREACH_SAFE(packet, &queue->packets, entry, next) {
219 if (packet->sender == from) {
220 QTAILQ_REMOVE(&queue->packets, packet, entry);
221 qemu_free(packet);
226 void qemu_net_queue_flush(NetQueue *queue)
228 while (!QTAILQ_EMPTY(&queue->packets)) {
229 NetPacket *packet;
230 int ret;
232 packet = QTAILQ_FIRST(&queue->packets);
233 QTAILQ_REMOVE(&queue->packets, packet, entry);
235 ret = qemu_net_queue_deliver(queue,
236 packet->sender,
237 packet->data,
238 packet->size);
239 if (ret == 0 && packet->sent_cb != NULL) {
240 QTAILQ_INSERT_HEAD(&queue->packets, packet, entry);
241 break;
244 if (packet->sent_cb) {
245 packet->sent_cb(packet->sender, ret);
248 qemu_free(packet);