net/filter-rewriter.c

/*
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 * Copyright (c) 2016 FUJITSU LIMITED
 * Copyright (c) 2016 Intel Corporation
 *
 * Author: Zhang Chen <zhangchen.fnst@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
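
/*
 * Usage sketch (not taken from this file; the ids below are placeholders):
 * the rewriter is normally attached to the secondary guest's netdev as a
 * QOM object, typically something like
 *
 *   -netdev tap,id=hn0
 *   -object filter-rewriter,id=rew0,netdev=hn0,queue=all
 *
 * with vnet_hdr_support=on added when the netdev carries a virtio-net
 * header. See docs/COLO-FRT.txt for the complete COLO setup.
 */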

#include "qemu/osdep.h"
#include "trace.h"
#include "colo.h"
#include "net/filter.h"
#include "net/net.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "qemu/main-loop.h"
#include "qemu/iov.h"
#include "net/checksum.h"
#include "net/colo.h"
#include "migration/colo.h"
#include "util.h"

#define FILTER_COLO_REWRITER(obj) \
    OBJECT_CHECK(RewriterState, (obj), TYPE_FILTER_REWRITER)

#define TYPE_FILTER_REWRITER "filter-rewriter"
#define FAILOVER_MODE_ON true
#define FAILOVER_MODE_OFF false

typedef struct RewriterState {
    NetFilterState parent_obj;
    NetQueue *incoming_queue;
    /* hashtable to save connection */
    GHashTable *connection_track_table;
    bool vnet_hdr;
    bool failover_mode;
} RewriterState;

static void filter_rewriter_failover_mode(RewriterState *s)
{
    s->failover_mode = FAILOVER_MODE_ON;
}

static void filter_rewriter_flush(NetFilterState *nf)
{
    RewriterState *s = FILTER_COLO_REWRITER(nf);

    if (!qemu_net_queue_flush(s->incoming_queue)) {
        /* Unable to empty the queue, purge remaining packets */
        qemu_net_queue_purge(s->incoming_queue, nf->netdev);
    }
}

/*
 * Return 1 if the packet is a parseable TCP packet, 0 otherwise.
 */
static int is_tcp_packet(Packet *pkt)
{
    if (!parse_packet_early(pkt) &&
        pkt->ip->ip_p == IPPROTO_TCP) {
        return 1;
    } else {
        return 0;
    }
}
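
/*
 * Connection states tracked by the two handlers below (a simplified TCP
 * state machine; FIN_WAIT_2, TIME_WAIT and CLOSING are deliberately
 * skipped):
 *
 *   passive open:  SYN from the primary side       -> TCPS_SYN_RECEIVED
 *                  handshake ACK                   -> TCPS_ESTABLISHED
 *                                                     (offset fixed up)
 *   active open:   SYN from the secondary guest    -> TCPS_SYN_SENT
 *                  SYN-ACK from the primary side   -> TCPS_ESTABLISHED
 *   passive close: ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSED
 *   active close:  ESTABLISHED -> FIN_WAIT_1 -> CLOSED
 */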

/* handle tcp packet from primary guest */
static int handle_primary_tcp_pkt(RewriterState *rf,
                                  Connection *conn,
                                  Packet *pkt, ConnectionKey *key)
{
    struct tcp_hdr *tcp_pkt;

    tcp_pkt = (struct tcp_hdr *)pkt->transport_header;
    if (trace_event_get_state_backends(TRACE_COLO_FILTER_REWRITER_DEBUG)) {
        trace_colo_filter_rewriter_pkt_info(__func__,
                    inet_ntoa(pkt->ip->ip_src), inet_ntoa(pkt->ip->ip_dst),
                    ntohl(tcp_pkt->th_seq), ntohl(tcp_pkt->th_ack),
                    tcp_pkt->th_flags);
        trace_colo_filter_rewriter_conn_offset(conn->offset);
    }

    if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN)) &&
        conn->tcp_state == TCPS_SYN_SENT) {
        conn->tcp_state = TCPS_ESTABLISHED;
    }

    if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_SYN)) {
        /*
         * A plain SYN marks a new connection; this state makes the
         * offset update below run only once per TCP connection.
         */
        conn->tcp_state = TCPS_SYN_RECEIVED;
    }

    if (((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK)) {
        if (conn->tcp_state == TCPS_SYN_RECEIVED) {
            /*
             * offset = secondary_seq - primary_seq
             * The ack packet was sent by the guest from the primary node,
             * so we use th_ack - 1 to get primary_seq.
             */
            conn->offset -= (ntohl(tcp_pkt->th_ack) - 1);
            conn->tcp_state = TCPS_ESTABLISHED;
        }
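
        /*
         * Worked example (numbers are illustrative, not from this file):
         * if the primary guest picked ISN 1000 for its SYN-ACK, the peer's
         * handshake-completing ack carries th_ack = 1001, and if the
         * secondary guest picked ISN 4000, conn->offset ends up as
         * 4000 - 1000 = 3000. Acks heading to the secondary are then
         * raised by 3000 below, while handle_secondary_tcp_pkt lowers the
         * secondary's sequence numbers by the same 3000, so the peer only
         * ever sees the primary's sequence space.
         */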
        if (conn->offset) {
            /* handle packets to the secondary from the primary */
            tcp_pkt->th_ack = htonl(ntohl(tcp_pkt->th_ack) + conn->offset);

            net_checksum_calculate((uint8_t *)pkt->data + pkt->vnet_hdr_len,
                                   pkt->size - pkt->vnet_hdr_len);
        }

        /*
         * Passive close step 3
         */
        if ((conn->tcp_state == TCPS_LAST_ACK) &&
            (ntohl(tcp_pkt->th_ack) == (conn->fin_ack_seq + 1))) {
            conn->tcp_state = TCPS_CLOSED;
            g_hash_table_remove(rf->connection_track_table, key);
        }
    }

    if ((tcp_pkt->th_flags & TH_FIN) == TH_FIN) {
        /*
         * Passive close.
         * Step 1:
         * The *server* side of this connection is the VM; the *client*
         * tries to close the connection and we enter CLOSE_WAIT.
         *
         * Step 2:
         * Here we enter LAST_ACK.
         * We got a 'fin=1, ack=1' packet from the server side and need to
         * record the seq of that 'fin=1, ack=1' packet.
         *
         * Step 3:
         * We got an 'ack=1' packet from the client side that acks the
         * 'fin=1, ack=1' packet from the server side. From this point on
         * there will be no more packets on the connection, unless some
         * error happens on the path between the filter object and the
         * vNIC; even in that rare case a new connection can still be
         * created, so it is safe to remove the connection from
         * connection_track_table.
         */
        if (conn->tcp_state == TCPS_ESTABLISHED) {
            conn->tcp_state = TCPS_CLOSE_WAIT;
        }

        /*
         * Active close step 2.
         */
        if (conn->tcp_state == TCPS_FIN_WAIT_1) {
            /*
             * To simplify the implementation, we do not wait the 2MSL time
             * in the filter rewriter: the guest kernel tracks the TCP
             * state and waits the 2MSL time itself, and if the client
             * resends the FIN packet the guest will reply with the last
             * ACK again. So we skip the TCPS_TIME_WAIT state here and go
             * straight to TCPS_CLOSED.
             */
            conn->tcp_state = TCPS_CLOSED;
            g_hash_table_remove(rf->connection_track_table, key);
        }
    }

    return 0;
}

/* handle tcp packet from secondary guest */
static int handle_secondary_tcp_pkt(RewriterState *rf,
                                    Connection *conn,
                                    Packet *pkt, ConnectionKey *key)
{
    struct tcp_hdr *tcp_pkt;

    tcp_pkt = (struct tcp_hdr *)pkt->transport_header;

    if (trace_event_get_state_backends(TRACE_COLO_FILTER_REWRITER_DEBUG)) {
        trace_colo_filter_rewriter_pkt_info(__func__,
                    inet_ntoa(pkt->ip->ip_src), inet_ntoa(pkt->ip->ip_dst),
                    ntohl(tcp_pkt->th_seq), ntohl(tcp_pkt->th_ack),
                    tcp_pkt->th_flags);
        trace_colo_filter_rewriter_conn_offset(conn->offset);
    }

    if (conn->tcp_state == TCPS_SYN_RECEIVED &&
        ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == (TH_ACK | TH_SYN))) {
        /*
         * Save offset = secondary_seq here; handle_primary_tcp_pkt later
         * turns it into offset = secondary_seq - primary_seq.
         */
        conn->offset = ntohl(tcp_pkt->th_seq);
    }

    /* VM active connect */
    if (conn->tcp_state == TCPS_CLOSED &&
        ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_SYN)) {
        conn->tcp_state = TCPS_SYN_SENT;
    }

    if ((tcp_pkt->th_flags & (TH_ACK | TH_SYN)) == TH_ACK) {
        /* Only need to adjust seq while offset is non-zero */
        if (conn->offset) {
            /* handle packets to the primary from the secondary */
            tcp_pkt->th_seq = htonl(ntohl(tcp_pkt->th_seq) - conn->offset);

            net_checksum_calculate((uint8_t *)pkt->data + pkt->vnet_hdr_len,
                                   pkt->size - pkt->vnet_hdr_len);
        }
    }

    /*
     * Passive close step 2:
     */
    if (conn->tcp_state == TCPS_CLOSE_WAIT &&
        (tcp_pkt->th_flags & (TH_ACK | TH_FIN)) == (TH_ACK | TH_FIN)) {
        conn->fin_ack_seq = ntohl(tcp_pkt->th_seq);
        conn->tcp_state = TCPS_LAST_ACK;
    }

    /*
     * Active close.
     *
     * Step 1:
     * The *server* side of this connection is the VM; the *server* tries
     * to close the connection, so we enter FIN_WAIT_1 here.
     *
     * Step 2 happens in handle_primary_tcp_pkt ("Active close step 2").
     * We simplify away the TCPS_FIN_WAIT_2, TCPS_TIME_WAIT and
     * CLOSING states.
     */
    if (conn->tcp_state == TCPS_ESTABLISHED &&
        (tcp_pkt->th_flags & (TH_ACK | TH_FIN)) == TH_FIN) {
        conn->tcp_state = TCPS_FIN_WAIT_1;
    }

    return 0;
}

static ssize_t colo_rewriter_receive_iov(NetFilterState *nf,
                                         NetClientState *sender,
                                         unsigned flags,
                                         const struct iovec *iov,
                                         int iovcnt,
                                         NetPacketSent *sent_cb)
{
    RewriterState *s = FILTER_COLO_REWRITER(nf);
    Connection *conn;
    ConnectionKey key;
    Packet *pkt;
    ssize_t size = iov_size(iov, iovcnt);
    ssize_t vnet_hdr_len = 0;
    char *buf = g_malloc0(size);

    iov_to_buf(iov, iovcnt, 0, buf, size);

    if (s->vnet_hdr) {
        vnet_hdr_len = nf->netdev->vnet_hdr_len;
    }
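
    /*
     * With vnet_hdr_support the buffer starts with a virtio-net header of
     * vnet_hdr_len bytes in front of the Ethernet frame; packet_new()
     * records that length so that parse_packet_early() and the checksum
     * recalculation in the TCP handlers can skip it.
     */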
    pkt = packet_new(buf, size, vnet_hdr_len);
    g_free(buf);

    /*
     * If we get a TCP packet, rewrite it so that the secondary guest's
     * connection is established successfully.
     */
    if (pkt && is_tcp_packet(pkt)) {

        fill_connection_key(pkt, &key);

        if (sender == nf->netdev) {
            /*
             * We need to fold the TCP TX and RX packets
             * into one connection.
             */
            reverse_connection_key(&key);
        }
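
        /*
         * Example: an RX packet client:5555 -> guest:80 and the guest's TX
         * reply guest:80 -> client:5555 would otherwise produce two
         * different keys; reversing the TX key makes both directions map
         * to the same Connection in connection_track_table. (Addresses and
         * ports here are only illustrative.)
         */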

        /* After failover there is no need to rewrite new TCP connections */
        if (s->failover_mode &&
            !connection_has_tracked(s->connection_track_table, &key)) {
            goto out;
        }

        conn = connection_get(s->connection_track_table,
                              &key,
                              NULL);

        if (sender == nf->netdev) {
            /* NET_FILTER_DIRECTION_TX */
            if (!handle_primary_tcp_pkt(s, conn, pkt, &key)) {
                qemu_net_queue_send(s->incoming_queue, sender, 0,
                (const uint8_t *)pkt->data, pkt->size, NULL);
                packet_destroy(pkt, NULL);
                pkt = NULL;
                /*
                 * Block the original packet here; the rewritten copy has
                 * already been queued and will be delivered instead.
                 */
                return 1;
            }
        } else {
            /* NET_FILTER_DIRECTION_RX */
            if (!handle_secondary_tcp_pkt(s, conn, pkt, &key)) {
                qemu_net_queue_send(s->incoming_queue, sender, 0,
                (const uint8_t *)pkt->data, pkt->size, NULL);
                packet_destroy(pkt, NULL);
                pkt = NULL;
                /*
                 * Block the original packet here; the rewritten copy has
                 * already been queued and will be delivered instead.
                 */
                return 1;
            }
        }
    }

out:
    packet_destroy(pkt, NULL);
    pkt = NULL;
    return 0;
}

static void reset_seq_offset(gpointer key, gpointer value, gpointer user_data)
{
    Connection *conn = (Connection *)value;

    conn->offset = 0;
}

static gboolean offset_is_nonzero(gpointer key,
                                  gpointer value,
                                  gpointer user_data)
{
    Connection *conn = (Connection *)value;

    return conn->offset ? true : false;
}
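
/*
 * At a COLO checkpoint the secondary guest is resynchronized with the
 * primary, so any previously recorded ISN offset no longer applies and is
 * reset. On failover, rewriting keeps going for connections that still
 * carry a non-zero offset; only when none do does the filter enter
 * failover mode, after which packets of untracked (new) connections are
 * passed through unmodified.
 */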

static void colo_rewriter_handle_event(NetFilterState *nf, int event,
                                       Error **errp)
{
    RewriterState *rs = FILTER_COLO_REWRITER(nf);

    switch (event) {
    case COLO_EVENT_CHECKPOINT:
        g_hash_table_foreach(rs->connection_track_table,
                            reset_seq_offset, NULL);
        break;
    case COLO_EVENT_FAILOVER:
        if (!g_hash_table_find(rs->connection_track_table,
                              offset_is_nonzero, NULL)) {
            filter_rewriter_failover_mode(rs);
        }
        break;
    default:
        break;
    }
}

static void colo_rewriter_cleanup(NetFilterState *nf)
{
    RewriterState *s = FILTER_COLO_REWRITER(nf);

    /* flush packets */
    if (s->incoming_queue) {
        filter_rewriter_flush(nf);
        g_free(s->incoming_queue);
    }
}

static void colo_rewriter_setup(NetFilterState *nf, Error **errp)
{
    RewriterState *s = FILTER_COLO_REWRITER(nf);

    s->connection_track_table = g_hash_table_new_full(connection_key_hash,
                                                      connection_key_equal,
                                                      g_free,
                                                      connection_destroy);
    s->incoming_queue = qemu_new_net_queue(qemu_netfilter_pass_to_next, nf);
}

static bool filter_rewriter_get_vnet_hdr(Object *obj, Error **errp)
{
    RewriterState *s = FILTER_COLO_REWRITER(obj);

    return s->vnet_hdr;
}

static void filter_rewriter_set_vnet_hdr(Object *obj,
                                         bool value,
                                         Error **errp)
{
    RewriterState *s = FILTER_COLO_REWRITER(obj);

    s->vnet_hdr = value;
}

static void filter_rewriter_init(Object *obj)
{
    RewriterState *s = FILTER_COLO_REWRITER(obj);

    s->vnet_hdr = false;
    s->failover_mode = FAILOVER_MODE_OFF;
    object_property_add_bool(obj, "vnet_hdr_support",
                             filter_rewriter_get_vnet_hdr,
                             filter_rewriter_set_vnet_hdr);
}

static void colo_rewriter_class_init(ObjectClass *oc, void *data)
{
    NetFilterClass *nfc = NETFILTER_CLASS(oc);

    nfc->setup = colo_rewriter_setup;
    nfc->cleanup = colo_rewriter_cleanup;
    nfc->receive_iov = colo_rewriter_receive_iov;
    nfc->handle_event = colo_rewriter_handle_event;
}

static const TypeInfo colo_rewriter_info = {
    .name = TYPE_FILTER_REWRITER,
    .parent = TYPE_NETFILTER,
    .class_init = colo_rewriter_class_init,
    .instance_init = filter_rewriter_init,
    .instance_size = sizeof(RewriterState),
};

static void register_types(void)
{
    type_register_static(&colo_rewriter_info);
}

type_init(register_types);