migration/multifd-uadk.c
/*
 * Multifd UADK compression accelerator implementation
 *
 * Copyright (c) 2024 Huawei Technologies R & D (UK) Ltd
 *
 * Authors:
 *  Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "exec/ramblock.h"
#include "migration.h"
#include "multifd.h"
#include "options.h"
#include "qemu/error-report.h"
#include "uadk/wd_comp.h"
#include "uadk/wd_sched.h"
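
/*
 * Per-channel UADK state: "handle" is the UADK compression session (NULL when
 * no hardware is available), "buf" is the bounce buffer for compressed page
 * data, and "buf_hdr" holds one 32-bit length per page, sent big-endian on
 * the wire so the receiver can split the stream back into per-page chunks.
 */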

struct wd_data {
    handle_t handle;
    uint8_t *buf;
    uint32_t *buf_hdr;
};
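
/*
 * Bring up UADK's zlib compression service. wd_comp_init2() with
 * SCHED_POLICY_RR/TASK_HW requests hardware-backed compression with
 * round-robin scheduling across the available queues (going by the constant
 * names; see the UADK documentation). -WD_EEXIST only means another channel
 * already initialised it, so it is not treated as an error.
 */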

static bool uadk_hw_init(void)
{
    char alg[] = "zlib";
    int ret;

    ret = wd_comp_init2(alg, SCHED_POLICY_RR, TASK_HW);
    if (ret && ret != -WD_EEXIST) {
        return false;
    } else {
        return true;
    }
}

static struct wd_data *multifd_uadk_init_sess(uint32_t count,
                                              uint32_t page_size,
                                              bool compress, Error **errp)
{
    struct wd_comp_sess_setup ss = {0};
    struct sched_params param = {0};
    uint32_t size = count * page_size;
    struct wd_data *wd;

    wd = g_new0(struct wd_data, 1);

    if (uadk_hw_init()) {
        ss.alg_type = WD_ZLIB;
        if (compress) {
            ss.op_type = WD_DIR_COMPRESS;
            /* Add an additional page for handling output > input */
            size += page_size;
        } else {
            ss.op_type = WD_DIR_DECOMPRESS;
        }

        /* We use default level 1 compression and 4K window size */
        param.type = ss.op_type;
        ss.sched_param = &param;

        wd->handle = wd_comp_alloc_sess(&ss);
        if (!wd->handle) {
            error_setg(errp, "multifd: failed wd_comp_alloc_sess");
            goto out;
        }
    } else {
        /* For CI test use */
        warn_report_once("UADK hardware not available. Switch to no compression mode");
    }

    wd->buf = g_try_malloc(size);
    if (!wd->buf) {
        error_setg(errp, "multifd: out of mem for uadk buf");
        goto out_free_sess;
    }
    wd->buf_hdr = g_new0(uint32_t, count);
    return wd;

out_free_sess:
    if (wd->handle) {
        wd_comp_free_sess(wd->handle);
    }
out:
    wd_comp_uninit2();
    g_free(wd);
    return NULL;
}
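
/*
 * When no UADK hardware is found, wd->handle stays NULL: the send side then
 * falls back to shipping raw pages (see multifd_uadk_send_prepare), and the
 * receive side can only accept raw pages, rejecting compressed ones.
 */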

static void multifd_uadk_uninit_sess(struct wd_data *wd)
{
    if (wd->handle) {
        wd_comp_free_sess(wd->handle);
    }
    wd_comp_uninit2();
    g_free(wd->buf);
    g_free(wd->buf_hdr);
    g_free(wd);
}

/**
 * multifd_uadk_send_setup: setup send side
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_send_setup(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *wd;

    wd = multifd_uadk_init_sess(p->page_count, p->page_size, true, errp);
    if (!wd) {
        return -1;
    }

    p->compress_data = wd;
    assert(p->iov == NULL);
    /*
     * Each page will be compressed independently and sent using an IOV. The
     * additional two IOVs are used to store packet header and compressed data
     * length
     */
    p->iov = g_new0(struct iovec, p->page_count + 2);
    return 0;
}

/**
 * multifd_uadk_send_cleanup: cleanup send side
 *
 * Close the channel and return memory.
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static void multifd_uadk_send_cleanup(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *wd = p->compress_data;

    multifd_uadk_uninit_sess(wd);
    p->compress_data = NULL;
}

static inline void prepare_next_iov(MultiFDSendParams *p, void *base,
                                    uint32_t len)
{
    p->iov[p->iovs_num].iov_base = (uint8_t *)base;
    p->iov[p->iovs_num].iov_len = len;
    p->next_packet_size += len;
    p->iovs_num++;
}
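
/*
 * Packet layout built below: iov[0] is left for the multifd packet header
 * (filled in by the generic multifd code), iov[1] points at buf_hdr (one be32
 * compressed length per normal page), and each following iov carries one
 * page's compressed data, or the raw page itself when compression is skipped.
 */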

/**
 * multifd_uadk_send_prepare: prepare data to be able to send
 *
 * Create a compressed buffer with all the pages that we are going to
 * send.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_send_prepare(MultiFDSendParams *p, Error **errp)
{
    struct wd_data *uadk_data = p->compress_data;
    uint32_t hdr_size;
    uint8_t *buf = uadk_data->buf;
    int ret = 0;

    if (!multifd_send_prepare_common(p)) {
        goto out;
    }

    hdr_size = p->pages->normal_num * sizeof(uint32_t);
    /* prepare the header that stores the lengths of all compressed data */
    prepare_next_iov(p, uadk_data->buf_hdr, hdr_size);

    for (int i = 0; i < p->pages->normal_num; i++) {
        struct wd_comp_req creq = {
            .op_type = WD_DIR_COMPRESS,
            .src = p->pages->block->host + p->pages->offset[i],
            .src_len = p->page_size,
            .dst = buf,
            /* Set dst_len to double the src in case compressed out >= page_size */
            .dst_len = p->page_size * 2,
        };

        if (uadk_data->handle) {
            ret = wd_do_comp_sync(uadk_data->handle, &creq);
            if (ret || creq.status) {
                error_setg(errp, "multifd %u: failed compression, ret %d status %d",
                           p->id, ret, creq.status);
                return -1;
            }
            if (creq.dst_len < p->page_size) {
                uadk_data->buf_hdr[i] = cpu_to_be32(creq.dst_len);
                prepare_next_iov(p, buf, creq.dst_len);
                buf += creq.dst_len;
            }
        }

        /*
         * Send raw data if no UADK hardware or if compressed out >= page_size.
         * We might be better off sending raw data if output is slightly less
         * than page_size as well because at the receive end we can skip the
         * decompression. But it is tricky to find the right number here.
         */
        if (!uadk_data->handle || creq.dst_len >= p->page_size) {
            uadk_data->buf_hdr[i] = cpu_to_be32(p->page_size);
            prepare_next_iov(p, p->pages->block->host + p->pages->offset[i],
                             p->page_size);
            buf += p->page_size;
        }
    }
out:
    p->flags |= MULTIFD_FLAG_UADK;
    multifd_send_fill_packet(p);
    return 0;
}
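
/*
 * A stored length equal to page_size is the signal that the page was sent
 * raw; the receive side checks for exactly that value and copies the page
 * with memcpy instead of calling into UADK.
 */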

/**
 * multifd_uadk_recv_setup: setup receive side
 *
 * Create the compressed channel and buffer.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_recv_setup(MultiFDRecvParams *p, Error **errp)
{
    struct wd_data *wd;

    wd = multifd_uadk_init_sess(p->page_count, p->page_size, false, errp);
    if (!wd) {
        return -1;
    }
    p->compress_data = wd;
    return 0;
}

/**
 * multifd_uadk_recv_cleanup: cleanup receive side
 *
 * Close the channel and return memory.
 *
 * @p: Params for the channel that we are using
 */
static void multifd_uadk_recv_cleanup(MultiFDRecvParams *p)
{
    struct wd_data *wd = p->compress_data;

    multifd_uadk_uninit_sess(wd);
    p->compress_data = NULL;
}

/**
 * multifd_uadk_recv: read the data from the channel into actual pages
 *
 * Read the compressed buffer, and uncompress it into the actual
 * pages.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int multifd_uadk_recv(MultiFDRecvParams *p, Error **errp)
{
    struct wd_data *uadk_data = p->compress_data;
    uint32_t in_size = p->next_packet_size;
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
    uint32_t hdr_len = p->normal_num * sizeof(uint32_t);
    uint32_t data_len = 0;
    uint8_t *buf = uadk_data->buf;
    int ret = 0;

    if (flags != MULTIFD_FLAG_UADK) {
        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                   p->id, flags, MULTIFD_FLAG_UADK);
        return -1;
    }

    multifd_recv_zero_page_process(p);
    if (!p->normal_num) {
        assert(in_size == 0);
        return 0;
    }

    /* read compressed data lengths */
    assert(hdr_len < in_size);
    ret = qio_channel_read_all(p->c, (void *) uadk_data->buf_hdr,
                               hdr_len, errp);
    if (ret != 0) {
        return ret;
    }

    for (int i = 0; i < p->normal_num; i++) {
        uadk_data->buf_hdr[i] = be32_to_cpu(uadk_data->buf_hdr[i]);
        data_len += uadk_data->buf_hdr[i];
        assert(uadk_data->buf_hdr[i] <= p->page_size);
    }

    /* read compressed data */
    assert(in_size == hdr_len + data_len);
    ret = qio_channel_read_all(p->c, (void *)buf, data_len, errp);
    if (ret != 0) {
        return ret;
    }
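
    /*
     * Decompress (or copy) each page. buf_hdr[i] == page_size marks a raw
     * page; anything smaller is compressed data that must inflate back to
     * exactly one page.
     */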
    for (int i = 0; i < p->normal_num; i++) {
        struct wd_comp_req creq = {
            .op_type = WD_DIR_DECOMPRESS,
            .src = buf,
            .src_len = uadk_data->buf_hdr[i],
            .dst = p->host + p->normal[i],
            .dst_len = p->page_size,
        };

        if (uadk_data->buf_hdr[i] == p->page_size) {
            memcpy(p->host + p->normal[i], buf, p->page_size);
            buf += p->page_size;
            continue;
        }

        if (unlikely(!uadk_data->handle)) {
            error_setg(errp, "multifd %u: UADK HW not available for decompression",
                       p->id);
            return -1;
        }

        ret = wd_do_comp_sync(uadk_data->handle, &creq);
        if (ret || creq.status) {
            error_setg(errp, "multifd %u: failed decompression, ret %d status %d",
                       p->id, ret, creq.status);
            return -1;
        }
        if (creq.dst_len != p->page_size) {
            error_setg(errp, "multifd %u: decompressed length error", p->id);
            return -1;
        }
        buf += uadk_data->buf_hdr[i];
    }

    return 0;
}

static MultiFDMethods multifd_uadk_ops = {
    .send_setup = multifd_uadk_send_setup,
    .send_cleanup = multifd_uadk_send_cleanup,
    .send_prepare = multifd_uadk_send_prepare,
    .recv_setup = multifd_uadk_recv_setup,
    .recv_cleanup = multifd_uadk_recv_cleanup,
    .recv = multifd_uadk_recv,
};

static void multifd_uadk_register(void)
{
    multifd_register_ops(MULTIFD_COMPRESSION_UADK, &multifd_uadk_ops);
}
migration_init(multifd_uadk_register);
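
/*
 * Usage note: in a build configured with UADK support, this backend is
 * selected at runtime through the multifd-compression migration parameter
 * (value "uadk"), e.g. "migrate_set_parameter multifd-compression uadk" in
 * the HMP monitor, with the multifd capability enabled on both sides.
 */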