crypto: qat - when stopping all devices make sure VFs are stopped first
[linux-2.6/btrfs-unstable.git] / drivers / crypto / qat / qat_common / qat_uclo.c
blobc48f181e894157a1c8767d64d4ecf00975bd1ef6
1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 Contact Information:
17 qat-linux@intel.com
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
/*
 * qat_uclo_init_ae_data() - bind UOF image @image_num to the next free
 * slice of accel engine @ae and allocate that slice's region/page pair.
 *
 * On success ae_data->slice_num is incremented, so repeated calls fill
 * consecutive slices. Returns 0 or -ENOMEM; on failure the partially
 * allocated region is freed and slice_num is left untouched.
 */
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		/* real image: take its context mask and full ustore size */
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	/* commit the slice only after all allocations succeeded */
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
101 unsigned int i;
103 if (!ae_data) {
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
105 return -EINVAL;
108 for (i = 0; i < ae_data->slice_num; i++) {
109 kfree(ae_data->ae_slices[i].region);
110 ae_data->ae_slices[i].region = NULL;
111 kfree(ae_data->ae_slices[i].page);
112 ae_data->ae_slices[i].page = NULL;
114 return 0;
/*
 * qat_uclo_get_string() - return the string at @str_offset inside the UOF
 * string table, or NULL if the table is empty or the offset is out of
 * range.
 *
 * NOTE(review): the bound check uses '>' so str_offset == table_len is
 * accepted; presumably the table always ends with a NUL so this points at
 * most to the final terminator -- confirm against the UOF format spec.
 */
static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if ((!str_table->table_len) || (str_offset > str_table->table_len))
		return NULL;
	return (char *)(((unsigned long)(str_table->strings)) + str_offset);
}
/*
 * qat_uclo_check_format() - validate a UOF file header: magic file id and
 * exact major/minor version match against what this loader supports.
 *
 * Return: 0 if the header is acceptable, -EINVAL otherwise.
 */
static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
{
	/* only the low byte of each version field is meaningful */
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}
/*
 * qat_uclo_wr_sram_by_words() - copy @num_in_bytes bytes from @val to
 * device SRAM starting at @addr, one 32-bit word at a time.
 *
 * NOTE(review): assumes num_in_bytes is a multiple of 4 -- the loop would
 * wrap below zero otherwise. The only caller (qat_uclo_wr_mimage) passes
 * ALIGN(mem_size, 4), so this holds; confirm if new callers are added.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		/* memcpy avoids unaligned 32-bit loads from the source */
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
/*
 * qat_uclo_wr_umem_by_words() - write @num_in_bytes bytes from @val into
 * AE @ae's micro-store memory, one 32-bit word per qat_hal_wr_umem()
 * call.
 *
 * NOTE(review): like the SRAM variant, assumes num_in_bytes is a
 * multiple of 4 -- the batch-init path always writes 4-byte attributes.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
/*
 * qat_uclo_batch_wr_umem() - replay a queued list of umem writes.
 *
 * The list head is a dummy node; real entries start at ->next. Each entry
 * carries its own AE number, so the @ae parameter is overwritten per node.
 */
static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	/* skip the dummy list head */
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}
198 static void
199 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
200 struct icp_qat_uof_batch_init **base)
202 struct icp_qat_uof_batch_init *umem_init;
204 umem_init = *base;
205 while (umem_init) {
206 struct icp_qat_uof_batch_init *pre;
208 pre = umem_init;
209 umem_init = umem_init->next;
210 kfree(pre);
212 *base = NULL;
/*
 * qat_uclo_parse_num() - parse the leading decimal digits of @str into
 * @num.
 *
 * Copies at most 15 characters into a zeroed local buffer, truncates the
 * copy at the first non-digit, then converts with kstrtoul.
 *
 * Return: 0 on success, -EFAULT if no valid number could be parsed.
 *
 * NOTE(review): input longer than 15 digits is silently truncated;
 * acceptable here because callers only parse small AE numbers.
 */
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = {0};
	unsigned long ae = 0;
	int i;

	/* buf is pre-zeroed, so the copy is always NUL-terminated */
	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;

	*num = (unsigned int)ae;
	return 0;
}
235 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
236 struct icp_qat_uof_initmem *init_mem,
237 unsigned int size_range, unsigned int *ae)
239 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
240 char *str;
242 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
243 pr_err("QAT: initmem is out of range");
244 return -EINVAL;
246 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
247 pr_err("QAT: Memory scope for init_mem error\n");
248 return -EINVAL;
250 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
251 if (!str) {
252 pr_err("QAT: AE name assigned in UOF init table is NULL\n");
253 return -EINVAL;
255 if (qat_uclo_parse_num(str, ae)) {
256 pr_err("QAT: Parse num for AE number failed\n");
257 return -EINVAL;
259 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
260 pr_err("QAT: ae %d out of range\n", *ae);
261 return -EINVAL;
263 return 0;
266 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
267 *handle, struct icp_qat_uof_initmem
268 *init_mem, unsigned int ae,
269 struct icp_qat_uof_batch_init
270 **init_tab_base)
272 struct icp_qat_uof_batch_init *init_header, *tail;
273 struct icp_qat_uof_batch_init *mem_init, *tail_old;
274 struct icp_qat_uof_memvar_attr *mem_val_attr;
275 unsigned int i, flag = 0;
277 mem_val_attr =
278 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
279 sizeof(struct icp_qat_uof_initmem));
281 init_header = *init_tab_base;
282 if (!init_header) {
283 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
284 if (!init_header)
285 return -ENOMEM;
286 init_header->size = 1;
287 *init_tab_base = init_header;
288 flag = 1;
290 tail_old = init_header;
291 while (tail_old->next)
292 tail_old = tail_old->next;
293 tail = tail_old;
294 for (i = 0; i < init_mem->val_attr_num; i++) {
295 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
296 if (!mem_init)
297 goto out_err;
298 mem_init->ae = ae;
299 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
300 mem_init->value = &mem_val_attr->value;
301 mem_init->size = 4;
302 mem_init->next = NULL;
303 tail->next = mem_init;
304 tail = mem_init;
305 init_header->size += qat_hal_get_ins_num();
306 mem_val_attr++;
308 return 0;
309 out_err:
310 while (tail_old) {
311 mem_init = tail_old->next;
312 kfree(tail_old);
313 tail_old = mem_init;
315 if (flag)
316 kfree(*init_tab_base);
317 return -ENOMEM;
/*
 * qat_uclo_init_lmem_seg() - queue local-memory initialisation for the AE
 * named by @init_mem's symbol, bounded by the LM register count.
 *
 * Return: 0 on success, -EINVAL on validation or allocation failure.
 */
static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}
/*
 * qat_uclo_init_umem_seg() - queue micro-store initialisation for the AE
 * named by @init_mem, then bump each slice image's uwords_num so the
 * loader later fills ustore at least up to the highest initialised word.
 *
 * Return: 0 on success, -EINVAL on validation or allocation failure.
 */
static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}
358 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
/*
 * qat_uclo_init_ae_memory() - dispatch one UOF init-mem record to the
 * handler for its memory region (local memory or micro-store).
 *
 * Return: 0 on success, -EINVAL for unknown regions or handler failure.
 */
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}
/*
 * qat_uclo_init_ustore() - pre-fill each assigned AE's control store with
 * the image's fill pattern, below and above the region the code page
 * will occupy (which is written later by qat_uclo_wr_uimage_page()).
 *
 * Return: 0 on success, -ENOMEM if the fill buffer cannot be allocated.
 */
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	/* replicate the 64-bit fill pattern across the whole buffer */
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		/* first uword past the end of the code page */
		patt_pos = page->beg_addr_p + page->micro_words_num;

		/* fill [0, beg_addr_p) and [patt_pos, ustore_size] */
		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
		/* NOTE(review): the "+ 1" writes one word past ustore_size-1;
		 * presumably eff_ustore_size is a last-valid-address here --
		 * confirm against the HAL.
		 */
	}
	kfree(fill_data);
	return 0;
}
/*
 * qat_uclo_init_memory() - walk the UOF init-mem table, queue all LM and
 * umem writes, then replay and free the per-AE batch lists.
 *
 * Return: 0 on success, -EINVAL on any parse or HAL failure.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		/* records are variable length: header + val_attr_num attrs */
		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
			(unsigned long)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}
/*
 * qat_uclo_find_chunk() - find the next chunk named @chunk_id inside a
 * UOF object header, resuming after position @cur (pass NULL to start
 * from the beginning).
 *
 * Return: pointer to the matching chunk header, or NULL if none left.
 */
static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
		(struct icp_qat_uof_chunkhdr *)
		((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		/* "cur < &chunk_hdr[i]" skips chunks at or before cur */
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}
/*
 * qat_uclo_calc_checksum() - fold one byte @ch into the running CRC @reg.
 *
 * This is one step of CRC-16/XMODEM (polynomial 0x1021, MSB-first,
 * initial value 0). Only the low 16 bits of the return value are
 * significant.
 */
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	unsigned int msb = 1 << 0xF;
	unsigned int byte_in = (unsigned int)((reg >> 0x18) ^ ch);
	int bit;

	/* xor the input byte into the high byte of the 16-bit register */
	reg ^= byte_in << 0x8;
	for (bit = 0; bit < 0x8; bit++)
		reg = (reg & msb) ? (reg << 1) ^ 0x1021 : reg << 1;
	return reg & 0xFFFF;
}
/*
 * qat_uclo_calc_str_checksum() - CRC-16 over the first @num bytes at
 * @ptr. A NULL @ptr yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (!ptr)
		return chksum;
	while (num--)
		chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}
/*
 * qat_uclo_map_chunk() - locate the file chunk named @chunk_id in @buf,
 * verify its checksum and wrap it in a freshly allocated objhdr.
 *
 * Return: the new objhdr (caller frees), or NULL if the chunk is absent,
 * its checksum mismatches, or allocation fails.
 */
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	/* file chunk headers directly follow the file header */
	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			/* reject a matching chunk with a bad checksum */
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}
526 static unsigned int
527 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
528 struct icp_qat_uof_image *image)
530 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
531 struct icp_qat_uof_objtable *neigh_reg_tab;
532 struct icp_qat_uof_code_page *code_page;
534 code_page = (struct icp_qat_uof_code_page *)
535 ((char *)image + sizeof(struct icp_qat_uof_image));
536 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
537 code_page->uc_var_tab_offset);
538 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
539 code_page->imp_var_tab_offset);
540 imp_expr_tab = (struct icp_qat_uof_objtable *)
541 (encap_uof_obj->beg_uof +
542 code_page->imp_expr_tab_offset);
543 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
544 imp_expr_tab->entry_num) {
545 pr_err("QAT: UOF can't contain imported variable to be parsed");
546 return -EINVAL;
548 neigh_reg_tab = (struct icp_qat_uof_objtable *)
549 (encap_uof_obj->beg_uof +
550 code_page->neigh_reg_tab_offset);
551 if (neigh_reg_tab->entry_num) {
552 pr_err("QAT: UOF can't contain shared control store feature");
553 return -EINVAL;
555 if (image->numpages > 1) {
556 pr_err("QAT: UOF can't contain multiple pages");
557 return -EINVAL;
559 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
560 pr_err("QAT: UOF can't use shared control store feature");
561 return -EFAULT;
563 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
564 pr_err("QAT: UOF can't use reloadable feature");
565 return -EFAULT;
567 return 0;
/*
 * qat_uclo_map_image_page() - populate @page with pointers into the
 * encapsulated UOF: code page metadata, micro-word count and the table
 * of uword blocks, resolving each block's micro_words to an absolute
 * address inside the UOF buffer.
 */
static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				    *encap_uof_obj,
				    struct icp_qat_uof_image *img,
				    struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	/* the code page record immediately follows the image header */
	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	/* turn per-block file offsets into absolute pointers */
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
/*
 * qat_uclo_map_uimage() - enumerate all IMAG chunks (up to @max_image)
 * and fill @ae_uimage with pointers to each image's register, regsym and
 * sbreak tables, validating compatibility and allocating one encap page
 * per image.
 *
 * Return: the number of images mapped; 0 on error (all pages allocated
 * so far are freed).
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		/* resume the chunk scan after the previous match */
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}
/*
 * qat_uclo_map_ae() - for every enabled AE, bind every image assigned to
 * it via qat_uclo_init_ae_data(); fail if no image targets any AE.
 *
 * Return: 0 on success, -EINVAL on bind failure or if no AE is used.
 *
 * NOTE(review): the loop bound is "ae <= max_ae" while the caller passes
 * ae_max_num -- looks off-by-one, but out-of-range AEs are filtered by
 * the ae_mask test_bit; confirm ae_mask width before changing.
 */
static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae <= max_ae; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set");
		return -EINVAL;
	}
	return 0;
}
/*
 * qat_uclo_map_str_table() - locate the chunk named @tab_name and point
 * @str_table's length/strings fields into it.
 *
 * Return: @str_table on success, NULL if the chunk is absent.
 */
static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		/* offset of 'strings' within the struct = on-disk hdr size */
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (unsigned long)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}
/*
 * qat_uclo_map_initmem_table() - locate the IMEM chunk (if present) and
 * record its entry count and the pointer to its first init-mem record.
 * Leaves @init_mem_tab untouched if the chunk is absent.
 */
static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		/* chunk layout: u32 entry count, then the records */
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}
/*
 * qat_uclo_check_uof_compat() - check the mapped UOF against the current
 * device: CPU type bitmask must intersect and the device's major
 * revision must fall inside the UOF's supported CPU version range.
 *
 * Return: 0 if compatible, -EINVAL otherwise.
 */
static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
		pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}
/*
 * qat_uclo_init_reg() - write @value into one AE register, dispatching
 * on register type to the matching HAL initialiser.
 *
 * The ABS cases deliberately clear ctx_mask and fall through to their
 * REL counterparts: absolute registers are not per-context.
 *
 * Return: 0 or the HAL call's result; -EFAULT for unknown types.
 */
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through - absolute GPRs use the REL path with no ctx */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}
/*
 * qat_uclo_init_reg_sym() - apply every init-regsym record of an image
 * to AE @ae. The context mask is 0xff in 8-context mode and 0x55 (even
 * contexts only) in 4-context mode.
 *
 * Return: 0 on success, -EINVAL for bad contexts or unsupported
 * expression-based initialisers.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			/* write to all contexts in the mode's mask */
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}
/*
 * qat_uclo_init_globals() - one-time global initialisation before images
 * are loaded: run the init-mem table (if any), then apply each slice's
 * register symbols. Idempotent via obj_handle->global_inited.
 *
 * Return: 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: initialize memory failed\n");
			return -EINVAL;
		}
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
						  obj_handle->ae_data[ae].
						  ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}
/*
 * qat_uclo_set_ae_mode() - program each enabled AE's context mode, NN
 * mode and both local-memory modes from the ae_mode word of the image
 * bound to each of its slices.
 *
 * Return: 0 on success, -EFAULT on any HAL failure.
 */
static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		ae_data = &obj_handle->ae_data[ae];
		/* never walk more slices than contexts exist */
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
				return -EFAULT;
			}
		}
	}
	return 0;
}
909 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
911 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
912 struct icp_qat_uclo_encapme *image;
913 int a;
915 for (a = 0; a < obj_handle->uimage_num; a++) {
916 image = &obj_handle->ae_uimage[a];
917 image->uwords_num = image->page->beg_addr_p +
918 image->page->micro_words_num;
/*
 * qat_uclo_parse_uof_obj() - parse the already-mapped UOF object:
 * allocate the uword copy buffer, check platform compatibility, map the
 * string table, images, init-mem table and AE bindings, and program AE
 * modes.
 *
 * Return: 0 on success; -ENOMEM or -EINVAL/-EFAULT on failure, with
 * everything allocated here freed again.
 */
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	/* uwords are stored as 6 bytes each in the UOF */
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	/* undo qat_uclo_map_uimage()'s per-image page allocations */
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}
/*
 * qat_uclo_wr_mimage() - write the MMP image at @addr_ptr to device SRAM
 * at offset 0, rounding the byte count up to a whole 32-bit word.
 */
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
			void *addr_ptr, int mem_size)
{
	qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
}
/*
 * qat_uclo_map_uof_obj() - entry point: copy the caller's UOF buffer,
 * validate the file header, map the object chunk and parse it, storing
 * the resulting objhandle in @handle.
 *
 * NOTE(review): every failure path returns -ENOMEM, even format errors;
 * callers appear to treat any non-zero as fatal so this is only a
 * cosmetic inaccuracy.
 */
int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
			 void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	/* ae_mask bitmap must be wide enough for every possible AE index */
	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;
	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	/* keep a private copy so the caller may free its buffer */
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}
/*
 * qat_uclo_del_uof_obj() - tear down everything qat_uclo_map_uof_obj()
 * and the parse step allocated: uword buffer, per-image pages, per-AE
 * slice data, the object header wrapper and the UOF copy itself.
 * Safe to call when no object is mapped.
 */
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (!obj_handle)
		return;

	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);

	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}
/*
 * qat_uclo_fill_uwords() - produce the uword for relative address @raddr
 * of @encap_page into *@uword: the 6-byte instruction from the matching
 * uword block if one covers @raddr, otherwise (or if the stored word is
 * the invalid marker) the @fill pattern.
 */
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			/* uwords are packed 6-byte entities in the UOF */
			memcpy(&uwrd, (void *)(((unsigned long)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			/* mask off bit 46 (per-word marker in the UOF) */
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}
/*
 * qat_uclo_wr_uimage_raw_page() - copy a whole encapsulated code page
 * into AE @ae's control store, in UWORD_CPYBUF_SIZE batches. Gaps inside
 * the page are filled with the images' common fill pattern.
 */
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}
/*
 * qat_uclo_wr_uimage_page() - for every AE assigned to @image, write the
 * image's default page into ustore, record the current page per context,
 * and start the assigned contexts at the image's entry address.
 */
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	/* 0xff = all 8 contexts, 0x55 = even contexts in 4-ctx mode */
	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/* load the default page and set assigned CTX PC
	 * to the entrypoint address */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}
/*
 * qat_uclo_wr_all_uimage() - load every mapped image: run global
 * initialisation once, then for each image pre-fill its ustore and write
 * its page, setting the contexts live.
 *
 * Return: 0 on success, -EINVAL on any failure.
 */
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}