include/standard-headers/rdma/vmw_pvrdma-abi.h (qemu/ar7.git)
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __VMW_PVRDMA_ABI_H__
#define __VMW_PVRDMA_ABI_H__

#include "standard-headers/linux/types.h"

#define PVRDMA_UVERBS_ABI_VERSION   3           /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK      0x00FFFFFF  /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET        0           /* QP doorbell. */
#define PVRDMA_UAR_QP_SEND          (1 << 30)   /* Send bit. */
#define PVRDMA_UAR_QP_RECV          (1 << 31)   /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET        4           /* CQ doorbell. */
#define PVRDMA_UAR_CQ_ARM_SOL       (1 << 29)   /* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM           (1 << 30)   /* Arm bit. */
#define PVRDMA_UAR_CQ_POLL          (1 << 31)   /* Poll bit. */
#define PVRDMA_UAR_SRQ_OFFSET       8           /* SRQ doorbell. */
#define PVRDMA_UAR_SRQ_RECV         (1 << 30)   /* Recv bit. */
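
/*
 * Illustrative sketch, not part of the ABI header: one plausible way a
 * user-level driver could compose a QP send doorbell from the masks above,
 * keeping the QP handle in the low 24 bits and the operation in the high
 * bits. The uar mapping and the qp_handle parameter are assumptions made
 * for this example only.
 */
static inline void example_ring_qp_send_doorbell(volatile uint32_t *uar,
                                                 uint32_t qp_handle)
{
    uint32_t val = (qp_handle & PVRDMA_UAR_HANDLE_MASK) | PVRDMA_UAR_QP_SEND;

    /* QP doorbells sit at byte offset PVRDMA_UAR_QP_OFFSET in the UAR page. */
    uar[PVRDMA_UAR_QP_OFFSET / sizeof(uint32_t)] = val;
}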

enum pvrdma_wr_opcode {
        PVRDMA_WR_RDMA_WRITE,
        PVRDMA_WR_RDMA_WRITE_WITH_IMM,
        PVRDMA_WR_SEND,
        PVRDMA_WR_SEND_WITH_IMM,
        PVRDMA_WR_RDMA_READ,
        PVRDMA_WR_ATOMIC_CMP_AND_SWP,
        PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
        PVRDMA_WR_LSO,
        PVRDMA_WR_SEND_WITH_INV,
        PVRDMA_WR_RDMA_READ_WITH_INV,
        PVRDMA_WR_LOCAL_INV,
        PVRDMA_WR_FAST_REG_MR,
        PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
        PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        PVRDMA_WR_BIND_MW,
        PVRDMA_WR_REG_SIG_MR,
        PVRDMA_WR_ERROR,
};

enum pvrdma_wc_status {
        PVRDMA_WC_SUCCESS,
        PVRDMA_WC_LOC_LEN_ERR,
        PVRDMA_WC_LOC_QP_OP_ERR,
        PVRDMA_WC_LOC_EEC_OP_ERR,
        PVRDMA_WC_LOC_PROT_ERR,
        PVRDMA_WC_WR_FLUSH_ERR,
        PVRDMA_WC_MW_BIND_ERR,
        PVRDMA_WC_BAD_RESP_ERR,
        PVRDMA_WC_LOC_ACCESS_ERR,
        PVRDMA_WC_REM_INV_REQ_ERR,
        PVRDMA_WC_REM_ACCESS_ERR,
        PVRDMA_WC_REM_OP_ERR,
        PVRDMA_WC_RETRY_EXC_ERR,
        PVRDMA_WC_RNR_RETRY_EXC_ERR,
        PVRDMA_WC_LOC_RDD_VIOL_ERR,
        PVRDMA_WC_REM_INV_RD_REQ_ERR,
        PVRDMA_WC_REM_ABORT_ERR,
        PVRDMA_WC_INV_EECN_ERR,
        PVRDMA_WC_INV_EEC_STATE_ERR,
        PVRDMA_WC_FATAL_ERR,
        PVRDMA_WC_RESP_TIMEOUT_ERR,
        PVRDMA_WC_GENERAL_ERR,
};

enum pvrdma_wc_opcode {
        PVRDMA_WC_SEND,
        PVRDMA_WC_RDMA_WRITE,
        PVRDMA_WC_RDMA_READ,
        PVRDMA_WC_COMP_SWAP,
        PVRDMA_WC_FETCH_ADD,
        PVRDMA_WC_BIND_MW,
        PVRDMA_WC_LSO,
        PVRDMA_WC_LOCAL_INV,
        PVRDMA_WC_FAST_REG_MR,
        PVRDMA_WC_MASKED_COMP_SWAP,
        PVRDMA_WC_MASKED_FETCH_ADD,
        PVRDMA_WC_RECV = 1 << 7,
        PVRDMA_WC_RECV_RDMA_WITH_IMM,
};

enum pvrdma_wc_flags {
        PVRDMA_WC_GRH                   = 1 << 0,
        PVRDMA_WC_WITH_IMM              = 1 << 1,
        PVRDMA_WC_WITH_INVALIDATE       = 1 << 2,
        PVRDMA_WC_IP_CSUM_OK            = 1 << 3,
        PVRDMA_WC_WITH_SMAC             = 1 << 4,
        PVRDMA_WC_WITH_VLAN             = 1 << 5,
        PVRDMA_WC_WITH_NETWORK_HDR_TYPE = 1 << 6,
        PVRDMA_WC_FLAGS_MAX             = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
};
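
/*
 * Illustrative sketch, not part of the ABI header: the pvrdma_wc_flags bits
 * are intended to be tested against a completion's wc_flags word, e.g. to
 * check whether immediate data accompanies the completion.
 */
static inline int example_wc_has_imm_data(uint32_t wc_flags)
{
    return (wc_flags & PVRDMA_WC_WITH_IMM) != 0;
}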

struct pvrdma_alloc_ucontext_resp {
        uint32_t qp_tab_size;
        uint32_t reserved;
};

struct pvrdma_alloc_pd_resp {
        uint32_t pdn;
        uint32_t reserved;
};

struct pvrdma_create_cq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_cq_resp {
        uint32_t cqn;
        uint32_t reserved;
};

struct pvrdma_resize_cq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_srq {
        uint64_t __attribute__((aligned(8))) buf_addr;
        uint32_t buf_size;
        uint32_t reserved;
};

struct pvrdma_create_srq_resp {
        uint32_t srqn;
        uint32_t reserved;
};

struct pvrdma_create_qp {
        uint64_t __attribute__((aligned(8))) rbuf_addr;
        uint64_t __attribute__((aligned(8))) sbuf_addr;
        uint32_t rbuf_size;
        uint32_t sbuf_size;
        uint64_t __attribute__((aligned(8))) qp_addr;
};

struct pvrdma_create_qp_resp {
        uint32_t qpn;
        uint32_t qp_handle;
};

/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
        uint64_t __attribute__((aligned(8))) swap_val;
        uint64_t __attribute__((aligned(8))) compare_val;
        uint64_t __attribute__((aligned(8))) swap_mask;
        uint64_t __attribute__((aligned(8))) compare_mask;
};

/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
        uint64_t __attribute__((aligned(8))) add_val;
        uint64_t __attribute__((aligned(8))) field_boundary;
};

/* PVRDMA address vector. */
struct pvrdma_av {
        uint32_t port_pd;
        uint32_t sl_tclass_flowlabel;
        uint8_t dgid[16];
        uint8_t src_path_bits;
        uint8_t gid_index;
        uint8_t stat_rate;
        uint8_t hop_limit;
        uint8_t dmac[6];
        uint8_t reserved[6];
};

/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
        uint64_t __attribute__((aligned(8))) addr;
        uint32_t length;
        uint32_t lkey;
};

/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
        uint64_t __attribute__((aligned(8))) wr_id;     /* wr id */
        uint32_t num_sge;       /* size of s/g array */
        uint32_t total_len;     /* reserved */
};
/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
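
/*
 * Illustrative sketch, not part of the ABI header: the comment above implies
 * that a receive WQE is laid out as the fixed header immediately followed by
 * num_sge pvrdma_sge entries. Locating the s/g array from a header pointer
 * is shown here as an assumption about that layout.
 */
static inline struct pvrdma_sge *example_rq_wqe_sge(struct pvrdma_rq_wqe_hdr *hdr)
{
    /* The s/g array starts right after the fixed-size header. */
    return (struct pvrdma_sge *)(hdr + 1);
}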

/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
        uint64_t __attribute__((aligned(8))) wr_id;     /* wr id */
        uint32_t num_sge;       /* size of s/g array */
        uint32_t total_len;     /* reserved */
        uint32_t opcode;        /* operation type */
        uint32_t send_flags;    /* wr flags */
        union {
                uint32_t imm_data;
                uint32_t invalidate_rkey;
        } ex;
        uint32_t reserved;
        union {
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint32_t rkey;
                        uint8_t reserved[4];
                } rdma;
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint64_t __attribute__((aligned(8))) compare_add;
                        uint64_t __attribute__((aligned(8))) swap;
                        uint32_t rkey;
                        uint32_t reserved;
                } atomic;
                struct {
                        uint64_t __attribute__((aligned(8))) remote_addr;
                        uint32_t log_arg_sz;
                        uint32_t rkey;
                        union {
                                struct pvrdma_ex_cmp_swap cmp_swap;
                                struct pvrdma_ex_fetch_add fetch_add;
                        } wr_data;
                } masked_atomics;
                struct {
                        uint64_t __attribute__((aligned(8))) iova_start;
                        uint64_t __attribute__((aligned(8))) pl_pdir_dma;
                        uint32_t page_shift;
                        uint32_t page_list_len;
                        uint32_t length;
                        uint32_t access_flags;
                        uint32_t rkey;
                        uint32_t reserved;
                } fast_reg;
                struct {
                        uint32_t remote_qpn;
                        uint32_t remote_qkey;
                        struct pvrdma_av av;
                } ud;
        } wr;
};
/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
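
/*
 * Illustrative sketch, not part of the ABI header: filling the send WQE
 * header for an RDMA write, assuming the same ring layout as the receive
 * side (header first, then num_sge scatter/gather entries). The function
 * name and its wqe/raddr/rkey/sge parameters are assumptions made for this
 * example only.
 */
static inline void example_build_rdma_write_wqe(struct pvrdma_sq_wqe_hdr *wqe,
                                                uint64_t raddr, uint32_t rkey,
                                                const struct pvrdma_sge *sge)
{
    wqe->wr_id = 1;                         /* caller's cookie, echoed back in the CQE */
    wqe->opcode = PVRDMA_WR_RDMA_WRITE;     /* operation type from pvrdma_wr_opcode */
    wqe->send_flags = 0;                    /* flag values are defined outside this header */
    wqe->num_sge = 1;                       /* one gather entry follows the header */
    wqe->wr.rdma.remote_addr = raddr;       /* destination address on the remote node */
    wqe->wr.rdma.rkey = rkey;               /* remote memory key */
    ((struct pvrdma_sge *)(wqe + 1))[0] = *sge;
}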

/* Completion queue element. */
struct pvrdma_cqe {
        uint64_t __attribute__((aligned(8))) wr_id;
        uint64_t __attribute__((aligned(8))) qp;
        uint32_t opcode;
        uint32_t status;
        uint32_t byte_len;
        uint32_t imm_data;
        uint32_t src_qp;
        uint32_t wc_flags;
        uint32_t vendor_err;
        uint16_t pkey_index;
        uint16_t slid;
        uint8_t sl;
        uint8_t dlid_path_bits;
        uint8_t port_num;
        uint8_t smac[6];
        uint8_t network_hdr_type;
        uint8_t reserved2[6]; /* Pad to next power of 2 (64). */
};

#endif /* __VMW_PVRDMA_ABI_H__ */