/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd.
 * All rights reserved.
 */

#ifndef XSC_ABI_USER_H
#define XSC_ABI_USER_H

#include <linux/types.h>
#include <linux/if_ether.h>	/* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>
/* Work-queue creation flags; presumably passed in xsc_ib_create_qp.flags —
 * TODO confirm against the kernel-side QP creation path.
 */
enum {
	XSC_WQ_FLAG_SIGNATURE		= 1 << 0,
};
|  | 17 | + | 
/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
|  | 24 | + | 
/* Request payload for user-context allocation.  No parameters are needed
 * yet; the reserved words only keep room for future extension.
 */
struct xsc_ib_alloc_ucontext_req {
	__u32	rsvd0;	/* reserved for future use */
	__u32	rsvd1;	/* reserved for future use */
};
|  | 29 | + | 
/* Bitmask reported in xsc_ib_alloc_ucontext_resp.cmds_supp_uhw; each bit
 * appears to advertise a verb that accepts a user-hardware (UHW) payload —
 * verify against the kernel command handlers.
 */
enum xsc_user_cmds_supp_uhw {
	XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	XSC_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};
|  | 34 | + | 
/* Response for user-context allocation: device limits and the doorbell
 * register addresses userspace needs to drive queues directly.
 *
 * NOTE(review): members end at offset 68 but __aligned_u64 gives the
 * struct 8-byte alignment, so sizeof is 72 — 4 bytes of *implicit* tail
 * padding.  UAPI convention is to name that padding with a reserved
 * field; consider doing so before the ABI is frozen.
 */
struct xsc_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;	/* max send-queue descriptor size */
	__u16	max_rq_desc_sz;	/* max receive-queue descriptor size */
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u16	num_ports;
	__u16	device_id;
	/* Doorbell/register addresses for userspace mapping — presumably
	 * physical or offset values to be mmap()ed; TODO confirm.
	 */
	__aligned_u64	qpm_tx_db;
	__aligned_u64	qpm_rx_db;
	__aligned_u64	cqm_next_cid_reg;
	__aligned_u64	cqm_armdb;
	__u32	send_ds_num;
	__u32	recv_ds_num;
	__u32	cmds_supp_uhw;	/* bitmask of enum xsc_user_cmds_supp_uhw */
};
|  | 52 | + | 
/* Userspace input for QP creation: the user-allocated work-queue buffer
 * and its geometry.
 */
struct xsc_ib_create_qp {
	__aligned_u64 buf_addr;	/* user VA of the WQE buffer */
	__aligned_u64 db_addr;	/* user VA of the doorbell record */
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;	/* log2 of the RQ WQE stride */
	__u32	flags;
};
|  | 61 | + | 
/* Response for QP creation. */
struct xsc_ib_create_qp_resp {
	__u32   bfreg_index;	/* blue-flame register index — TODO confirm */
	__u32   resv;		/* explicit padding to 8 bytes */
};
|  | 66 | + | 
/* Userspace input for CQ creation: the user-allocated CQE buffer and the
 * size of each CQE.
 *
 * The two __aligned_u64 members give this struct 8-byte alignment, so
 * the original three members (20 bytes) left 4 bytes of *implicit* tail
 * padding (sizeof == 24).  Name that padding explicitly so the layout is
 * unambiguous and the kernel never consumes an uninitialized hole; the
 * struct size and all member offsets are unchanged.
 */
struct xsc_ib_create_cq {
	__aligned_u64 buf_addr;	/* user VA of the CQE buffer */
	__aligned_u64 db_addr;	/* user VA of the CQ doorbell record */
	__u32	cqe_size;	/* size of a single CQE in bytes */
	__u32	reserved;	/* explicit tail padding; must be zero */
};
|  | 72 | + | 
/* Response for CQ creation. */
struct xsc_ib_create_cq_resp {
	__u32	cqn;		/* hardware CQ number */
	__u32	reserved;	/* explicit padding to 8 bytes */
};
|  | 77 | + | 
/* Response for address-handle creation (16 bytes, no implicit padding). */
struct xsc_ib_create_ah_resp {
	__u32	response_length;	/* bytes of this response actually filled */
	__u8	dmac[ETH_ALEN];		/* resolved destination MAC */
	__u8	reserved[6];		/* explicit padding */
};
|  | 83 | + | 
/* Response for protection-domain allocation. */
struct xsc_ib_alloc_pd_resp {
	__u32	pdn;	/* hardware PD number */
};
|  | 87 | + | 
/* TSO capabilities reported by query-device. */
struct xsc_ib_tso_caps {
	__u32 max_tso; /* Maximum TSO payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
};
|  | 97 | + | 
/* RX hash function flags. */
enum xsc_rx_hash_function_flags {
	XSC_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};
|  | 102 | + | 
/*
 * RX hash flags: select which fields of an incoming packet participate
 * in the RX hash calculation.  Setting a flag includes the field it
 * represents in the hash input.
 * Note: the *IPV4 and *IPV6 flags can't be enabled together on the same
 * QP, and the *TCP and *UDP flags can't be enabled together on the same
 * QP.
 */
enum xsc_rx_hash_fields {
	XSC_RX_HASH_SRC_IPV4	= 1 << 0,
	XSC_RX_HASH_DST_IPV4	= 1 << 1,
	XSC_RX_HASH_SRC_IPV6	= 1 << 2,
	XSC_RX_HASH_DST_IPV6	= 1 << 3,
	XSC_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	XSC_RX_HASH_DST_PORT_TCP	= 1 << 5,
	XSC_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	XSC_RX_HASH_DST_PORT_UDP	= 1 << 7,
	XSC_RX_HASH_IPSEC_SPI		= 1 << 8,
	/* Save bits for future fields */
	XSC_RX_HASH_INNER		= (1UL << 31),
};
|  | 124 | + | 
/* RSS capabilities reported by query-device (16 bytes, explicit padding). */
struct xsc_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum xsc_rx_hash_fields */
	__u8 rx_hash_function; /* enum xsc_rx_hash_function_flags */
	__u8 reserved[7];
};
|  | 130 | + | 
/* CQE compression mini-CQE result formats — bitmask, one bit per format. */
enum xsc_ib_cqe_comp_res_format {
	XSC_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	XSC_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	XSC_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
|  | 136 | + | 
/* CQE compression capabilities reported by query-device. */
struct xsc_ib_cqe_comp_caps {
	__u32 max_num;	/* max number of compressed CQEs — TODO confirm unit */
	__u32 supported_format; /* bitmask of enum xsc_ib_cqe_comp_res_format */
};
|  | 141 | + | 
/* Packet-pacing capability flags. */
enum xsc_ib_packet_pacing_cap_flags {
	XSC_IB_PP_SUPPORT_BURST	= 1 << 0,
};
|  | 145 | + | 
/* Packet-pacing (rate-limit) capabilities reported by query-device. */
struct xsc_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kbps */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u8  cap_flags; /* enum xsc_ib_packet_pacing_cap_flags */
	__u8  reserved[3]; /* explicit padding */
};
|  | 158 | + | 
/* Multi-packet WQE (MPW) capability flags.
 * NOTE(review): MPW_RESERVED lacks the XSC_ prefix and so leaks an
 * unqualified name into every including translation unit — consider
 * renaming before the ABI is frozen.
 */
enum xsc_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	XSC_IB_ALLOW_MPW	= 1 << 1,
	XSC_IB_SUPPORT_EMPW	= 1 << 2,
};
|  | 164 | + | 
/* Software-parsing offload flags. */
enum xsc_ib_sw_parsing_offloads {
	XSC_IB_SW_PARSING = 1 << 0,
	XSC_IB_SW_PARSING_CSUM = 1 << 1,
	XSC_IB_SW_PARSING_LSO = 1 << 2,
};
|  | 170 | + | 
/* Software-parsing capabilities reported by query-device. */
struct xsc_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads; /* bitmask of enum xsc_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
};
|  | 180 | + | 
/* Striding (multi-packet) RQ capabilities reported by query-device.
 * The log_num_of_* fields are log2 bounds on stride size and per-WQE
 * stride count.
 */
struct xsc_ib_striding_rq_caps {
	__u32 min_single_stride_log_num_of_bytes;
	__u32 max_single_stride_log_num_of_bytes;
	__u32 min_single_wqe_log_num_of_strides;
	__u32 max_single_wqe_log_num_of_strides;

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u32 reserved;	/* explicit padding to 8 bytes */
};
|  | 194 | + | 
/* Flags for xsc_ib_query_device_resp.flags. */
enum xsc_ib_query_dev_resp_flags {
	/* Support 128B CQE compression */
	XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
	XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD  = 1 << 1,
};
|  | 200 | + | 
/* Tunnel stateless-offload flags. */
enum xsc_ib_tunnel_offloads {
	XSC_IB_TUNNELED_OFFLOADS_VXLAN  = 1 << 0,
	XSC_IB_TUNNELED_OFFLOADS_GRE    = 1 << 1,
	XSC_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
	XSC_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
	XSC_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
|  | 208 | + | 
/* Extended query-device response carrying all driver-specific caps.
 * response_length presumably reports how many bytes the kernel actually
 * filled, so older/newer userspace can interoperate — TODO confirm.
 */
struct xsc_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;
	struct	xsc_ib_tso_caps tso_caps;
	struct	xsc_ib_rss_caps rss_caps;
	struct	xsc_ib_cqe_comp_caps cqe_comp_caps;
	struct	xsc_packet_pacing_caps packet_pacing_caps;
	__u32	xsc_ib_support_multi_pkt_send_wqes;
	__u32	flags; /* Use enum xsc_ib_query_dev_resp_flags */
	struct xsc_ib_sw_parsing_caps sw_parsing_caps;
	struct xsc_ib_striding_rq_caps striding_rq_caps;
	__u32	tunnel_offloads_caps; /* enum xsc_ib_tunnel_offloads */
	__u32	reserved;	/* explicit padding to 8 bytes */
};
|  | 223 | + | 
/* Burst shaping parameters for rate-limited QPs (see modify-QP). */
struct xsc_ib_burst_info {
	__u32       max_burst_sz;	/* max burst size — TODO confirm unit */
	__u16       typical_pkt_sz;	/* typical packet size in bytes */
	__u16       reserved;		/* explicit padding */
};
|  | 229 | + | 
/* Userspace input for modify-QP (16 bytes, no implicit padding). */
struct xsc_ib_modify_qp {
	__u32			   comp_mask;	/* which optional fields are valid */
	struct xsc_ib_burst_info  burst_info;
	__u32			   reserved;
};
|  | 235 | + | 
/* Response for modify-QP. */
struct xsc_ib_modify_qp_resp {
	__u32	response_length;	/* bytes of this response actually filled */
	__u32	dctn;			/* presumably the DCT number — TODO confirm */
};
|  | 240 | + | 
#endif /* XSC_ABI_USER_H */