4 * Copyright (C) Cavium networks Ltd. 2016.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #ifndef _THUNDERX_NICVF_HW_DEFS_H
34 #define _THUNDERX_NICVF_HW_DEFS_H
/* Virtual function register offsets */

#define NIC_VF_CFG                      (0x000020)
#define NIC_VF_PF_MAILBOX_0_1           (0x000130)
#define NIC_VF_INT                      (0x000200)
#define NIC_VF_INT_W1S                  (0x000220)
#define NIC_VF_ENA_W1C                  (0x000240)
#define NIC_VF_ENA_W1S                  (0x000260)

/* Per-vNIC RSS and statistics registers */
#define NIC_VNIC_RSS_CFG                (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4            (0x002200)
#define NIC_VNIC_TX_STAT_0_4            (0x004000)
#define NIC_VNIC_RX_STAT_0_13           (0x004100)
#define NIC_VNIC_RQ_GEN_CFG             (0x010010)
/* Completion queue (CQ) registers, queues 0..7 of a queue set */
#define NIC_QSET_CQ_0_7_CFG             (0x010400)
#define NIC_QSET_CQ_0_7_CFG2            (0x010408)
#define NIC_QSET_CQ_0_7_THRESH          (0x010410)
#define NIC_QSET_CQ_0_7_BASE            (0x010420)
#define NIC_QSET_CQ_0_7_HEAD            (0x010428)
#define NIC_QSET_CQ_0_7_TAIL            (0x010430)
#define NIC_QSET_CQ_0_7_DOOR            (0x010438)
#define NIC_QSET_CQ_0_7_STATUS          (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2         (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG           (0x010450)

/* Receive queue (RQ) registers */
#define NIC_QSET_RQ_0_7_CFG             (0x010600)
#define NIC_QSET_RQ_0_7_STATUS0         (0x010700)
#define NIC_QSET_RQ_0_7_STATUS1         (0x010708)

/* Send queue (SQ) registers */
#define NIC_QSET_SQ_0_7_CFG             (0x010800)
#define NIC_QSET_SQ_0_7_THRESH          (0x010810)
#define NIC_QSET_SQ_0_7_BASE            (0x010820)
#define NIC_QSET_SQ_0_7_HEAD            (0x010828)
#define NIC_QSET_SQ_0_7_TAIL            (0x010830)
#define NIC_QSET_SQ_0_7_DOOR            (0x010838)
#define NIC_QSET_SQ_0_7_STATUS          (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG           (0x010848)
#define NIC_QSET_SQ_0_7_STATUS0         (0x010900)
#define NIC_QSET_SQ_0_7_STATUS1         (0x010908)

/* Receive buffer descriptor ring (RBDR) registers, rings 0..1 */
#define NIC_QSET_RBDR_0_1_CFG           (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH        (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE          (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD          (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL          (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR          (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0       (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1       (0x010C48)
#define NIC_QSET_RBDR_0_1_PRFCH_STATUS  (0x010C50)
/* vNIC HW Constants */

/* Queue-set index is encoded starting at this bit of a register offset */
#define NIC_Q_NUM_SHIFT                 18

#define MAX_QUEUE_SET                   128
#define MAX_RCV_QUEUES_PER_QS           8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS   2
#define MAX_SND_QUEUES_PER_QS           8
#define MAX_CMP_QUEUES_PER_QS           8

/* Bit positions of the interrupt sources in NIC_VF_INT / NIC_VF_ENA_* */
#define NICVF_INTR_CQ_SHIFT             0
#define NICVF_INTR_SQ_SHIFT             8
#define NICVF_INTR_RBDR_SHIFT           16
#define NICVF_INTR_PKT_DROP_SHIFT       20
#define NICVF_INTR_TCP_TIMER_SHIFT      21
#define NICVF_INTR_MBOX_SHIFT           22
#define NICVF_INTR_QS_ERR_SHIFT         23

#define NICVF_QS_RQ_DIS_APAD_SHIFT      22

/* 8 CQ bits, 8 SQ bits, 2 RBDR bits, then single-bit sources */
#define NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK        (1 << NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK       (1 << NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK            (1 << NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK          (1 << NICVF_INTR_QS_ERR_SHIFT)
#define NICVF_INTR_ALL_MASK             (0x7FFFFF)
/* Completion queue status bits (NIC_QSET_CQ_0_7_STATUS) */
#define NICVF_CQ_WR_FULL                (1ULL << 26)
#define NICVF_CQ_WR_DISABLE             (1ULL << 25)
#define NICVF_CQ_WR_FAULT               (1ULL << 24)
/* OR of all three CQ write-error conditions above */
#define NICVF_CQ_ERR_MASK               (NICVF_CQ_WR_FULL |\
                                         NICVF_CQ_WR_DISABLE |\
                                         NICVF_CQ_WR_FAULT)
#define NICVF_CQ_CQE_COUNT_MASK         (0xFFFF)
/* Send queue status bits (NIC_QSET_SQ_0_7_STATUS) */
#define NICVF_SQ_ERR_STOPPED            (1ULL << 21)
#define NICVF_SQ_ERR_SEND               (1ULL << 20)
#define NICVF_SQ_ERR_DPE                (1ULL << 19)
/* OR of all three SQ error conditions above */
#define NICVF_SQ_ERR_MASK               (NICVF_SQ_ERR_STOPPED |\
                                         NICVF_SQ_ERR_SEND |\
                                         NICVF_SQ_ERR_DPE)
#define NICVF_SQ_STATUS_STOPPED_BIT     (21)
/* RBDR status fields (NIC_QSET_RBDR_0_1_STATUS0) */
#define NICVF_RBDR_FIFO_STATE_SHIFT     (62)
#define NICVF_RBDR_FIFO_STATE_MASK      (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT)
#define NICVF_RBDR_COUNT_MASK           (0x7FFFF)

/* Queue reset bits in the respective queue CFG registers */
#define NICVF_CQ_RESET                  (1ULL << 41)
#define NICVF_SQ_RESET                  (1ULL << 17)
#define NICVF_RBDR_RESET                (1ULL << 43)
/* RSS constants */
#define NIC_MAX_RSS_HASH_BITS           (8)
#define NIC_MAX_RSS_IDR_TBL_SIZE        (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE               (5) /* 320 bit key, in 64-bit words */
#define RSS_HASH_KEY_BYTE_SIZE          (40) /* 320 bit key, in bytes */

/* Hash-enable flags for NIC_VNIC_RSS_CFG */
#define RSS_L2_EXTENDED_HASH_ENA        (1 << 0)
#define RSS_IP_ENA                      (1 << 1)
#define RSS_TCP_ENA                     (1 << 2)
#define RSS_TCP_SYN_ENA                 (1 << 3)
#define RSS_UDP_ENA                     (1 << 4)
#define RSS_L4_EXTENDED_ENA             (1 << 5)
#define RSS_L3_BI_DIRECTION_ENA         (1 << 7)
#define RSS_L4_BI_DIRECTION_ENA         (1 << 8)
#define RSS_TUN_VXLAN_ENA               (1 << 9)
#define RSS_TUN_GENEVE_ENA              (1 << 10)
#define RSS_TUN_NVGRE_ENA               (1 << 11)
/* Supported receive buffer descriptor ring sizes */
#define RBDR_QUEUE_SZ_8K                (8 * 1024)
#define RBDR_QUEUE_SZ_16K               (16 * 1024)
#define RBDR_QUEUE_SZ_32K               (32 * 1024)
#define RBDR_QUEUE_SZ_64K               (64 * 1024)
#define RBDR_QUEUE_SZ_128K              (128 * 1024)
#define RBDR_QUEUE_SZ_256K              (256 * 1024)
#define RBDR_QUEUE_SZ_512K              (512 * 1024)
#define RBDR_QUEUE_SZ_MAX               RBDR_QUEUE_SZ_512K

#define RBDR_SIZE_SHIFT                 (13) /* 8k */

/* Supported send queue sizes */
#define SND_QUEUE_SZ_1K                 (1 * 1024)
#define SND_QUEUE_SZ_2K                 (2 * 1024)
#define SND_QUEUE_SZ_4K                 (4 * 1024)
#define SND_QUEUE_SZ_8K                 (8 * 1024)
#define SND_QUEUE_SZ_16K                (16 * 1024)
#define SND_QUEUE_SZ_32K                (32 * 1024)
#define SND_QUEUE_SZ_64K                (64 * 1024)
#define SND_QUEUE_SZ_MAX                SND_QUEUE_SZ_64K

#define SND_QSIZE_SHIFT                 (10) /* 1k */

/* Supported completion queue sizes */
#define CMP_QUEUE_SZ_1K                 (1 * 1024)
#define CMP_QUEUE_SZ_2K                 (2 * 1024)
#define CMP_QUEUE_SZ_4K                 (4 * 1024)
#define CMP_QUEUE_SZ_8K                 (8 * 1024)
#define CMP_QUEUE_SZ_16K                (16 * 1024)
#define CMP_QUEUE_SZ_32K                (32 * 1024)
#define CMP_QUEUE_SZ_64K                (64 * 1024)
#define CMP_QUEUE_SZ_MAX                CMP_QUEUE_SZ_64K

#define CMP_QSIZE_SHIFT                 (10) /* 1k */

/* Range of the encoded queue-size field (size = min size << value) */
#define NICVF_QSIZE_MIN_VAL             (0)
#define NICVF_QSIZE_MAX_VAL             (6)
/* Min/Max packet size */
#define NIC_HW_MIN_FRS                  (64)
/* ETH_HLEN(14) + ETH_FCS_LEN(4) + 2 * VLAN_HLEN(4) */
#define NIC_HW_L2_OVERHEAD              (26)
#define NIC_HW_MAX_MTU                  (9190)
#define NIC_HW_MAX_FRS                  (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
#define NIC_HW_MAX_SEGS                 (12)
/* Descriptor alignments */
#define NICVF_RBDR_BASE_ALIGN_BYTES     (128) /* 7 bits */
#define NICVF_CQ_BASE_ALIGN_BYTES       (512) /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES       (128) /* 7 bits */

/* Word indexes of the RB pointer inside an RX completion entry */
#define NICVF_CQE_RBPTR_WORD            (6)
#define NICVF_CQE_RX2_RBPTR_WORD        (7)

/* Compile-time check; stringified condition doubles as the message */
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
/* Sanity check that the VF is the primary one (sqs_mode == 0) */
#define assert_primary(nic) assert((nic)->sqs_mode == 0)
/* Physical (DMA) address as programmed into the vNIC hardware; always 64-bit. */
typedef uint64_t nicvf_phys_addr_t;
/* The bitfield layouts below depend on the compiler's byte-order macro. */
#ifndef __BYTE_ORDER__
#error __BYTE_ORDER__ not defined
#endif
/* vNIC HW Enumerations */

/* Load transaction type used when the HW reads send-descriptor data */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD,       /* 0: ordinary cached load */
	NIC_SEND_LD_TYPE_E_LDT,       /* 1: transient (non-allocating) load */
	NIC_SEND_LD_TYPE_E_LDWB,      /* 2: load and cache write-back */
	NIC_SEND_LD_TYPE_E_ENUM_LAST, /* 3: number of valid values */
};
232 enum ether_type_algorithm {
237 ETYPE_ALG_VLAN_STRIP,
244 L3TYPE_IPV4_OPTIONS = 0x5,
246 L3TYPE_IPV6_OPTIONS = 0x7,
247 L3TYPE_ET_STOP = 0xD,
251 #define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1)
252 #define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06)
267 /* CPI and RSSI configuration */
268 enum cpi_algorithm_type {
275 enum rss_algorithm_type {
290 RSS_HASH_TCP_SYN_DIS,
298 /* Completion queue entry types */
302 CQE_TYPE_RX_SPLIT = 0x3,
303 CQE_TYPE_RX_TCP = 0x4,
305 CQE_TYPE_SEND_PTP = 0x9,
308 enum cqe_rx_tcp_status {
309 CQE_RX_STATUS_VALID_TCP_CNXT,
310 CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
313 enum cqe_send_status {
314 CQE_SEND_STATUS_GOOD,
315 CQE_SEND_STATUS_DESC_FAULT = 0x01,
316 CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
317 CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
318 CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
319 CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
320 CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
321 CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
322 CQE_SEND_STATUS_LOCK_VIOL = 0x84,
323 CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
324 CQE_SEND_STATUS_DATA_FAULT = 0x86,
325 CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
326 CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
327 CQE_SEND_STATUS_MEM_FAULT = 0x89,
328 CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
329 CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
332 enum cqe_rx_tcp_end_reason {
333 CQE_RX_TCP_END_FIN_FLAG_DET,
334 CQE_RX_TCP_END_INVALID_FLAG,
335 CQE_RX_TCP_END_TIMEOUT,
336 CQE_RX_TCP_END_OUT_OF_SEQ,
337 CQE_RX_TCP_END_PKT_ERR,
338 CQE_RX_TCP_END_QS_DISABLED = 0x0F,
341 /* Packet protocol level error enumeration */
342 enum cqe_rx_err_level {
349 /* Packet protocol level error type enumeration */
350 enum cqe_rx_err_opcode {
352 CQE_RX_ERR_RE_PARTIAL,
353 CQE_RX_ERR_RE_JABBER,
354 CQE_RX_ERR_RE_FCS = 0x7,
355 CQE_RX_ERR_RE_TERMINATE = 0x9,
356 CQE_RX_ERR_RE_RX_CTL = 0xb,
357 CQE_RX_ERR_PREL2_ERR = 0x1f,
358 CQE_RX_ERR_L2_FRAGMENT = 0x20,
359 CQE_RX_ERR_L2_OVERRUN = 0x21,
360 CQE_RX_ERR_L2_PFCS = 0x22,
361 CQE_RX_ERR_L2_PUNY = 0x23,
362 CQE_RX_ERR_L2_MAL = 0x24,
363 CQE_RX_ERR_L2_OVERSIZE = 0x25,
364 CQE_RX_ERR_L2_UNDERSIZE = 0x26,
365 CQE_RX_ERR_L2_LENMISM = 0x27,
366 CQE_RX_ERR_L2_PCLP = 0x28,
367 CQE_RX_ERR_IP_NOT = 0x41,
368 CQE_RX_ERR_IP_CHK = 0x42,
369 CQE_RX_ERR_IP_MAL = 0x43,
370 CQE_RX_ERR_IP_MALD = 0x44,
371 CQE_RX_ERR_IP_HOP = 0x45,
372 CQE_RX_ERR_L3_ICRC = 0x46,
373 CQE_RX_ERR_L3_PCLP = 0x47,
374 CQE_RX_ERR_L4_MAL = 0x61,
375 CQE_RX_ERR_L4_CHK = 0x62,
376 CQE_RX_ERR_UDP_LEN = 0x63,
377 CQE_RX_ERR_L4_PORT = 0x64,
378 CQE_RX_ERR_TCP_FLAG = 0x65,
379 CQE_RX_ERR_TCP_OFFSET = 0x66,
380 CQE_RX_ERR_L4_PCLP = 0x67,
381 CQE_RX_ERR_RBDR_TRUNC = 0x70,
384 enum send_l4_csum_type {
385 SEND_L4_CSUM_DISABLE,
396 enum send_load_type {
402 enum send_mem_alg_type {
404 SEND_MEMALG_ADD = 0x08,
405 SEND_MEMALG_SUB = 0x09,
406 SEND_MEMALG_ADDLEN = 0x0A,
407 SEND_MEMALG_SUBLEN = 0x0B,
410 enum send_mem_dsz_type {
413 SEND_MEMDSZ_B8 = 0x03,
416 enum sq_subdesc_type {
417 SQ_DESC_TYPE_INVALID,
420 SQ_DESC_TYPE_IMMEDIATE,
444 L4_UDP_GENEVE = 0x09,
458 RBDR_FIFO_STATE_INACTIVE,
459 RBDR_FIFO_STATE_ACTIVE,
460 RBDR_FIFO_STATE_RESET,
461 RBDR_FIFO_STATE_FAIL,
464 enum rq_cache_allocation {
467 RQ_CACHE_ALLOC_FIRST,
471 enum cq_rx_errlvl_e {
480 CQ_RX_ERROP_RE_PARTIAL = 0x1,
481 CQ_RX_ERROP_RE_JABBER = 0x2,
482 CQ_RX_ERROP_RE_FCS = 0x7,
483 CQ_RX_ERROP_RE_TERMINATE = 0x9,
484 CQ_RX_ERROP_RE_RX_CTL = 0xb,
485 CQ_RX_ERROP_PREL2_ERR = 0x1f,
486 CQ_RX_ERROP_L2_FRAGMENT = 0x20,
487 CQ_RX_ERROP_L2_OVERRUN = 0x21,
488 CQ_RX_ERROP_L2_PFCS = 0x22,
489 CQ_RX_ERROP_L2_PUNY = 0x23,
490 CQ_RX_ERROP_L2_MAL = 0x24,
491 CQ_RX_ERROP_L2_OVERSIZE = 0x25,
492 CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
493 CQ_RX_ERROP_L2_LENMISM = 0x27,
494 CQ_RX_ERROP_L2_PCLP = 0x28,
495 CQ_RX_ERROP_IP_NOT = 0x41,
496 CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
497 CQ_RX_ERROP_IP_MAL = 0x43,
498 CQ_RX_ERROP_IP_MALD = 0x44,
499 CQ_RX_ERROP_IP_HOP = 0x45,
500 CQ_RX_ERROP_L3_ICRC = 0x46,
501 CQ_RX_ERROP_L3_PCLP = 0x47,
502 CQ_RX_ERROP_L4_MAL = 0x61,
503 CQ_RX_ERROP_L4_CHK = 0x62,
504 CQ_RX_ERROP_UDP_LEN = 0x63,
505 CQ_RX_ERROP_L4_PORT = 0x64,
506 CQ_RX_ERROP_TCP_FLAG = 0x65,
507 CQ_RX_ERROP_TCP_OFFSET = 0x66,
508 CQ_RX_ERROP_L4_PCLP = 0x67,
509 CQ_RX_ERROP_RBDR_TRUNC = 0x70,
514 CQ_TX_ERROP_DESC_FAULT = 0x10,
515 CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
516 CQ_TX_ERROP_SUBDC_ERR = 0x12,
517 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
518 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
519 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
520 CQ_TX_ERROP_LOCK_VIOL = 0x83,
521 CQ_TX_ERROP_DATA_FAULT = 0x84,
522 CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
523 CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
524 CQ_TX_ERROP_MEM_FAULT = 0x87,
525 CQ_TX_ERROP_CK_OVERLAP = 0x88,
526 CQ_TX_ERROP_CK_OFLOW = 0x89,
527 CQ_TX_ERROP_ENUM_LAST = 0x8a,
530 enum rq_sq_stats_reg_offset {
535 enum nic_stat_vnic_rx_e {
552 enum nic_stat_vnic_tx_e {
560 /* vNIC HW Register structures */
565 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
567 uint64_t stdn_fault:1;
575 uint64_t vlan_found:1;
576 uint64_t vlan_stripped:1;
577 uint64_t vlan2_found:1;
578 uint64_t vlan2_stripped:1;
581 uint64_t l2_present:1;
582 uint64_t err_level:3;
583 uint64_t err_opcode:8;
585 uint64_t err_opcode:8;
586 uint64_t err_level:3;
587 uint64_t l2_present:1;
590 uint64_t vlan2_stripped:1;
591 uint64_t vlan2_found:1;
592 uint64_t vlan_stripped:1;
593 uint64_t vlan_found:1;
601 uint64_t stdn_fault:1;
610 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
615 uint64_t cq_pkt_len:8;
616 uint64_t align_pad:3;
622 uint64_t align_pad:3;
623 uint64_t cq_pkt_len:8;
635 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
637 uint64_t vlan_tci:16;
639 uint64_t vlan2_ptr:8;
641 uint64_t vlan2_ptr:8;
643 uint64_t vlan_tci:16;
652 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
669 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
686 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
703 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
704 uint64_t vlan_found:1;
705 uint64_t vlan_stripped:1;
706 uint64_t vlan2_found:1;
707 uint64_t vlan2_stripped:1;
710 uint64_t inner_l4type:4;
711 uint64_t inner_l3type:4;
713 uint64_t vlan2_ptr:8;
716 uint64_t inner_l3ptr:8;
717 uint64_t inner_l4ptr:8;
719 uint64_t inner_l4ptr:8;
720 uint64_t inner_l3ptr:8;
723 uint64_t vlan2_ptr:8;
725 uint64_t inner_l3type:4;
726 uint64_t inner_l4type:4;
729 uint64_t vlan2_stripped:1;
730 uint64_t vlan2_found:1;
731 uint64_t vlan_stripped:1;
732 uint64_t vlan_found:1;
738 cqe_rx_word0_t word0;
739 cqe_rx_word1_t word1;
740 cqe_rx_word2_t word2;
741 cqe_rx_word3_t word3;
742 cqe_rx_word4_t word4;
743 cqe_rx_word5_t word5;
744 cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */
747 struct cqe_rx_tcp_err_t {
748 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
749 uint64_t cqe_type:4; /* W0 */
752 uint64_t rsvd1:4; /* W1 */
753 uint64_t partial_first:1;
755 uint64_t rbdr_bytes:8;
762 uint64_t rbdr_bytes:8;
764 uint64_t partial_first:1;
769 struct cqe_rx_tcp_t {
770 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
771 uint64_t cqe_type:4; /* W0 */
773 uint64_t cq_tcp_status:8;
775 uint64_t rsvd1:32; /* W1 */
776 uint64_t tcp_cntx_bytes:8;
778 uint64_t tcp_err_bytes:16;
780 uint64_t cq_tcp_status:8;
782 uint64_t cqe_type:4; /* W0 */
784 uint64_t tcp_err_bytes:16;
786 uint64_t tcp_cntx_bytes:8;
787 uint64_t rsvd1:32; /* W1 */
792 #if defined(__BIG_ENDIAN_BITFIELD)
793 uint64_t cqe_type:4; /* W0 */
801 uint64_t send_status:8;
803 uint64_t ptp_timestamp:64; /* W1 */
804 #elif defined(__LITTLE_ENDIAN_BITFIELD)
805 uint64_t send_status:8;
813 uint64_t cqe_type:4; /* W0 */
815 uint64_t ptp_timestamp:64;
819 struct cq_entry_type_t {
820 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
831 struct cq_entry_type_t type;
832 struct cqe_rx_t rx_hdr;
833 struct cqe_rx_tcp_t rx_tcp_hdr;
834 struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
835 struct cqe_send_t cqe_send;
838 NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
840 struct rbdr_entry_t {
841 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
845 uint64_t buf_addr:42;
846 uint64_t cache_align:7;
848 nicvf_phys_addr_t full_addr;
853 uint64_t cache_align:7;
854 uint64_t buf_addr:42;
857 nicvf_phys_addr_t full_addr;
862 NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
864 /* TCP reassembly context */
865 struct rbe_tcp_cnxt_t {
866 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
867 uint64_t tcp_pkt_cnt:12;
869 uint64_t align_hdr_bytes:4;
870 uint64_t align_ptr_bytes:4;
871 uint64_t ptr_bytes:16;
875 uint64_t tcp_end_reason:2;
876 uint64_t tcp_status:4;
878 uint64_t tcp_status:4;
879 uint64_t tcp_end_reason:2;
883 uint64_t ptr_bytes:16;
884 uint64_t align_ptr_bytes:4;
885 uint64_t align_hdr_bytes:4;
887 uint64_t tcp_pkt_cnt:12;
891 /* Always Big endian */
895 uint64_t skip_length:6;
896 uint64_t disable_rss:1;
897 uint64_t disable_tcp_reassembly:1;
904 struct sq_crc_subdesc {
905 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
907 uint64_t crc_ival:32;
908 uint64_t subdesc_type:4;
911 uint64_t crc_insert_pos:16;
912 uint64_t hdr_start:16;
916 uint64_t hdr_start:16;
917 uint64_t crc_insert_pos:16;
920 uint64_t subdesc_type:4;
921 uint64_t crc_ival:32;
926 struct sq_gather_subdesc {
927 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
928 uint64_t subdesc_type:4; /* W0 */
933 uint64_t rsvd1:15; /* W1 */
939 uint64_t subdesc_type:4; /* W0 */
942 uint64_t rsvd1:15; /* W1 */
946 /* SQ immediate subdescriptor */
947 struct sq_imm_subdesc {
948 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
949 uint64_t subdesc_type:4; /* W0 */
953 uint64_t data:64; /* W1 */
957 uint64_t subdesc_type:4; /* W0 */
959 uint64_t data:64; /* W1 */
963 struct sq_mem_subdesc {
964 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
965 uint64_t subdesc_type:4; /* W0 */
972 uint64_t rsvd1:15; /* W1 */
980 uint64_t subdesc_type:4; /* W0 */
983 uint64_t rsvd1:15; /* W1 */
987 struct sq_hdr_subdesc {
988 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
989 uint64_t subdesc_type:4;
991 uint64_t post_cqe:1; /* Post CQE on no error also */
992 uint64_t dont_send:1;
994 uint64_t subdesc_cnt:8;
997 uint64_t csum_inner_l4:2;
998 uint64_t csum_inner_l3:1;
1000 uint64_t l4_offset:8;
1001 uint64_t l3_offset:8;
1003 uint64_t tot_len:20; /* W0 */
1006 uint64_t inner_l4_offset:8;
1007 uint64_t inner_l3_offset:8;
1008 uint64_t tso_start:8;
1010 uint64_t tso_max_paysize:14; /* W1 */
1012 uint64_t tot_len:20;
1014 uint64_t l3_offset:8;
1015 uint64_t l4_offset:8;
1017 uint64_t csum_inner_l3:1;
1018 uint64_t csum_inner_l4:2;
1021 uint64_t subdesc_cnt:8;
1023 uint64_t dont_send:1;
1024 uint64_t post_cqe:1; /* Post CQE on no error also */
1026 uint64_t subdesc_type:4; /* W0 */
1028 uint64_t tso_max_paysize:14;
1030 uint64_t tso_start:8;
1031 uint64_t inner_l3_offset:8;
1032 uint64_t inner_l4_offset:8;
1033 uint64_t rsvd2:24; /* W1 */
1037 /* Each sq entry is 128 bits wide */
1040 struct sq_hdr_subdesc hdr;
1041 struct sq_imm_subdesc imm;
1042 struct sq_gather_subdesc gather;
1043 struct sq_crc_subdesc crc;
1044 struct sq_mem_subdesc mem;
1047 NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
1049 /* Queue config register formats */
1050 struct rq_cfg { union { struct {
1051 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1052 uint64_t reserved_2_63:62;
1054 uint64_t reserved_0:1;
1056 uint64_t reserved_0:1;
1058 uint64_t reserved_2_63:62;
1064 struct cq_cfg { union { struct {
1065 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1066 uint64_t reserved_43_63:21;
1070 uint64_t reserved_35_39:5;
1072 uint64_t reserved_25_31:7;
1074 uint64_t reserved_0_15:16;
1076 uint64_t reserved_0_15:16;
1078 uint64_t reserved_25_31:7;
1080 uint64_t reserved_35_39:5;
1084 uint64_t reserved_43_63:21;
1090 struct sq_cfg { union { struct {
1091 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1092 uint64_t reserved_20_63:44;
1094 uint64_t reserved_18_18:1;
1097 uint64_t reserved_11_15:5;
1099 uint64_t reserved_3_7:5;
1100 uint64_t tstmp_bgx_intf:3;
1102 uint64_t tstmp_bgx_intf:3;
1103 uint64_t reserved_3_7:5;
1105 uint64_t reserved_11_15:5;
1108 uint64_t reserved_18_18:1;
1110 uint64_t reserved_20_63:44;
1116 struct rbdr_cfg { union { struct {
1117 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1118 uint64_t reserved_45_63:19;
1122 uint64_t reserved_36_41:6;
1124 uint64_t reserved_25_31:7;
1126 uint64_t reserved_12_15:4;
1130 uint64_t reserved_12_15:4;
1132 uint64_t reserved_25_31:7;
1134 uint64_t reserved_36_41:6;
1138 uint64_t reserved_45_63:19;
1144 struct pf_qs_cfg { union { struct {
1145 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1146 uint64_t reserved_32_63:32;
1148 uint64_t reserved_27_30:4;
1149 uint64_t sq_ins_ena:1;
1150 uint64_t sq_ins_pos:6;
1151 uint64_t lock_ena:1;
1152 uint64_t lock_viol_cqe_ena:1;
1153 uint64_t send_tstmp_ena:1;
1155 uint64_t reserved_7_15:9;
1159 uint64_t reserved_7_15:9;
1161 uint64_t send_tstmp_ena:1;
1162 uint64_t lock_viol_cqe_ena:1;
1163 uint64_t lock_ena:1;
1164 uint64_t sq_ins_pos:6;
1165 uint64_t sq_ins_ena:1;
1166 uint64_t reserved_27_30:4;
1168 uint64_t reserved_32_63:32;
1174 struct pf_rq_cfg { union { struct {
1175 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1176 uint64_t reserved1:1;
1177 uint64_t reserved0:34;
1178 uint64_t strip_pre_l2:1;
1182 uint64_t rbdr_cont_qs:7;
1183 uint64_t rbdr_cont_idx:1;
1184 uint64_t rbdr_strt_qs:7;
1185 uint64_t rbdr_strt_idx:1;
1187 uint64_t rbdr_strt_idx:1;
1188 uint64_t rbdr_strt_qs:7;
1189 uint64_t rbdr_cont_idx:1;
1190 uint64_t rbdr_cont_qs:7;
1194 uint64_t strip_pre_l2:1;
1195 uint64_t reserved0:34;
1196 uint64_t reserved1:1;
1202 struct pf_rq_drop_cfg { union { struct {
1203 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1204 uint64_t rbdr_red:1;
1206 uint64_t reserved3:14;
1207 uint64_t rbdr_pass:8;
1208 uint64_t rbdr_drop:8;
1209 uint64_t reserved2:8;
1212 uint64_t reserved1:8;
1214 uint64_t reserved1:8;
1217 uint64_t reserved2:8;
1218 uint64_t rbdr_drop:8;
1219 uint64_t rbdr_pass:8;
1220 uint64_t reserved3:14;
1222 uint64_t rbdr_red:1;
1228 #endif /* _THUNDERX_NICVF_HW_DEFS_H */