2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
21 #include <avf/virtchnl.h>
22 #include <avf/avf_advanced_flow.h>
24 #include <vppinfra/types.h>
25 #include <vppinfra/error_bootstrap.h>
26 #include <vppinfra/lock.h>
29 #include <vlib/pci/pci.h>
31 #include <vnet/interface.h>
33 #include <vnet/devices/devices.h>
34 #include <vnet/flow/flow.h>
/* Bounds on the number of descriptors in an RX/TX queue ring. */
#define AVF_QUEUE_SZ_MAX 4096
#define AVF_QUEUE_SZ_MIN 64
/* Poll interval / timeout pairs, in seconds (50e-6 = 50 us);
 * presumably passed to vlib process suspend calls — confirm at call sites. */
#define AVF_AQ_ENQ_SUSPEND_TIME 50e-6
#define AVF_AQ_ENQ_MAX_WAIT_TIME 250e-3
/* Admin queue message buffer size, in bytes. */
#define AVF_AQ_BUF_SIZE 4096
/* VF reset: poll every 20 ms, give up after 1 second. */
#define AVF_RESET_SUSPEND_TIME 20e-3
#define AVF_RESET_MAX_WAIT_TIME 1
/* PF message exchange: poll every 10 ms, give up after 1 second. */
#define AVF_SEND_TO_PF_SUSPEND_TIME 10e-3
#define AVF_SEND_TO_PF_MAX_WAIT_TIME 1
/* RX descriptor qword1 bit layout: status bits in the low word, error bits
 * starting at bit 19, packet type at bit 30, length at bit 38.
 * Fix: macro parameter parenthesized so expression arguments expand
 * correctly (CERT PRE01-C). */
#define AVF_RXD_STATUS(x)  (1ULL << (x))
#define AVF_RXD_STATUS_DD  AVF_RXD_STATUS (0)  /* descriptor done */
#define AVF_RXD_STATUS_EOP AVF_RXD_STATUS (1)  /* end of packet */
#define AVF_RXD_STATUS_FLM AVF_RXD_STATUS (11) /* flow director match — confirm */
#define AVF_RXD_ERROR_SHIFT 19
#define AVF_RXD_PTYPE_SHIFT 30
#define AVF_RXD_LEN_SHIFT 38
/* Longest supported RX buffer chain (multi-descriptor packet). */
#define AVF_RX_MAX_DESC_IN_CHAIN 5
/* Error bits, relative to AVF_RXD_ERROR_SHIFT. */
#define AVF_RXD_ERROR_IPE (1ULL << (AVF_RXD_ERROR_SHIFT + 3)) /* IP csum error — confirm */
#define AVF_RXD_ERROR_L4E (1ULL << (AVF_RXD_ERROR_SHIFT + 4)) /* L4 csum error — confirm */
/* TX data descriptor command field: bits live in qword1 starting at bit 4.
 * Fix: macro parameters parenthesized so expression arguments expand
 * correctly (CERT PRE01-C); e.g. AVF_TXD_CMD_EXT(8, a | b) previously
 * misexpanded because the cast bound only to the first operand. */
#define AVF_TXD_CMD(x)		((1) << ((x) + 4))
#define AVF_TXD_CMD_EXT(x, val) ((u64) (val) << ((x) + 4))
#define AVF_TXD_CMD_EOP AVF_TXD_CMD (0) /* end of packet */
#define AVF_TXD_CMD_RS	AVF_TXD_CMD (1) /* report status (descriptor writeback) — confirm */
#define AVF_TXD_CMD_RSV AVF_TXD_CMD (2) /* reserved bit */
/* IIPT: IP header type / checksum-offload selector (2-bit field at bit 9). */
#define AVF_TXD_CMD_IIPT_NONE	      AVF_TXD_CMD_EXT (5, 0)
#define AVF_TXD_CMD_IIPT_IPV6	      AVF_TXD_CMD_EXT (5, 1)
#define AVF_TXD_CMD_IIPT_IPV4_NO_CSUM AVF_TXD_CMD_EXT (5, 2)
#define AVF_TXD_CMD_IIPT_IPV4	      AVF_TXD_CMD_EXT (5, 3)
/* L4T: L4 protocol selector for checksum offload (2-bit field at bit 12). */
#define AVF_TXD_CMD_L4T_UNKNOWN AVF_TXD_CMD_EXT (8, 0)
#define AVF_TXD_CMD_L4T_TCP	AVF_TXD_CMD_EXT (8, 1)
#define AVF_TXD_CMD_L4T_SCTP	AVF_TXD_CMD_EXT (8, 2)
#define AVF_TXD_CMD_L4T_UDP	AVF_TXD_CMD_EXT (8, 3)
/* TX descriptor offset field: header lengths, each scaled down by its unit
 * (factor) and packed starting at bit 16 of qword1.
 * Fix: macro parameters parenthesized (CERT PRE01-C); previously a 'val'
 * such as '10 + 4' misexpanded because '/' binds tighter than '+'. */
#define AVF_TXD_OFFSET(x, factor, val)                                        \
  (((u64) (val) / (u64) (factor)) << (16 + (x)))
#define AVF_TXD_OFFSET_MACLEN(val) AVF_TXD_OFFSET (0, 2, val)  /* MAC hdr, 2-byte units */
#define AVF_TXD_OFFSET_IPLEN(val)  AVF_TXD_OFFSET (7, 4, val)  /* IP hdr, 4-byte units */
#define AVF_TXD_OFFSET_L4LEN(val)  AVF_TXD_OFFSET (14, 4, val) /* L4 hdr, 4-byte units */
/* TX context descriptor (dtype 1), used for TSO: total segmentation length
 * at bit 30, MSS at bit 50 of qword1.
 * Fix: macro parameters parenthesized (CERT PRE01-C). */
#define AVF_TXD_DTYP_CTX 0x1ULL
/* Reuses the data-descriptor command bit position (bit 4). */
#define AVF_TXD_CTX_CMD_TSO	  AVF_TXD_CMD (0)
#define AVF_TXD_CTX_SEG(val, x)	  (((u64) (val)) << (30 + (x)))
#define AVF_TXD_CTX_SEG_TLEN(val) AVF_TXD_CTX_SEG (val, 0)
#define AVF_TXD_CTX_SEG_MSS(val)  AVF_TXD_CTX_SEG (val, 20)
/* Log classes: general driver messages and (separately rate-controlled)
 * stats polling. */
extern vlib_log_class_registration_t avf_log;
extern vlib_log_class_registration_t avf_stats_log;
/* Logging helpers: every message is prefixed with the device PCI address.
 * NOTE(review): in this extract the err/warn/debug macros appear truncated —
 * each should end with ", ##__VA_ARGS__)" as avf_stats_log_debug below does;
 * confirm against the full source. */
#define avf_log_err(dev, f, ...) \
vlib_log (VLIB_LOG_LEVEL_ERR, avf_log.class, "%U: " f, \
format_vlib_pci_addr, &dev->pci_addr, \
#define avf_log_warn(dev, f, ...) \
vlib_log (VLIB_LOG_LEVEL_WARNING, avf_log.class, "%U: " f, \
format_vlib_pci_addr, &dev->pci_addr, \
#define avf_log_debug(dev, f, ...) \
vlib_log (VLIB_LOG_LEVEL_DEBUG, avf_log.class, "%U: " f, \
format_vlib_pci_addr, &dev->pci_addr, \
#define avf_stats_log_debug(dev, f, ...) \
vlib_log (VLIB_LOG_LEVEL_DEBUG, avf_stats_log.class, "%U: " f, \
format_vlib_pci_addr, &dev->pci_addr, ##__VA_ARGS__)
/* Device flag table: (bit position, enum-name suffix, display string).
 * Note bit 5 is intentionally absent from this list. */
#define foreach_avf_device_flags \
_ (0, INITIALIZED, "initialized") \
_ (1, ERROR, "error") \
_ (2, ADMIN_UP, "admin-up") \
_ (3, VA_DMA, "vaddr-dma") \
_ (4, LINK_UP, "link-up") \
_ (6, ELOG, "elog") \
_ (7, PROMISC, "promisc") \
_ (8, RX_INT, "rx-interrupts") \
_ (9, RX_FLOW_OFFLOAD, "rx-flow-offload")
/* Expands to AVF_DEVICE_F_<name> single-bit mask values (enum wrapper is
 * not visible in this extract). */
#define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
foreach_avf_device_flags
/* Hardware RX/TX descriptor views. NOTE(review): the struct bodies are
 * truncated in this extract; only fragments are visible. The size asserts
 * below pin the layout: RX descriptor = 32 bytes, TX descriptor = 16 bytes. */
typedef volatile struct
u64 filter_status:32;
#ifdef CLIB_HAVE_VEC256
STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
#ifdef CLIB_HAVE_VEC128
STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
/* Per-RX-queue state (struct header truncated in this extract). */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
volatile u32 *qrx_tail; /* RX tail doorbell register — presumably in BAR0; confirm */
avf_rx_desc_t *descs; /* RX descriptor ring */
u8 buffer_pool_index; /* vlib buffer pool used to refill this ring */
/* Per-TX-queue state (struct header truncated in this extract). */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
volatile u32 *qtx_tail; /* TX tail doorbell register — presumably in BAR0; confirm */
clib_spinlock_t lock; /* serializes enqueue when queue is shared — confirm */
avf_tx_desc_t *descs; /* TX descriptor ring */
avf_tx_desc_t *tmp_descs; /* scratch descriptors — purpose not visible here */
struct avf_fdir_conf *rcfg; /* presumably flow-director rule config (avf_advanced_flow.h) */
struct virtchnl_rss_cfg *rss_cfg; /* RSS configuration sent via virtchnl */
} avf_flow_lookup_entry_t;
/* Per-device (per-VF) state. NOTE(review): the avf_device_t struct is
 * heavily truncated in this extract; only selected fields are visible. */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 per_interface_next_index; /* next node override for RX, if set — confirm */
vlib_pci_dev_handle_t pci_dev_handle;
virtchnl_pf_event_t *events; /* presumably a vector of pending PF events */
virtchnl_link_speed_t link_speed;
vlib_pci_addr_t pci_addr;
avf_flow_entry_t *flow_entries;	/* pool */
avf_flow_lookup_entry_t *flow_lookup_entries; /* pool */
virtchnl_eth_stats_t eth_stats; /* latest stats read from the device */
virtchnl_eth_stats_t last_cleared_eth_stats; /* snapshot taken at "clear stats" — confirm */
/* One RX burst handles at most a full vlib frame of packets. */
#define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE
/* Events delivered to the avf process node (enum header not visible in
 * this extract). */
AVF_PROCESS_EVENT_START = 1,
AVF_PROCESS_EVENT_DELETE_IF = 2,
AVF_PROCESS_EVENT_AQ_INT = 3,
AVF_PROCESS_EVENT_REQ = 4,
} avf_process_event_t;
/* Request types serviced by the avf process on behalf of other processes
 * (enum header not visible in this extract). */
AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR = 1,
/* NOTE(review): "MDDE" looks like a typo for "MODE"; renaming would break
 * every user of this identifier, so it is flagged but left as-is. */
AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE = 2,
AVF_PROCESS_REQ_PROGRAM_FLOW = 3,
} avf_process_req_type_t;
/* Argument block for a queued process request (struct header truncated). */
avf_process_req_type_t type;
u32 calling_process_index; /* presumably the process to signal on completion */
int is_add, is_enable;
enum virthnl_adv_ops vc_op;
/* below parameters are used for 'program flow' event */
/* RX tail state for a multi-descriptor packet: qword1 values and buffer
 * indices of the descriptors after the first (struct header truncated). */
u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN - 1];
u32 buffers[AVF_RX_MAX_DESC_IN_CHAIN - 1];
/* Per-thread RX scratch state, sized for one full frame
 * (struct header not visible in this extract). */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ]; /* buffers of the current burst */
u16 next[AVF_RX_VECTOR_SZ]; /* presumably per-packet next-node indices */
u64 qw1s[AVF_RX_VECTOR_SZ]; /* descriptor qword1 per packet */
u32 flow_ids[AVF_RX_VECTOR_SZ]; /* flow IDs per packet (flow offload) */
avf_rx_tail_t tails[AVF_RX_VECTOR_SZ]; /* chain tails for multi-desc packets */
vlib_buffer_t buffer_template; /* template for initializing RX buffers — confirm */
} avf_per_thread_data_t;
/* Global driver state (struct header not visible in this extract). */
avf_device_t **devices; /* pool of device pointers; see avf_get_device () */
avf_per_thread_data_t *per_thread_data;
extern avf_main_t avf_main;
/* Interface-creation arguments (struct heavily truncated here). */
vlib_pci_addr_t addr; /* PCI address of the VF to attach */
} avf_create_if_args_t;
/* Public entry points and node / device-class registrations. */
void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
extern vlib_node_registration_t avf_input_node;
extern vlib_node_registration_t avf_process_node;
extern vnet_device_class_t avf_device_class;
/* Add or delete a hardware flow rule via virtchnl.
 * NOTE(review): this declaration appears truncated in the extract
 * (missing trailing parameter(s) and ';'). */
clib_error_t *avf_program_flow (u32 dev_instance, int is_add,
				enum virthnl_adv_ops vc_op, u8 *rule,
				u32 rule_len, u8 *program_status,
/* format_* helpers used by CLI/show output and packet traces. */
format_function_t format_avf_device;
format_function_t format_avf_device_name;
format_function_t format_avf_input_trace;
format_function_t format_avf_vf_cap_flags;
format_function_t format_avf_vlan_supported_caps;
format_function_t format_avf_vlan_caps;
format_function_t format_avf_vlan_support;
format_function_t format_avf_eth_stats;
vnet_flow_dev_ops_function_t avf_flow_ops_fn;
/* Look a device up by pool index; devices is a pool of pointers, hence the
 * trailing [0] dereference. */
static_always_inline avf_device_t *
avf_get_device (u32 dev_instance)
return pool_elt_at_index (avf_main.devices, dev_instance)[0];
/* Event-log (elog) helpers for register accesses and admin-queue traffic. */
void avf_elog_init ();
void avf_elog_reg (avf_device_t *ad, u32 addr, u32 val, int is_read);
void avf_elog_aq_enq_req (avf_device_t *ad, avf_aq_desc_t *d);
void avf_elog_aq_enq_resp (avf_device_t *ad, avf_aq_desc_t *d);
void avf_elog_arq_desc (avf_device_t *ad, avf_aq_desc_t *d);
/* Read a u32 at a byte offset from start. */
avf_get_u32 (void *start, int offset)
return *(u32 *) (((u8 *) start) + offset);
/* Read a u64 at a byte offset from start. */
avf_get_u64 (void *start, int offset)
return *(u64 *) (((u8 *) start) + offset);
/* Extract bit field [first:last] of the u32 at the given offset;
 * full-width case is special-cased to avoid an over-wide shift. */
avf_get_u32_bits (void *start, int offset, int first, int last)
u32 value = avf_get_u32 (start, offset);
if ((last == 0) && (first == 31))
/* NOTE(review): mask is built from an int '1'; first - last + 1 == 31
 * shifts into the sign bit — consider 1U here. */
value &= (1 << (first - last + 1)) - 1;
/* Extract bit field [first:last] of the u64 at the given offset. */
avf_get_u64_bits (void *start, int offset, int first, int last)
u64 value = avf_get_u64 (start, offset);
if ((last == 0) && (first == 63))
/* NOTE(review): mask is built from an int '1'; fields wider than 31 bits
 * make this shift undefined — should be 1ULL. Confirm no such callers. */
value &= (1 << (first - last + 1)) - 1;
/* Store a u32 at a byte offset from start. */
avf_set_u32 (void *start, int offset, u32 value)
(*(u32 *) (((u8 *) start) + offset)) = value;
/* MMIO register write with optional elog tracing; the release store orders
 * earlier descriptor writes before the register (doorbell) update. */
avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
if (ad->flags & AVF_DEVICE_F_ELOG)
avf_elog_reg (ad, addr, val, 0);
__atomic_store_n ((u32 *) ((u8 *) ad->bar0 + addr), val, __ATOMIC_RELEASE);
/* MMIO register read with optional elog tracing. */
avf_reg_read (avf_device_t * ad, u32 addr)
u32 val = *(volatile u32 *) (ad->bar0 + addr);
if (ad->flags & AVF_DEVICE_F_ELOG)
avf_elog_reg (ad, addr, val, 1);
/* Flush posted MMIO writes by reading RSTAT; compiler barrier only. */
avf_reg_flush (avf_device_t * ad)
avf_reg_read (ad, AVFGEN_RSTAT);
asm volatile ("":::"memory");
/* Queue tail (doorbell) update. NOTE(review): the #if guard selecting
 * between the direct-store intrinsic and the release store is not visible
 * in this extract — confirm against the full source. */
avf_tail_write (volatile u32 *addr, u32 val)
_directstoreu_u32 ((void *) addr, val);
clib_atomic_store_rel_n (addr, val);
/* True when the EOP (end-of-packet) status bit is clear, i.e. the packet
 * continues in the next descriptor of the chain. */
static_always_inline int
avf_rxd_is_not_eop (avf_rx_desc_t * d)
return (d->qword[1] & AVF_RXD_STATUS_EOP) == 0;
/* True when the DD (descriptor done) bit is clear, i.e. hardware has not
 * finished with this descriptor yet. */
static_always_inline int
avf_rxd_is_not_dd (avf_rx_desc_t * d)
return (d->qword[1] & AVF_RXD_STATUS_DD) == 0;
u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN]; /* trace field — enclosing struct truncated */
/* TX error counter table: (name, description) pairs. */
#define foreach_avf_tx_func_error \
_(SEGMENT_SIZE_EXCEEDED, "segment size exceeded") \
_(NO_FREE_SLOTS, "no free tx slots")
/* Expands to AVF_TX_ERROR_<name> enum values. */
#define _(f,s) AVF_TX_ERROR_##f,
foreach_avf_tx_func_error
} avf_tx_func_error_t;
499 * fd.io coding-style-patch-verification: ON
502 * eval: (c-set-style "gnu")