2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
21 #include <avf/virtchnl.h>
/* Admin-queue enqueue polling parameters (seconds):
   sleep this long between polls, give up after the max wait. */
#define AVF_AQ_ENQ_SUSPEND_TIME		50e-6
#define AVF_AQ_ENQ_MAX_WAIT_TIME	50e-3
/* RX descriptor qword1 layout helpers.
   FIX: parenthesize the macro argument — AVF_RXD_STATUS(a + b) previously
   expanded to (1ULL << a + b), shifting by (a + b) only by luck of the
   caller; any expression argument with lower precedence than << broke. */
#define AVF_RXD_STATUS(x)		(1ULL << (x))
#define AVF_RXD_STATUS_DD		AVF_RXD_STATUS (0)	/* descriptor done */
#define AVF_RXD_STATUS_EOP		AVF_RXD_STATUS (1)	/* end of packet */
#define AVF_RXD_ERROR_SHIFT		19
#define AVF_RXD_PTYPE_SHIFT		30
#define AVF_RXD_LEN_SHIFT		38
#define AVF_RX_MAX_DESC_IN_CHAIN	5

#define AVF_RXD_ERROR_IPE		(1ULL << (AVF_RXD_ERROR_SHIFT + 3))	/* IP csum error */
#define AVF_RXD_ERROR_L4E		(1ULL << (AVF_RXD_ERROR_SHIFT + 4))	/* L4 csum error */
/* TX descriptor command bits (bits 4..6 of the command field).
   FIX: parenthesize the macro argument so expression arguments expand
   correctly — (1 << (x + 4)) was safe only for simple arguments. */
#define AVF_TXD_CMD(x)			(1 << ((x) + 4))
#define AVF_TXD_CMD_EOP			AVF_TXD_CMD (0)	/* end of packet */
#define AVF_TXD_CMD_RS			AVF_TXD_CMD (1)	/* report status */
#define AVF_TXD_CMD_RSV			AVF_TXD_CMD (2)
44 #define avf_log_err(dev, f, ...) \
45 vlib_log (VLIB_LOG_LEVEL_ERR, avf_main.log_class, "%U: " f, \
46 format_vlib_pci_addr, &dev->pci_addr, \
49 #define avf_log_warn(dev, f, ...) \
50 vlib_log (VLIB_LOG_LEVEL_WARNING, avf_main.log_class, "%U: " f, \
51 format_vlib_pci_addr, &dev->pci_addr, \
54 #define avf_log_debug(dev, f, ...) \
55 vlib_log (VLIB_LOG_LEVEL_DEBUG, avf_main.log_class, "%U: " f, \
56 format_vlib_pci_addr, &dev->pci_addr, \
59 #define foreach_avf_device_flags \
60 _(0, INITIALIZED, "initialized") \
61 _(1, ERROR, "error") \
62 _(2, ADMIN_UP, "admin-up") \
63 _(3, VA_DMA, "vaddr-dma") \
64 _(4, LINK_UP, "link-up") \
65 _(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
70 #define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
71 foreach_avf_device_flags
75 typedef volatile struct
92 #ifdef CLIB_HAVE_VEC256
98 STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
100 typedef volatile struct
105 #ifdef CLIB_HAVE_VEC128
111 STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
115 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
116 volatile u32 *qrx_tail;
119 avf_rx_desc_t *descs;
123 u8 buffer_pool_index;
128 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
129 volatile u32 *qtx_tail;
132 clib_spinlock_t lock;
133 avf_tx_desc_t *descs;
141 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
143 u32 per_interface_next_index;
148 vlib_pci_dev_handle_t pci_dev_handle;
168 virtchnl_pf_event_t *events;
178 virtchnl_link_speed_t link_speed;
179 vlib_pci_addr_t pci_addr;
182 virtchnl_eth_stats_t eth_stats;
/* Per-thread RX burst size — one full vlib frame's worth of buffers. */
#define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE
/* Events delivered to the AVF process node. */
typedef enum
{
  AVF_PROCESS_EVENT_START = 1,	/* device initialized, begin servicing */
  AVF_PROCESS_EVENT_STOP = 2,	/* device going away, stop servicing */
  AVF_PROCESS_EVENT_AQ_INT = 3,	/* admin-queue interrupt pending */
} avf_process_event_t;
199 u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN - 1];
200 u32 buffers[AVF_RX_MAX_DESC_IN_CHAIN - 1];
205 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
206 vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
207 u64 qw1s[AVF_RX_VECTOR_SZ];
208 avf_rx_tail_t tails[AVF_RX_VECTOR_SZ];
209 vlib_buffer_t buffer_template;
210 } avf_per_thread_data_t;
216 avf_device_t *devices;
217 avf_per_thread_data_t *per_thread_data;
219 vlib_log_class_t log_class;
222 extern avf_main_t avf_main;
226 vlib_pci_addr_t addr;
236 } avf_create_if_args_t;
238 void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
239 void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);
241 extern vlib_node_registration_t avf_input_node;
242 extern vnet_device_class_t avf_device_class;
245 format_function_t format_avf_device;
246 format_function_t format_avf_device_name;
247 format_function_t format_avf_input_trace;
250 avf_get_u32 (void *start, int offset)
252 return *(u32 *) (((u8 *) start) + offset);
256 avf_get_u64 (void *start, int offset)
258 return *(u64 *) (((u8 *) start) + offset);
262 avf_get_u32_bits (void *start, int offset, int first, int last)
264 u32 value = avf_get_u32 (start, offset);
265 if ((last == 0) && (first == 31))
268 value &= (1 << (first - last + 1)) - 1;
273 avf_get_u64_bits (void *start, int offset, int first, int last)
275 u64 value = avf_get_u64 (start, offset);
276 if ((last == 0) && (first == 63))
279 value &= (1 << (first - last + 1)) - 1;
284 avf_set_u32 (void *start, int offset, u32 value)
286 (*(u32 *) (((u8 *) start) + offset)) = value;
290 avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
292 *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
296 avf_reg_read (avf_device_t * ad, u32 addr)
298 return *(volatile u32 *) (ad->bar0 + addr);
302 avf_reg_flush (avf_device_t * ad)
304 avf_reg_read (ad, AVFGEN_RSTAT);
305 asm volatile ("":::"memory");
308 static_always_inline int
309 avf_rxd_is_not_eop (avf_rx_desc_t * d)
311 return (d->qword[1] & AVF_RXD_STATUS_EOP) == 0;
314 static_always_inline int
315 avf_rxd_is_not_dd (avf_rx_desc_t * d)
317 return (d->qword[1] & AVF_RXD_STATUS_DD) == 0;
324 u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN];
/* TX-path error counters, expanded via a caller-supplied _(name, desc). */
#define foreach_avf_tx_func_error \
  _(NO_FREE_SLOTS, "no free tx slots")
332 #define _(f,s) AVF_TX_ERROR_##f,
333 foreach_avf_tx_func_error
336 } avf_tx_func_error_t;
341 * fd.io coding-style-patch-verification: ON
344 * eval: (c-set-style "gnu")