2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
21 #include <avf/virtchnl.h>
/* Device state flags: (bit position, enum suffix, human-readable name).
   NOTE(review): flag bit 3 is not visible in this chunk of the file —
   confirm against the full header before adding or renumbering flags. */
#define foreach_avf_device_flags \
_(0, INITIALIZED, "initialized") \
_(1, ERROR, "error") \
_(2, ADMIN_UP, "admin-up") \
_(4, LINK_UP, "link-up") \
_(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
/* Expand each flag above into an AVF_DEVICE_F_<name> single-bit mask. */
#define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
foreach_avf_device_flags
/* Hardware RX descriptor. Declared volatile because the device writes it.
   NOTE(review): interior fields are not visible in this chunk; the size
   is pinned to 32 bytes by the assert below. */
typedef volatile struct
#ifdef CLIB_HAVE_VEC256
STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
/* Hardware TX descriptor. Declared volatile because the device reads it.
   NOTE(review): interior fields are not visible in this chunk; the size
   is pinned to 16 bytes by the assert below. */
typedef volatile struct
#ifdef CLIB_HAVE_VEC128
STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
/* Per-RX-queue state (remaining fields elided in this view).
   Cache-line aligned to avoid false sharing between queues. */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
volatile u32 *qrx_tail;		/* presumably the mapped RX tail doorbell register — confirm */
/* Per-TX-queue state (remaining fields elided in this view).
   Cache-line aligned to avoid false sharing between queues. */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
volatile u32 *qtx_tail;		/* presumably the mapped TX tail doorbell register — confirm */
/* Per-device state (many fields elided in this view). */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* NOTE(review): presumably overrides the next graph node for this
   interface's input traffic, per the usual VPP convention — confirm. */
u32 per_interface_next_index;
vlib_pci_dev_handle_t pci_dev_handle;
virtchnl_pf_event_t *events;	/* PF events delivered via virtchnl — TODO confirm producer */
virtchnl_link_speed_t link_speed;
virtchnl_eth_stats_t eth_stats;	/* last stats snapshot received from the PF — TODO confirm */
} avf_rx_vector_entry_t;

/* The RX vector below is sized per frame; keep each entry at 8 bytes. */
STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
/* One RX vector entry per slot of a vlib frame. */
#define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE

/* Events sent to the AVF process node. Values start at 1 so that 0 is
   never a valid event. */
AVF_PROCESS_EVENT_START = 1,
AVF_PROCESS_EVENT_STOP = 2,
AVF_PROCESS_EVENT_AQ_INT = 3,	/* AQ presumably = admin queue interrupt — confirm */
} avf_process_event_t;
/* Per-worker-thread scratch state; cache-line aligned so threads do not
   false-share. (Some fields may be elided in this view.) */
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ];	/* one entry per frame slot */
/* NOTE(review): presumably copied into each received buffer's metadata
   on the RX path — confirm against the input node. */
vlib_buffer_t buffer_template;
} avf_per_thread_data_t;
STATIC_ASSERT (VNET_DEVICE_INPUT_N_NEXT_NODES < 256, "too many next nodes");

/* Plugin main struct (some fields elided in this view). */
avf_device_t *devices;	/* all attached devices — TODO confirm pool vs. vector */
avf_per_thread_data_t *per_thread_data;	/* indexed by thread index */
vlib_physmem_region_index_t physmem_region;
int physmem_region_alloc;	/* presumably non-zero once the region is allocated — confirm */
vlib_log_class_t log_class;

/* 256 element array for ptype based lookup */

/* Single global instance, defined in the plugin's .c file. */
extern avf_main_t avf_main;
/* Arguments for avf_create_if (some fields elided in this view). */
vlib_pci_addr_t addr;	/* PCI address of the VF to attach */
} avf_create_if_args_t;
/* Create / delete an AVF interface. Both are implemented in the plugin's
   device code; error reporting conventions are not visible here — TODO
   confirm how failures are surfaced to the caller. */
void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);

extern vlib_node_registration_t avf_input_node;
extern vnet_device_class_t avf_device_class;

/* Format helpers used by the CLI and the packet tracer. */
format_function_t format_avf_device;
format_function_t format_avf_device_name;
format_function_t format_avf_input_trace;
226 avf_get_u32 (void *start, int offset)
228 return *(u32 *) (((u8 *) start) + offset);
232 avf_get_u64 (void *start, int offset)
234 return *(u64 *) (((u8 *) start) + offset);
238 avf_get_u32_bits (void *start, int offset, int first, int last)
240 u32 value = avf_get_u32 (start, offset);
241 if ((last == 0) && (first == 31))
244 value &= (1 << (first - last + 1)) - 1;
249 avf_get_u64_bits (void *start, int offset, int first, int last)
251 u64 value = avf_get_u64 (start, offset);
252 if ((last == 0) && (first == 63))
255 value &= (1 << (first - last + 1)) - 1;
260 avf_set_u32 (void *start, int offset, u32 value)
262 (*(u32 *) (((u8 *) start) + offset)) = value;
266 avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
268 *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
272 avf_reg_read (avf_device_t * ad, u32 addr)
274 return *(volatile u32 *) (ad->bar0 + addr);
/* Flush posted MMIO writes: read a harmless status register so the
   writes ahead of it are forced to complete, then issue a compiler
   barrier so the read itself cannot be reordered or elided. */
avf_reg_flush (avf_device_t * ad)
avf_reg_read (ad, AVFGEN_RSTAT);
asm volatile ("":::"memory");
288 avf_rx_vector_entry_t rxve;
294 * fd.io coding-style-patch-verification: ON
297 * eval: (c-set-style "gnu")