/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <avf/virtchnl.h>

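/* Per-device state flags; the x-macro below is expanded into
   AVF_DEVICE_F_* bit masks kept in the flags member of avf_device_t. */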
#define foreach_avf_device_flags \
  _(0, INITIALIZED, "initialized") \
  _(1, ERROR, "error") \
  _(2, ADMIN_UP, "admin-up") \
  _(4, LINK_UP, "link-up") \
  _(5, SHARED_TXQ_LOCK, "shared-txq-lock")

enum
{
#define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
  foreach_avf_device_flags
#undef _
};

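/* The enum above expands, for example, to
     AVF_DEVICE_F_INITIALIZED = (1 << 0),
     AVF_DEVICE_F_LINK_UP = (1 << 4),
   so individual bits can be tested with (ad->flags & AVF_DEVICE_F_LINK_UP). */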
typedef volatile struct
{
  union
  {
    /* ... */
    u64 qword[4];
#ifdef CLIB_HAVE_VEC256
    u64x4 as_u64x4;
#endif
  };
} avf_rx_desc_t;

STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);

typedef volatile struct
{
  union
  {
    /* ... */
    u64 qword[2];
#ifdef CLIB_HAVE_VEC128
    u64x2 as_u64x2;
#endif
  };
} avf_tx_desc_t;

STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);

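/* Per-queue state. qrx_tail / qtx_tail point at the queue's tail (doorbell)
   register in the device BAR, written to hand new descriptors to the
   adapter. */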
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *qrx_tail;
  /* ... */
} avf_rxq_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *qtx_tail;
  /* ... */
} avf_txq_t;

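/* Per-interface (VF) state: device flags, PCI handle and mapped BAR0,
   plus PF events, link speed and statistics received over virtchnl. */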
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 flags;
  u32 per_interface_next_index;
  /* ... */
  vlib_pci_dev_handle_t pci_dev_handle;
  void *bar0;
  /* ... */
  virtchnl_pf_event_t *events;
  /* ... */
  virtchnl_link_speed_t link_speed;
  /* ... */
  virtchnl_eth_stats_t eth_stats;
} avf_device_t;

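/* Per-packet record produced by the input node for each received
   descriptor. */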
typedef struct
{
  /* ... */
} avf_rx_vector_entry_t;

STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);

#define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE

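/* Events signalled to the avf process node. */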
typedef enum
{
  AVF_PROCESS_EVENT_START = 1,
  AVF_PROCESS_EVENT_STOP = 2,
  AVF_PROCESS_EVENT_AQ_INT = 3,
} avf_process_event_t;

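/* Per-worker-thread scratch state. rx_vector holds one entry per packet for
   at most a full VLIB frame (AVF_RX_VECTOR_SZ == VLIB_FRAME_SIZE);
   buffer_template seeds the metadata of buffers handed to the graph. */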
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ];
  /* ... */
  vlib_buffer_t buffer_template;
} avf_per_thread_data_t;

STATIC_ASSERT (VNET_DEVICE_INPUT_N_NEXT_NODES < 256, "too many next nodes");

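/* Global plugin state. */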
typedef struct
{
  avf_device_t *devices;
  avf_per_thread_data_t *per_thread_data;
  vlib_physmem_region_index_t physmem_region;
  int physmem_region_alloc;

  vlib_log_class_t log_class;

  /* 256 element array for ptype based lookup */
  /* ... */
} avf_main_t;

extern avf_main_t avf_main;

typedef struct
{
  vlib_pci_addr_t addr;
  /* ... */
} avf_create_if_args_t;

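/* Interface create/delete entry points. Minimal usage sketch (fields other
   than addr are not shown in this header and are illustrative only):

     avf_create_if_args_t args = { 0 };
     args.addr.as_u32 = ...;	 // PCI address of the VF
     avf_create_if (vm, &args);
*/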
void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);

extern vlib_node_registration_t avf_input_node;
extern vnet_device_class_t avf_device_class;
uword avf_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
			vlib_frame_t * frame);

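/* Formatting helpers for show/trace output. */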
format_function_t format_avf_device;
format_function_t format_avf_device_name;
format_function_t format_avf_input_trace;

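/* Inline helpers for reading/writing words inside descriptors and admin
   queue messages. The *_bits variants return the inclusive bit range
   [last, first] of the word at the given byte offset; e.g. (offsets and bit
   positions purely illustrative) avf_get_u32_bits (d, 8, 18, 13) extracts
   the 6-bit field occupying bits 13..18 of the u32 at byte offset 8. */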
static inline u32
avf_get_u32 (void *start, int offset)
{
  return *(u32 *) (((u8 *) start) + offset);
}

static inline u64
avf_get_u64 (void *start, int offset)
{
  return *(u64 *) (((u8 *) start) + offset);
}

static inline u32
avf_get_u32_bits (void *start, int offset, int first, int last)
{
  u32 value = avf_get_u32 (start, offset);
  if ((last == 0) && (first == 31))
    return value;
  value >>= last;
  value &= (1 << (first - last + 1)) - 1;
  return value;
}

static inline u64
avf_get_u64_bits (void *start, int offset, int first, int last)
{
  u64 value = avf_get_u64 (start, offset);
  if ((last == 0) && (first == 63))
    return value;
  value >>= last;
  value &= (1 << (first - last + 1)) - 1;
  return value;
}

static inline void
avf_set_u32 (void *start, int offset, u32 value)
{
  (*(u32 *) (((u8 *) start) + offset)) = value;
}

static inline void
avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
{
  *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
}

static inline u32
avf_reg_read (avf_device_t * ad, u32 addr)
{
  return *(volatile u32 *) (ad->bar0 + addr);
}

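/* Read back a register and issue a compiler barrier so that preceding
   (posted) MMIO writes are pushed out to the device before we continue. */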
static inline void
avf_reg_flush (avf_device_t * ad)
{
  avf_reg_read (ad, AVFGEN_RSTAT);
  asm volatile ("":::"memory");
}

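/* Per-packet trace record captured by the avf input node and printed by
   format_avf_input_trace. */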
typedef struct
{
  /* ... */
  avf_rx_vector_entry_t rxve;
} avf_input_trace_t;

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */