2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <avf/virtchnl.h>
/* X-macro list of per-device state flags: _(bit, NAME, "print-name").
   NOTE(review): this excerpt skips entries (e.g. bit 3) — those are
   elided lines, not necessarily unused bits; confirm against full file. */
22 #define foreach_avf_device_flags \
23 _(0, INITIALIZED, "initialized") \
24 _(1, ERROR, "error") \
25 _(2, ADMIN_UP, "admin-up") \
27 _(4, LINK_UP, "link-up") \
28 _(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
/* Expand the list into AVF_DEVICE_F_* single-bit mask values; the
   enclosing enum braces and the matching #undef _ are outside this
   excerpt. */
33 #define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
34 foreach_avf_device_flags
/* Descriptor structs are DMA'd by the NIC, so their layout must match
   the hardware exactly: 32-byte RX and 16-byte TX descriptors
   (presumably per the Intel AVF/i40e descriptor formats — verify
   against the datasheet). */
43 STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
54 STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
/* RX queue struct (fragment; opener/other members elided).  Cache-line
   aligned to avoid false sharing; qrx_tail points at the memory-mapped
   RX ring tail doorbell register, hence volatile. */
58 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
59 volatile u32 *qrx_tail;
/* TX queue struct (fragment).  qtx_tail is the MMIO TX ring tail
   doorbell, written to hand descriptors to the NIC. */
69 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
70 volatile u32 *qtx_tail;
/* avf_device_t (fragment): per-NIC instance state.  Most members are
   elided in this excerpt. */
81 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Next node index override for the device-input graph path. */
83 u32 per_interface_next_index;
/* Handle used with the vlib PCI layer for BAR/interrupt access. */
88 vlib_pci_dev_handle_t pci_dev_handle;
/* Vector of PF events received over the virtchnl admin queue. */
104 virtchnl_pf_event_t *events;
114 virtchnl_link_speed_t link_speed;
/* Stats as reported by the PF via VIRTCHNL_OP_GET_STATS. */
117 virtchnl_eth_stats_t eth_stats;
/* Closing of a separate struct: compact per-packet RX metadata entry
   (members elided above). */
129 } avf_rx_vector_entry_t;
/* Kept to 8 bytes so the per-thread rx_vector stays small/cache-friendly. */
131 STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
/* One RX vector holds at most one vlib frame's worth of packets. */
133 #define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE
/* Events sent to the AVF process node (enum opener elided; values
   start at 1, presumably so 0 can mean "no event" — confirm). */
137 AVF_PROCESS_EVENT_START = 1,
138 AVF_PROCESS_EVENT_STOP = 2,
139 AVF_PROCESS_EVENT_AQ_INT = 3,
140 } avf_process_event_t;
/* Per-worker-thread scratch state (struct opener elided).  Cache-line
   aligned so threads never share a line. */
144 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Staging area for RX descriptor metadata, one frame's worth. */
145 avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ];
/* Template copied into each rx vlib_buffer_t to initialize it cheaply. */
147 vlib_buffer_t buffer_template;
148 } avf_per_thread_data_t;
/* Next-node indices are stored in a u8 somewhere in this driver —
   guard that assumption at compile time. */
157 STATIC_ASSERT (VNET_DEVICE_INPUT_N_NEXT_NODES < 256, "too many next nodes");
/* avf_main_t (fragment): global driver state (struct opener elided). */
/* Pool/vector of device instances. */
161 avf_device_t *devices;
/* Indexed by thread index. */
162 avf_per_thread_data_t *per_thread_data;
/* Physmem region used for DMA-able rings/buffers; the alloc flag
   presumably records whether the region was created — confirm in .c. */
163 vlib_physmem_region_index_t physmem_region;
164 int physmem_region_alloc;
166 vlib_log_class_t log_class;
168 /* 256 element array for ptype based lookup */
/* Single global instance, defined in the driver's main .c file. */
172 extern avf_main_t avf_main;
/* avf_create_if_args_t (fragment): arguments for interface creation;
   addr selects the PCI VF to attach (other members elided). */
176 vlib_pci_addr_t addr;
181 } avf_create_if_args_t;
/* Public driver API: create/delete an AVF interface. */
183 void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
184 void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);
/* Graph node / device class registrations, defined elsewhere. */
186 extern vlib_node_registration_t avf_input_node;
187 extern vnet_device_class_t avf_device_class;
/* TX function wired into the device class. */
188 uword avf_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
189 vlib_frame_t * frame);
/* format()-style helpers for CLI / trace output. */
192 format_function_t format_avf_device;
193 format_function_t format_avf_device_name;
194 format_function_t format_avf_input_trace;
/* Read a u32 at a byte offset from 'start' (descriptor memory).
   NOTE(review): plain pointer cast — assumes the target is suitably
   aligned and that aliasing rules are satisfied by project convention
   (descriptors are raw DMA memory); confirm if reused elsewhere. */
197 avf_get_u32 (void *start, int offset)
199 return *(u32 *) (((u8 *) start) + offset);
/* Same as avf_get_u32 but reads a u64. */
203 avf_get_u64 (void *start, int offset)
205 return *(u64 *) (((u8 *) start) + offset);
/* Extract bit-field [last..first] (inclusive, last <= first) from the
   u32 at 'offset'.  The elided lines shift the value right by 'last'
   and return it; the full-word case returns early without masking. */
209 avf_get_u32_bits (void *start, int offset, int first, int last)
211 u32 value = avf_get_u32 (start, offset);
212 if ((last == 0) && (first == 31))
/* NOTE(review): when the span is exactly 31 bits this computes
   (1 << 31) - 1 with a signed int — 1 << 31 is formally UB; using
   (1u << ...) would be strictly conforming.  Verify no caller asks
   for a 31-bit span, or harden. */
215 value &= (1 << (first - last + 1)) - 1;
/* Extract bit-field [last..first] (inclusive) from the u64 at
   'offset'; full-word fast path returns early, otherwise the elided
   lines shift right by 'last' before masking. */
220 avf_get_u64_bits (void *start, int offset, int first, int last)
222 u64 value = avf_get_u64 (start, offset);
223 if ((last == 0) && (first == 63))
/* BUG NOTE(review): the mask is built with int-width arithmetic.  For
   any span wider than 31 bits, 1 << (first - last + 1) shifts an int
   by >= 32 — undefined behavior, and the mask is wrong.  Should be
   ((u64) 1 << (first - last + 1)) - 1.  Not fixed here because the
   function's elided lines prevent a safe in-place rewrite. */
226 value &= (1 << (first - last + 1)) - 1;
/* Store a u32 at a byte offset from 'start' — inverse of avf_get_u32;
   same alignment/aliasing assumptions apply. */
231 avf_set_u32 (void *start, int offset, u32 value)
233 (*(u32 *) (((u8 *) start) + offset)) = value;
/* Write a 32-bit device register at byte offset 'addr' into BAR0.
   volatile qualifies the MMIO access so it is not elided/reordered by
   the compiler. */
237 avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
239 *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
/* Read a 32-bit device register at byte offset 'addr' from BAR0.
   NOTE(review): unlike avf_reg_write this omits the (u8 *) cast on
   bar0 — arithmetic on void* is a GNU extension; harmless with GCC
   but inconsistent with the write path. */
243 avf_reg_read (avf_device_t * ad, u32 addr)
245 return *(volatile u32 *) (ad->bar0 + addr);
/* Flush posted MMIO writes by reading a harmless status register,
   then issue a compiler barrier so subsequent accesses are not
   reordered before the flush. */
249 avf_reg_flush (avf_device_t * ad)
251 avf_reg_read (ad, AVFGEN_RSTAT);
252 asm volatile ("":::"memory");
/* Fragment of a trace/record struct: captured rx vector entry. */
259 avf_rx_vector_entry_t rxve;
263 * fd.io coding-style-patch-verification: ON
266 * eval: (c-set-style "gnu")