/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
21 #include <avf/virtchnl.h>
25 #define foreach_avf_device_flags \
26 _(0, INITIALIZED, "initialized") \
27 _(1, ERROR, "error") \
28 _(2, ADMIN_UP, "admin-up") \
30 _(4, LINK_UP, "link-up") \
31 _(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
36 #define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
37 foreach_avf_device_flags
46 STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
57 STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
61 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
62 volatile u32 *qrx_tail;
72 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
73 volatile u32 *qtx_tail;
84 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
86 u32 per_interface_next_index;
91 vlib_pci_dev_handle_t pci_dev_handle;
107 virtchnl_pf_event_t *events;
117 virtchnl_link_speed_t link_speed;
120 virtchnl_eth_stats_t eth_stats;
132 } avf_rx_vector_entry_t;
134 STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
136 #define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE
140 AVF_PROCESS_EVENT_START = 1,
141 AVF_PROCESS_EVENT_STOP = 2,
142 AVF_PROCESS_EVENT_AQ_INT = 3,
143 } avf_process_event_t;
147 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
148 avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ];
150 vlib_buffer_t buffer_template;
151 } avf_per_thread_data_t;
160 STATIC_ASSERT (VNET_DEVICE_INPUT_N_NEXT_NODES < 256, "too many next nodes");
166 avf_device_t *devices;
167 avf_per_thread_data_t *per_thread_data;
168 vlib_physmem_region_index_t physmem_region;
169 int physmem_region_alloc;
171 vlib_log_class_t log_class;
173 /* 256 element array for ptype based lookup */
177 extern avf_main_t avf_main;
181 vlib_pci_addr_t addr;
188 } avf_create_if_args_t;
190 void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
191 void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);
193 extern vlib_node_registration_t avf_input_node;
194 extern vnet_device_class_t avf_device_class;
195 uword avf_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
196 vlib_frame_t * frame);
199 format_function_t format_avf_device;
200 format_function_t format_avf_device_name;
201 format_function_t format_avf_input_trace;
204 avf_get_u32 (void *start, int offset)
206 return *(u32 *) (((u8 *) start) + offset);
210 avf_get_u64 (void *start, int offset)
212 return *(u64 *) (((u8 *) start) + offset);
216 avf_get_u32_bits (void *start, int offset, int first, int last)
218 u32 value = avf_get_u32 (start, offset);
219 if ((last == 0) && (first == 31))
222 value &= (1 << (first - last + 1)) - 1;
227 avf_get_u64_bits (void *start, int offset, int first, int last)
229 u64 value = avf_get_u64 (start, offset);
230 if ((last == 0) && (first == 63))
233 value &= (1 << (first - last + 1)) - 1;
238 avf_set_u32 (void *start, int offset, u32 value)
240 (*(u32 *) (((u8 *) start) + offset)) = value;
244 avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
246 *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
250 avf_reg_read (avf_device_t * ad, u32 addr)
252 return *(volatile u32 *) (ad->bar0 + addr);
256 avf_reg_flush (avf_device_t * ad)
258 avf_reg_read (ad, AVFGEN_RSTAT);
259 asm volatile ("":::"memory");
266 avf_rx_vector_entry_t rxve;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */