1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2023 Cisco Systems, Inc.
5 #ifndef _VNET_DEV_FUNCS_H_
6 #define _VNET_DEV_FUNCS_H_
8 #include <vppinfra/clib.h>
9 #include <vnet/dev/dev.h>
/* Return a pointer to the driver's private per-device data area.
   NOTE(review): function body is not visible in this extraction — TODO confirm
   it returns dev->data (or equivalent) against the full source. */
11 static_always_inline void *
12 vnet_dev_get_data (vnet_dev_t *dev)
/* Inverse of vnet_dev_get_data: recover the enclosing vnet_dev_t from a
   pointer into its 'data' member by subtracting the member offset
   (container-of pattern). */
17 static_always_inline vnet_dev_t *
18 vnet_dev_from_data (void *p)
20 return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
/* Return a pointer to the driver's private per-port data area.
   NOTE(review): body elided from this extraction — TODO confirm. */
23 static_always_inline void *
24 vnet_dev_get_port_data (vnet_dev_port_t *port)
/* Return a pointer to the driver's private per-RX-queue data area.
   NOTE(review): body elided from this extraction — TODO confirm. */
29 static_always_inline void *
30 vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
/* Return a pointer to the driver's private per-TX-queue data area.
   NOTE(review): body elided from this extraction — TODO confirm. */
35 static_always_inline void *
36 vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
/* Look up a device by its index in the global device pool.  The pool stores
   vnet_dev_t pointers, hence the trailing [0] dereference.  No bounds/free
   check is visible: callers must pass a valid, in-use index. */
41 static_always_inline vnet_dev_t *
42 vnet_dev_get_by_index (u32 index)
44 vnet_dev_main_t *dm = &vnet_dev_main;
45 return pool_elt_at_index (dm->devices, index)[0];
/* Look up a port by its index in the device's port pool (pool of pointers,
   hence [0]).  Index must be valid and in use. */
48 static_always_inline vnet_dev_port_t *
49 vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
51 return pool_elt_at_index (dev->ports, index)[0];
/* Map a vnet device instance number to its port.  A free pool index is
   rejected first.
   NOTE(review): the statement taken when the index is free (presumably
   'return 0;') is elided from this extraction — confirm against full source. */
54 static_always_inline vnet_dev_port_t *
55 vnet_dev_get_port_from_dev_instance (u32 dev_instance)
57 vnet_dev_main_t *dm = &vnet_dev_main;
58 if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
60 return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
/* Resolve a hardware interface index to its dev port.  Fetches the hw
   interface, maps its dev_instance to a port, then cross-checks that the
   port really owns this hw_if_index (guards against stale/mismatched
   instances).
   NOTE(review): the two return statements (failure and success paths) are
   elided from this extraction — confirm against full source. */
63 static_always_inline vnet_dev_port_t *
64 vnet_dev_get_port_from_hw_if_index (u32 hw_if_index)
66 vnet_hw_interface_t *hw;
67 vnet_dev_port_t *port;
68 hw = vnet_get_hw_interface (vnet_get_main (), hw_if_index);
69 port = vnet_dev_get_port_from_dev_instance (hw->dev_instance);
71 if (!port || port->intf.hw_if_index != hw_if_index)
/* Find a device by its string id via the device_index_by_id hash.
   NOTE(review): the branch handling a missing hash entry (p == 0) is elided
   from this extraction; as shown, p[0] would be dereferenced unconditionally.
   Confirm the guard exists in the full source. */
77 static_always_inline vnet_dev_t *
78 vnet_dev_by_id (char *id)
80 vnet_dev_main_t *dm = &vnet_dev_main;
81 uword *p = hash_get (dm->device_index_by_id, id);
83 return *pool_elt_at_index (dm->devices, p[0]);
/* Translate a virtual address to the DMA address the device should use:
   when the device can DMA on virtual addresses (va_dma) the VA itself is
   returned, otherwise the physical address is looked up via physmem. */
87 static_always_inline uword
88 vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
90 return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
/* Return the bus-specific private data attached to this device. */
93 static_always_inline void *
94 vnet_dev_get_bus_data (vnet_dev_t *dev)
96 return (void *) dev->bus_data;
/* Return the bus descriptor this device is attached to, looked up by the
   device's bus_index in the global bus pool. */
99 static_always_inline vnet_dev_bus_t *
100 vnet_dev_get_bus (vnet_dev_t *dev)
102 vnet_dev_main_t *dm = &vnet_dev_main;
103 return pool_elt_at_index (dm->buses, dev->bus_index);
/* Debug-build sanity check: assert we are running inside the device's own
   process node and on the main thread (thread 0).  No-op in release builds
   where ASSERT compiles out. */
106 static_always_inline void
107 vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
109 ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
110 ASSERT (vm->thread_index == 0);
/* Debug-build sanity check for port operations: assert we are inside the
   owning device's process node and on the main thread (thread 0). */
113 static_always_inline void
114 vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
116 ASSERT (port->dev->process_node_index ==
117 vlib_get_current_process_node_index (vm));
118 ASSERT (vm->thread_index == 0);
/* Return the software interface index created for this port. */
121 static_always_inline u32
122 vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
124 return port->intf.sw_if_index;
/* Linear search of the device's ports for a matching port_id.
   NOTE(review): the 'return p;' on match and the not-found return are elided
   from this extraction — confirm against full source. */
127 static_always_inline vnet_dev_port_t *
128 vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
130 foreach_vnet_dev_port (p, dev)
131 if (p->port_id == port_id)
/* Linear search of the port's RX queues for a matching queue_id.
   NOTE(review): the return statements are elided from this extraction —
   confirm against full source. */
136 static_always_inline vnet_dev_rx_queue_t *
137 vnet_dev_port_get_rx_queue_by_id (vnet_dev_port_t *port,
138 vnet_dev_queue_id_t queue_id)
140 foreach_vnet_dev_port_rx_queue (q, port)
141 if (q->queue_id == queue_id)
/* Linear search of the port's TX queues for a matching queue_id.
   NOTE(review): the return statements are elided from this extraction —
   confirm against full source. */
146 static_always_inline vnet_dev_tx_queue_t *
147 vnet_dev_port_get_tx_queue_by_id (vnet_dev_port_t *port,
148 vnet_dev_queue_id_t queue_id)
150 foreach_vnet_dev_port_tx_queue (q, port)
151 if (q->queue_id == queue_id)
/* Allocate a zeroed, cache-line-aligned object of 'sz' bytes rounded up to
   a cache-line multiple.
   NOTE(review): the declaration of 'p', the accounting of 'data_sz' into the
   allocation size, and the final return are elided from this extraction —
   confirm against full source. */
156 static_always_inline void *
157 vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
161 sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
162 p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
163 clib_memset (p, 0, sz);
/* Acquire the TX queue spinlock, but only when the queue is shared between
   threads (lock_needed).  Uses a compare-exchange with acquire ordering to
   take the lock, and a relaxed-load inner spin so contending threads wait on
   a plain read instead of hammering the cache line with CAS attempts.
   NOTE(review): the declaration/reset of the 'free' expected-value variable
   and the loop-body lines are elided from this extraction — confirm against
   full source. */
167 static_always_inline void
168 vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
172 if (!txq->lock_needed)
175 while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
176 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
178 while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
/* Release the TX queue spinlock (store 0 with release ordering), skipped
   when the queue is not shared.
   NOTE(review): the statement under the '!lock_needed' condition (presumably
   an early 'return;') is elided from this extraction. */
184 static_always_inline void
185 vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
187 if (!txq->lock_needed)
189 __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
/* Return the buffer pool index baked into this RX queue's buffer template. */
192 static_always_inline u8
193 vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
195 return rxq->buffer_template.buffer_pool_index;
/* Return the per-buffer data size of the pool this RX queue allocates from,
   resolved via the queue's buffer-template pool index. */
198 static_always_inline u32
199 vnet_dev_get_rx_queue_buffer_data_size (vlib_main_t *vm,
200 vnet_dev_rx_queue_t *rxq)
202 u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
203 return vlib_get_buffer_pool (vm, bpi)->data_size;
/* Post a runtime request to an RX queue by atomically OR-ing the request
   bits into the queue's pending-request word; the polling thread picks them
   up in foreach_vnet_dev_rx_queue_runtime_helper.
   NOTE(review): the final memory-order argument of __atomic_fetch_or is
   elided from this extraction (presumably __ATOMIC_RELEASE) — confirm. */
206 static_always_inline void
207 vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
208 vnet_dev_rx_queue_rt_req_t req)
210 __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
/* Reinterpret a graph node's runtime_data as the dev RX node runtime. */
214 static_always_inline vnet_dev_rx_node_runtime_t *
215 vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
217 return (void *) node->runtime_data;
/* Reinterpret a graph node's runtime_data as the dev TX node runtime. */
220 static_always_inline vnet_dev_tx_node_runtime_t *
221 vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
223 return (void *) node->runtime_data;
/* Iterator backend for foreach_vnet_dev_rx_queue_runtime: given the current
   queue (or 0 to start), return the next RX queue assigned to this thread's
   input node, servicing any pending runtime requests on the way.  When a
   queue has requests pending, they are atomically swapped out (exchange with
   0, acquire ordering) and applied: update_next_index refreshes the cached
   next-node index from the port, update_feature_arc refreshes the buffer
   template's config index and feature arc from the port's interface state.
   NOTE(review): a large number of interior lines are elided from this
   extraction (initialization of 'port', the disabled/interrupt handling
   around lines 240-270, loop/return structure).  The comments above describe
   only what the visible lines establish — confirm the rest against the full
   source before modifying. */
226 static_always_inline vnet_dev_rx_queue_t *
227 foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node,
228 vnet_dev_rx_queue_t *rxq)
230 vnet_dev_port_t *port;
231 vnet_dev_rx_queue_rt_req_t req;
234 rxq = vnet_dev_get_rx_node_runtime (node)->first_rx_queue;
237 rxq = rxq->next_on_thread;
239 if (PREDICT_FALSE (rxq == 0))
242 if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
246 __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);
249 if (req.update_next_index)
250 rxq->next_index = port->intf.rx_next_index;
252 if (req.update_feature_arc)
254 vlib_buffer_template_t *bt = &rxq->buffer_template;
255 bt->current_config_index = port->intf.current_config_index;
256 vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
/* Convenience for-loop over all RX queues assigned to this thread's input
   node: seeds the iteration with a 0 queue and advances via the helper until
   it returns 0. */
271 #define foreach_vnet_dev_rx_queue_runtime(q, node) \
272 for (vnet_dev_rx_queue_t * (q) = \
273 foreach_vnet_dev_rx_queue_runtime_helper (node, 0); \
274 q; (q) = foreach_vnet_dev_rx_queue_runtime_helper (node, q))
/* Return this thread's slice of the runtime temporary scratch area: base
   pointer plus thread_index shifted by the per-thread log2 slice size. */
276 static_always_inline void *
277 vnet_dev_get_rt_temp_space (vlib_main_t *vm)
279 return vnet_dev_main.runtime_temp_spaces +
280 ((uword) vm->thread_index
281 << vnet_dev_main.log2_runtime_temp_space_sz);
/* Build a vnet_dev_hw_addr_t from a raw ethernet MAC: zero-initialize a
   local, copy the MAC bytes into its eth_mac field.
   NOTE(review): the final store of the local into *addr is elided from this
   extraction — confirm against full source. */
284 static_always_inline void
285 vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
287 vnet_dev_hw_addr_t ha = {};
288 clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
/* Linear search of the port's argument list for a matching id.
   NOTE(review): the comparison and return statements are elided from this
   extraction — confirm against full source. */
292 static_always_inline vnet_dev_arg_t *
293 vnet_dev_get_port_arg_by_id (vnet_dev_port_t *port, u32 id)
295 foreach_vnet_dev_port_args (a, port)
/* Return a BOOL arg's value, falling back to its default when the user did
   not set it.  Asserts the arg really is of BOOL type (debug builds only). */
301 static_always_inline int
302 vnet_dev_arg_get_bool (vnet_dev_arg_t *arg)
304 ASSERT (arg->type == VNET_DEV_ARG_TYPE_BOOL);
305 return arg->val_set ? arg->val.boolean : arg->default_val.boolean;
/* Return a UINT32 arg's value, falling back to its default when the user
   did not set it.  Asserts the arg type in debug builds. */
308 static_always_inline u32
309 vnet_dev_arg_get_uint32 (vnet_dev_arg_t *arg)
311 ASSERT (arg->type == VNET_DEV_ARG_TYPE_UINT32);
312 return arg->val_set ? arg->val.uint32 : arg->default_val.uint32;
/* Return a STRING arg's value (vector), falling back to its default when
   the user did not set it.  Asserts the arg type in debug builds. */
315 static_always_inline u8 *
316 vnet_dev_arg_get_string (vnet_dev_arg_t *arg)
318 ASSERT (arg->type == VNET_DEV_ARG_TYPE_STRING);
319 return arg->val_set ? arg->val.string : arg->default_val.string;
322 #endif /* _VNET_DEV_FUNCS_H_ */