/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2023 Cisco Systems, Inc.
 */

#ifndef _VNET_DEV_FUNCS_H_
#define _VNET_DEV_FUNCS_H_

#include <vppinfra/clib.h>
#include <vnet/dev/dev.h>

/* return the driver-private data area attached to a device */
static_always_inline void *
vnet_dev_get_data (vnet_dev_t *dev)
{
  return dev->data;
}

/* recover the device from a pointer to its private data area */
static_always_inline vnet_dev_t *
vnet_dev_from_data (void *p)
{
  return (void *) ((u8 *) p - STRUCT_OFFSET_OF (vnet_dev_t, data));
}

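/* Usage sketch (hypothetical driver code; my_dev_data_t is a
 * driver-defined type stored in the device data area):
 *
 *   my_dev_data_t *dd = vnet_dev_get_data (dev);
 *   ...
 *   vnet_dev_t *same_dev = vnet_dev_from_data (dd);
 */
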
/* driver-private data attached to ports and rx/tx queues */
static_always_inline void *
vnet_dev_get_port_data (vnet_dev_port_t *port)
{
  return port->data;
}

static_always_inline void *
vnet_dev_get_rx_queue_data (vnet_dev_rx_queue_t *rxq)
{
  return rxq->data;
}

static_always_inline void *
vnet_dev_get_tx_queue_data (vnet_dev_tx_queue_t *txq)
{
  return txq->data;
}

/* look up a device by its index in the global device pool */
static_always_inline vnet_dev_t *
vnet_dev_get_by_index (u32 index)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->devices, index)[0];
}

static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_index (vnet_dev_t *dev, u32 index)
{
  return pool_elt_at_index (dev->ports, index)[0];
}

/* map a vnet device instance to its port, 0 if the instance is freed */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_from_dev_instance (u32 dev_instance)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  if (pool_is_free_index (dm->ports_by_dev_instance, dev_instance))
    return 0;
  return pool_elt_at_index (dm->ports_by_dev_instance, dev_instance)[0];
}

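/* Usage sketch (hypothetical caller): dev_instance is the per-interface
 * value vnet hands back to device code, so a lookup typically checks for
 * a freed instance:
 *
 *   vnet_dev_port_t *port =
 *     vnet_dev_get_port_from_dev_instance (dev_instance);
 *   if (port == 0)
 *     return; // instance no longer exists
 */
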
/* look up a device by its id string, 0 if not found */
static_always_inline vnet_dev_t *
vnet_dev_by_id (char *id)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  uword *p = hash_get (dm->device_index_by_id, id);
  if (p)
    return *pool_elt_at_index (dm->devices, p[0]);
  return 0;
}

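/* Usage sketch (assuming a PCI-style device id string):
 *
 *   vnet_dev_t *dev = vnet_dev_by_id ("pci/0000:04:00.0");
 *   if (dev == 0)
 *     ... // no such device
 */
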
/* translate a CPU pointer to a device DMA address: the virtual address
 * itself when the device does VA DMA, the physmem physical address
 * otherwise */
static_always_inline uword
vnet_dev_get_dma_addr (vlib_main_t *vm, vnet_dev_t *dev, void *p)
{
  return dev->va_dma ? pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
}

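/* Usage sketch (hypothetical descriptor ring): the same call works for
 * both VA-DMA (IOMMU) and PA-DMA devices:
 *
 *   uword ring_dma_addr = vnet_dev_get_dma_addr (vm, dev, ring_base);
 */
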
static_always_inline void *
vnet_dev_get_bus_data (vnet_dev_t *dev)
{
  return (void *) dev->bus_data;
}

static_always_inline vnet_dev_bus_t *
vnet_dev_get_bus (vnet_dev_t *dev)
{
  vnet_dev_main_t *dm = &vnet_dev_main;
  return pool_elt_at_index (dm->buses, dev->bus_index);
}

/* assert that we are running in the device process on the main thread */
static_always_inline void
vnet_dev_validate (vlib_main_t *vm, vnet_dev_t *dev)
{
  ASSERT (dev->process_node_index == vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

static_always_inline void
vnet_dev_port_validate (vlib_main_t *vm, vnet_dev_port_t *port)
{
  ASSERT (port->dev->process_node_index ==
	  vlib_get_current_process_node_index (vm));
  ASSERT (vm->thread_index == 0);
}

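/* Usage sketch (hypothetical driver op): operations restricted to the
 * device process typically validate on entry:
 *
 *   static vnet_dev_rv_t
 *   my_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
 *   {
 *     vnet_dev_port_validate (vm, port);
 *     ...
 *   }
 */
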
static_always_inline u32
vnet_dev_port_get_sw_if_index (vnet_dev_port_t *port)
{
  return port->intf.sw_if_index;
}

/* find a port by its device-assigned port id, 0 if not found */
static_always_inline vnet_dev_port_t *
vnet_dev_get_port_by_id (vnet_dev_t *dev, vnet_dev_port_id_t port_id)
{
  foreach_vnet_dev_port (p, dev)
    if (p->port_id == port_id)
      return p;
  return 0;
}

/* allocate a zeroed, cache-line-aligned object with data_sz bytes of
 * trailing data area */
static_always_inline void *
vnet_dev_alloc_with_data (u32 sz, u32 data_sz)
{
  void *p;
  sz += data_sz;
  sz = round_pow2 (sz, CLIB_CACHE_LINE_BYTES);
  p = clib_mem_alloc_aligned (sz, CLIB_CACHE_LINE_BYTES);
  clib_memset (p, 0, sz);
  return p;
}

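/* Usage sketch (hypothetical size variable): co-allocate a control
 * structure with its trailing driver data area:
 *
 *   vnet_dev_t *dev = vnet_dev_alloc_with_data (sizeof (vnet_dev_t),
 *                                               driver_dev_data_sz);
 */
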
/* take the tx queue spinlock, but only if the queue is shared between
 * threads; test-and-test-and-set to avoid cache-line ping-pong while
 * spinning */
static_always_inline void
vnet_dev_tx_queue_lock_if_needed (vnet_dev_tx_queue_t *txq)
{
  u8 free = 0;

  if (!txq->lock_needed)
    return;

  while (!__atomic_compare_exchange_n (&txq->lock, &free, 1, 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      while (__atomic_load_n (&txq->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      free = 0;
    }
}

static_always_inline void
vnet_dev_tx_queue_unlock_if_needed (vnet_dev_tx_queue_t *txq)
{
  if (!txq->lock_needed)
    return;
  __atomic_store_n (&txq->lock, 0, __ATOMIC_RELEASE);
}

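/* Usage sketch: a tx function brackets its descriptor enqueue with the
 * conditional lock, which costs only a branch when the queue is not
 * shared:
 *
 *   vnet_dev_tx_queue_lock_if_needed (txq);
 *   ... enqueue packets ...
 *   vnet_dev_tx_queue_unlock_if_needed (txq);
 */
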
static_always_inline u8
vnet_dev_get_rx_queue_buffer_pool_index (vnet_dev_rx_queue_t *rxq)
{
  return rxq->buffer_template.buffer_pool_index;
}

/* atomically post a runtime request; the rx node consumes it on its next
 * iteration (see vnet_dev_rx_queue_runtime_update below) */
static_always_inline void
vnet_dev_rx_queue_rt_request (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq,
			      vnet_dev_rx_queue_rt_req_t req)
{
  __atomic_fetch_or (&rxq->runtime_request.as_number, req.as_number,
		     __ATOMIC_RELEASE);
}

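/* Usage sketch: e.g. request that polling of a queue be suspended; the
 * flag is picked up by the rx node on its next run:
 *
 *   vnet_dev_rx_queue_rt_request (
 *     vm, rxq, (vnet_dev_rx_queue_rt_req_t){ .suspend_on = 1 });
 */
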
static_always_inline vnet_dev_rx_node_runtime_t *
vnet_dev_get_rx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_tx_node_runtime_t *
vnet_dev_get_tx_node_runtime (vlib_node_runtime_t *node)
{
  return (void *) node->runtime_data;
}

static_always_inline vnet_dev_rx_queue_t **
foreach_vnet_dev_rx_queue_runtime_helper (vlib_node_runtime_t *node)
{
  vnet_dev_rx_node_runtime_t *rt = vnet_dev_get_rx_node_runtime (node);
  return rt->rx_queues;
}

/* apply any pending runtime requests to the rx queue; returns 0 when the
 * queue was just suspended and should be skipped this iteration */
static_always_inline int
vnet_dev_rx_queue_runtime_update (vnet_dev_rx_queue_t *rxq)
{
  vnet_dev_port_t *port;
  vnet_dev_rx_queue_rt_req_t req;
  int rv = 1;

  if (PREDICT_TRUE (rxq->runtime_request.as_number == 0))
    return 1;

  req.as_number =
    __atomic_exchange_n (&rxq->runtime_request.as_number, 0, __ATOMIC_ACQUIRE);

  port = rxq->port;
  if (req.update_next_index)
    rxq->next_index = port->intf.rx_next_index;

  if (req.update_feature_arc)
    {
      vlib_buffer_template_t *bt = &rxq->buffer_template;
      bt->current_config_index = port->intf.current_config_index;
      vnet_buffer (bt)->feature_arc_index = port->intf.feature_arc_index;
    }

  if (req.suspend_on)
    {
      rxq->suspended = 1;
      rv = 0;
    }

  if (req.suspend_off)
    rxq->suspended = 0;

  return rv;
}

/* per-thread scratch space for rx/tx node runtime use */
static_always_inline void *
vnet_dev_get_rt_temp_space (vlib_main_t *vm)
{
  return vnet_dev_main.runtime_temp_spaces +
	 ((uword) vm->thread_index
	  << vnet_dev_main.log2_runtime_temp_space_sz);
}

/* build a vnet_dev_hw_addr_t from a 6-byte ethernet MAC */
static_always_inline void
vnet_dev_set_hw_addr_eth_mac (vnet_dev_hw_addr_t *addr, const u8 *eth_mac_addr)
{
  vnet_dev_hw_addr_t ha = {};
  clib_memcpy_fast (&ha.eth_mac, eth_mac_addr, sizeof (ha.eth_mac));
  *addr = ha;
}

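/* Usage sketch (hypothetical MAC value):
 *
 *   u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *   vnet_dev_hw_addr_t ha;
 *   vnet_dev_set_hw_addr_eth_mac (&ha, mac);
 */
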
/* iterate over the rx queues assigned to an rx node, applying pending
 * runtime requests and skipping queues that report suspension */
#define foreach_vnet_dev_rx_queue_runtime(q, node)                            \
  for (vnet_dev_rx_queue_t *                                                  \
	 *__qp = foreach_vnet_dev_rx_queue_runtime_helper (node),             \
	**__last = __qp + (vnet_dev_get_rx_node_runtime (node))->n_rx_queues, \
	*(q) = *__qp;                                                         \
       __qp < __last; __qp++, (q) = *__qp)                                    \
    if (vnet_dev_rx_queue_runtime_update (q))

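/* Usage sketch: the main loop of an rx input node walks its assigned
 * queues, skipping any queue that vnet_dev_rx_queue_runtime_update ()
 * reports as suspended:
 *
 *   foreach_vnet_dev_rx_queue_runtime (rxq, node)
 *     {
 *       ... poll rxq, enqueue to rxq->next_index ...
 *     }
 */
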
#endif /* _VNET_DEV_FUNCS_H_ */