/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <poll.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/interface/rx_queue_funcs.h>

#include "af_xdp.h"

#define foreach_af_xdp_input_error                                            \
  _ (SYSCALL_REQUIRED, "syscall required")                                    \
  _ (SYSCALL_FAILURES, "syscall failures")

typedef enum
{
#define _(f,s) AF_XDP_INPUT_ERROR_##f,
  foreach_af_xdp_input_error
#undef _
    AF_XDP_INPUT_N_ERROR,
} af_xdp_input_error_t;

static __clib_unused char *af_xdp_input_error_strings[] = {
#define _(n,s) s,
  foreach_af_xdp_input_error
#undef _
};

static_always_inline void
af_xdp_device_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
			   u32 n_left, const u32 * bi, u32 next_index,
			   u32 hw_if_index)
{
  u32 n_trace = vlib_get_trace_count (vm, node);

  if (PREDICT_TRUE (0 == n_trace))
    return;

  while (n_trace && n_left)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      if (PREDICT_TRUE
	  (vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0)))
	{
	  af_xdp_input_trace_t *tr =
	    vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next_index;
	  tr->hw_if_index = hw_if_index;
	  n_trace--;
	}
      n_left--;
      bi++;
    }

  vlib_set_trace_count (vm, node, n_trace);
}
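
/*
 * Publish n_alloc newly filled slots on the fill ring and, unless the
 * queue is in interrupt mode or the kernel does not request a wakeup,
 * kick the kernel with a zero-timeout poll() on the XSK descriptor.
 */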
static_always_inline void
af_xdp_device_input_refill_db (vlib_main_t * vm,
			       const vlib_node_runtime_t * node,
			       af_xdp_device_t * ad, af_xdp_rxq_t * rxq,
			       const u32 n_alloc)
{
  xsk_ring_prod__submit (&rxq->fq, n_alloc);

  if (AF_XDP_RXQ_MODE_INTERRUPT == rxq->mode ||
      !xsk_ring_prod__needs_wakeup (&rxq->fq))
    return;

  if (node)
    vlib_error_count (vm, node->node_index,
		      AF_XDP_INPUT_ERROR_SYSCALL_REQUIRED, 1);

  if (clib_spinlock_trylock_if_init (&rxq->syscall_lock))
    {
      struct pollfd fd = { .fd = rxq->xsk_fd, .events = POLLIN | POLLOUT };
      int ret = poll (&fd, 1, 0);
      clib_spinlock_unlock_if_init (&rxq->syscall_lock);
      if (PREDICT_FALSE (ret < 0))
	{
	  /* something bad is happening */
	  if (node)
	    vlib_error_count (vm, node->node_index,
			      AF_XDP_INPUT_ERROR_SYSCALL_FAILURES, 1);
	  af_xdp_device_error (ad, "rx poll() failed");
	}
    }
}
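
/*
 * Allocate buffers from the device buffer pool and place them on the
 * AF_XDP fill ring, handling the ring wrap-around, then notify the
 * kernel via af_xdp_device_input_refill_db().
 */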
static_always_inline void
af_xdp_device_input_refill_inline (vlib_main_t *vm,
				   const vlib_node_runtime_t *node,
				   af_xdp_device_t *ad, af_xdp_rxq_t *rxq)
{
  __u64 *fill;
  const u32 size = rxq->fq.size;
  const u32 mask = size - 1;
  u32 bis[VLIB_FRAME_SIZE], *bi = bis;
  u32 n_alloc, n, n_wrap;
  u32 idx = 0;

  ASSERT (mask == rxq->fq.mask);

  /* do not enqueue more packet than ring space */
  n_alloc = xsk_prod_nb_free (&rxq->fq, 16);
  /* do not bother to allocate if too small */
  if (n_alloc < 16)
    return;

  n_alloc = clib_min (n_alloc, ARRAY_LEN (bis));
  n_alloc = vlib_buffer_alloc_from_pool (vm, bis, n_alloc, ad->pool);
  n = xsk_ring_prod__reserve (&rxq->fq, n_alloc, &idx);
  ASSERT (n == n_alloc);

  fill = xsk_ring_prod__fill_addr (&rxq->fq, idx);
  n = clib_min (n_alloc, size - (idx & mask));
  n_wrap = n_alloc - n;

#define bi2addr(bi) ((bi) << CLIB_LOG2_CACHE_LINE_BYTES)

wrap_around:
  while (n >= 8)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u64x4_from_u32x4 (*(u32x4u *) (bi + 0));
      u64x4 b1 = u64x4_from_u32x4 (*(u32x4u *) (bi + 4));
      *(u64x4u *) (fill + 0) = bi2addr (b0);
      *(u64x4u *) (fill + 4) = bi2addr (b1);
#else
      fill[0] = bi2addr (bi[0]);
      fill[1] = bi2addr (bi[1]);
      fill[2] = bi2addr (bi[2]);
      fill[3] = bi2addr (bi[3]);
      fill[4] = bi2addr (bi[4]);
      fill[5] = bi2addr (bi[5]);
      fill[6] = bi2addr (bi[6]);
      fill[7] = bi2addr (bi[7]);
#endif
      fill += 8;
      bi += 8;
      n -= 8;
    }

  while (n >= 1)
    {
      fill[0] = bi2addr (bi[0]);
      fill += 1;
      bi += 1;
      n -= 1;
    }

  if (n_wrap)
    {
      fill = xsk_ring_prod__fill_addr (&rxq->fq, 0);
      n = n_wrap;
      n_wrap = 0;
      goto wrap_around;
    }

  af_xdp_device_input_refill_db (vm, node, ad, rxq, n_alloc);
}
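
/*
 * When the next node is ethernet-input, mark the frame as coming from a
 * single interface so ethernet-input can use its optimized path.
 */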
static_always_inline void
af_xdp_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
			      const u32 next_index, const u32 sw_if_index,
			      const u32 hw_if_index)
{
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  ethernet_input_frame_t *ef;

  if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
    return;

  nf =
    vlib_node_runtime_get_next_frame (vm, node,
				      VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
  f = vlib_get_frame (vm, nf->frame);
  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

  ef = vlib_frame_scalar_args (f);
  ef->sw_if_index = sw_if_index;
  ef->hw_if_index = hw_if_index;
}
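
/*
 * Translate the received XDP descriptors into vlib buffer indices and
 * lengths, initialize the buffer metadata from the template and return
 * the total number of bytes received.
 */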
static_always_inline u32
af_xdp_device_input_bufs (vlib_main_t *vm, const af_xdp_device_t *ad,
			  af_xdp_rxq_t *rxq, u32 *bis, const u32 n_rx,
			  vlib_buffer_t *bt, u32 idx)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 offs[VLIB_FRAME_SIZE], *off = offs;
  u16 lens[VLIB_FRAME_SIZE], *len = lens;
  const u32 mask = rxq->rx.mask;
  u32 n = n_rx, *bi = bis, bytes = 0;

#define addr2bi(addr) ((addr) >> CLIB_LOG2_CACHE_LINE_BYTES)

  while (n >= 1)
    {
      const struct xdp_desc *desc = xsk_ring_cons__rx_desc (&rxq->rx, idx);
      const u64 addr = desc->addr;
      bi[0] = addr2bi (xsk_umem__extract_addr (addr));
      ASSERT (vlib_buffer_is_known (vm, bi[0]) ==
	      VLIB_BUFFER_KNOWN_ALLOCATED);
      off[0] = xsk_umem__extract_offset (addr) - sizeof (vlib_buffer_t);
      len[0] = desc->len;
      idx = (idx + 1) & mask;
      bi += 1;
      off += 1;
      len += 1;
      n -= 1;
    }

  vlib_get_buffers (vm, bis, bufs, n_rx);

  n = n_rx;
  off = offs;
  len = lens;

  while (n >= 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_buffer_copy_template (b[0], bt);
      b[0]->current_data = off[0];
      bytes += b[0]->current_length = len[0];

      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_buffer_copy_template (b[1], bt);
      b[1]->current_data = off[1];
      bytes += b[1]->current_length = len[1];

      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_buffer_copy_template (b[2], bt);
      b[2]->current_data = off[2];
      bytes += b[2]->current_length = len[2];

      vlib_prefetch_buffer_header (b[7], LOAD);
      vlib_buffer_copy_template (b[3], bt);
      b[3]->current_data = off[3];
      bytes += b[3]->current_length = len[3];

      b += 4;
      off += 4;
      len += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      vlib_buffer_copy_template (b[0], bt);
      b[0]->current_data = off[0];
      bytes += b[0]->current_length = len[0];
      b += 1;
      off += 1;
      len += 1;
      n -= 1;
    }

  xsk_ring_cons__release (&rxq->rx, n_rx);
  return bytes;
}
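
/*
 * Per-queue receive: peek the RX ring, build the next frame, update
 * trace and interface counters, then refill the fill ring.
 */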
static_always_inline uword
af_xdp_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			    vlib_frame_t *frame, af_xdp_device_t *ad, u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  af_xdp_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  vlib_buffer_t bt;
  u32 next_index, *to_next, n_left_to_next;
  u32 n_rx_packets, n_rx_bytes;
  u32 idx;

  n_rx_packets = xsk_ring_cons__peek (&rxq->rx, VLIB_FRAME_SIZE, &idx);

  if (PREDICT_FALSE (0 == n_rx_packets))
    goto refill;

  vlib_buffer_copy_template (&bt, ad->buffer_template);
  next_index = ad->per_interface_next_index;
  if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index)))
    vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, &bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  n_rx_bytes =
    af_xdp_device_input_bufs (vm, ad, rxq, to_next, n_rx_packets, &bt, idx);
  af_xdp_device_input_ethernet (vm, node, next_index, ad->sw_if_index,
				ad->hw_if_index);

  vlib_put_next_frame (vm, node, next_index, n_left_to_next - n_rx_packets);

  af_xdp_device_input_trace (vm, node, n_rx_packets, to_next, next_index,
			     ad->hw_if_index);

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index,
     ad->hw_if_index, n_rx_packets, n_rx_bytes);

refill:
  af_xdp_device_input_refill_inline (vm, node, ad, rxq);

  return n_rx_packets;
}
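
/*
 * Input node function: walk the RX queues polled by this thread and
 * process those belonging to admin-up devices.
 */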
VLIB_NODE_FN (af_xdp_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * frame)
{
  u32 n_rx = 0;
  af_xdp_main_t *am = &af_xdp_main;
  vnet_hw_if_rxq_poll_vector_t *p,
    *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  vec_foreach (p, pv)
    {
      af_xdp_device_t *ad = vec_elt_at_index (am->devices, p->dev_instance);
      if ((ad->flags & AF_XDP_DEVICE_F_ADMIN_UP) == 0)
	continue;
      n_rx += af_xdp_device_input_inline (vm, node, frame, ad, p->queue_id);
    }
  return n_rx;
}

#ifndef CLIB_MARCH_VARIANT
void
af_xdp_device_input_refill (af_xdp_device_t *ad)
{
  vlib_main_t *vm = vlib_get_main ();
  af_xdp_rxq_t *rxq;
  vec_foreach (rxq, ad->rxqs)
    af_xdp_device_input_refill_inline (vm, 0, ad, rxq);
}
#endif /* CLIB_MARCH_VARIANT */

VLIB_REGISTER_NODE (af_xdp_input_node) = {
  .name = "af_xdp-input",
  .sibling_of = "device-input",
  .format_trace = format_af_xdp_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AF_XDP_INPUT_N_ERROR,
  .error_strings = af_xdp_input_error_strings,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */