/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_ERROR, "Rx packet errors")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

#define AVF_RX_DESC_STATUS(x)	(1 << (x))
#define AVF_RX_DESC_STATUS_DD	AVF_RX_DESC_STATUS(0)
#define AVF_RX_DESC_STATUS_EOP	AVF_RX_DESC_STATUS(1)

#define AVF_INPUT_REFILL_THRESHOLD 32
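
/*
 * Refill the rx ring from the buffer pool. The ring is only topped up once
 * at least AVF_INPUT_REFILL_THRESHOLD slots are free, which amortizes the
 * allocation cost, and the refill count is rounded down to a multiple of 8,
 * presumably so tail pointer writes stay 8-descriptor aligned as this
 * device family expects (an assumption; the hardware requirement is not
 * restated here from the datasheet).
 */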
static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
		int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot;
  u32 s0, s1, s2, s3;
  vlib_buffer_t *b[4];
  avf_rx_desc_t *d[4];

  n_refill = rxq->size - 1 - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_THRESHOLD))
    return;

  mask = rxq->size - 1;
  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round down to a multiple of 8 */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
				       n_refill);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free_from_ring (vm, rxq->bufs, slot, rxq->size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;

  while (n_alloc >= 4)
    {
      if (PREDICT_TRUE (slot + 3 < rxq->size))
	s0 = slot, s1 = slot + 1, s2 = slot + 2, s3 = slot + 3;
      else
	s0 = slot, s1 = (slot + 1) & mask, s2 = (slot + 2) & mask,
	  s3 = (slot + 3) & mask;

      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      d[1] = ((avf_rx_desc_t *) rxq->descs) + s1;
      d[2] = ((avf_rx_desc_t *) rxq->descs) + s2;
      d[3] = ((avf_rx_desc_t *) rxq->descs) + s3;
      b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
      b[1] = vlib_get_buffer (vm, rxq->bufs[s1]);
      b[2] = vlib_get_buffer (vm, rxq->bufs[s2]);
      b[3] = vlib_get_buffer (vm, rxq->bufs[s3]);

      if (use_va_dma)
	{
	  d[0]->qword[0] = vlib_buffer_get_va (b[0]);
	  d[1]->qword[0] = vlib_buffer_get_va (b[1]);
	  d[2]->qword[0] = vlib_buffer_get_va (b[2]);
	  d[3]->qword[0] = vlib_buffer_get_va (b[3]);
	}
      else
	{
	  d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
	  d[1]->qword[0] = vlib_buffer_get_pa (vm, b[1]);
	  d[2]->qword[0] = vlib_buffer_get_pa (vm, b[2]);
	  d[3]->qword[0] = vlib_buffer_get_pa (vm, b[3]);
	}

      /* clear the status qword so a stale DD bit cannot be mistaken
         for a completed descriptor */
      d[0]->qword[1] = 0;
      d[1]->qword[1] = 0;
      d[2]->qword[1] = 0;
      d[3]->qword[1] = 0;

      /* next */
      slot = (slot + 4) & mask;
      n_alloc -= 4;
    }
  while (n_alloc)
    {
      s0 = slot;
      d[0] = ((avf_rx_desc_t *) rxq->descs) + s0;
      b[0] = vlib_get_buffer (vm, rxq->bufs[s0]);
      if (use_va_dma)
	d[0]->qword[0] = vlib_buffer_get_va (b[0]);
      else
	d[0]->qword[0] = vlib_buffer_get_pa (vm, b[0]);
      d[0]->qword[1] = 0;

      /* next */
      slot = (slot + 1) & mask;
      n_alloc -= 1;
    }

  /* make sure all descriptor writes are globally visible before the tail
     bump hands them to the device */
  CLIB_MEMORY_BARRIER ();
  *(rxq->qrx_tail) = slot;
}
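
/*
 * Post-process a burst of received buffers: copy each packet's length from
 * its rx vector entry into the vlib buffer, stamp the rx/tx sw_if_index
 * pair from the per-thread buffer template 'bt', and return the total byte
 * count. The main loop is unrolled 4-wide and prefetches buffer headers
 * 8..11 slots ahead.
 */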
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_buffer_t * bt, avf_rx_vector_entry_t * rxve,
		      vlib_buffer_t ** b, u32 n_rxv)
{
  uword n_rx_bytes = 0;

  while (n_rxv >= 4)
    {
      if (n_rxv >= 12)
	{
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	}

      n_rx_bytes += b[0]->current_length = rxve[0].length;
      n_rx_bytes += b[1]->current_length = rxve[1].length;
      n_rx_bytes += b[2]->current_length = rxve[2].length;
      n_rx_bytes += b[3]->current_length = rxve[3].length;

      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[1])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[2])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[3])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      rxve += 4;
      b += 4;
      n_rxv -= 4;
    }
  while (n_rxv)
    {
      b[0]->current_length = rxve->length;
      n_rx_bytes += b[0]->current_length;

      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      rxve += 1;
      b += 1;
      n_rxv -= 1;
    }
  return n_rx_bytes;
}
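
/*
 * Per-queue device input: harvest completed descriptors into the
 * per-thread rx vector, refill the ring, fix up buffer metadata, hand the
 * burst to the next node (normally ethernet-input) and bump counters.
 */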
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  avf_rx_vector_entry_t *rxve = 0;
  uword n_trace;
  avf_rx_desc_t *d;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u16 mask = rxq->size - 1;
  u16 n_rxv = 0;
  u64 or_error = 0;
  u32 *bi;
  vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 8);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, status, 0);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, length, 4);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, ptype, 6);
  STATIC_ASSERT_OFFSET_OF (avf_rx_vector_entry_t, error, 7);
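
  /* These offsets are load-bearing: the AVX2 path below assembles four
     rx vector entries with shifts and masks and writes them with a single
     256-bit store, so each entry must be exactly 8 bytes laid out as
     u32 status, u16 length, u8 ptype, u8 error. */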

  /* is there anything on the ring */
  d = rxq->descs + rxq->next;
  if ((d->qword[1] & AVF_RX_DESC_STATUS_DD) == 0)
    goto done;

  u32 *to_next, n_left_to_next;
  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;
  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
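
  /* a brand new frame is requested rather than appending to a partially
     filled one, so the single-sw-if-index hint set on the ethernet-input
     frame below holds for every packet in it */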

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_rxv < AVF_RX_VECTOR_SZ)
    {
      if (rxq->next + 11 < rxq->size)
	{
	  int stride = 8;
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 1)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 2)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (rxq->descs + (rxq->next + stride + 3)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

#ifdef CLIB_HAVE_VEC256
      u64x4 q1x4, v, err4;
      u64x4 status_dd_eop_mask = u64x4_splat (0x3);

      if (n_rxv >= AVF_RX_VECTOR_SZ - 4)
	goto one_by_one;	/* not enough room left in the rx vector */

      if (rxq->next >= rxq->size - 4)
	goto one_by_one;	/* 4-wide path must not wrap the ring */

      /* load the status/length quadword (qword[1]) of 4 descriptors
         into a 256-bit vector register */
      q1x4 = (u64x4) { d[0].qword[1], d[1].qword[1],
		       d[2].qword[1], d[3].qword[1] };

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & status_dd_eop_mask, status_dd_eop_mask))
	goto one_by_one;
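
      /* Worked mapping, matching the scalar extraction below: descriptor
         qword[1] carries status in bits 18:0, error in 26:19, ptype in
         37:30 and length in 63:38. The rx vector entry wants status in
         bits 31:0, length in 47:32, ptype in 55:48 and error in 63:56,
         hence length moves 38->32 (>> 6), ptype 30->48 (<< 18) and error
         19->56 (<< 37). */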
      /* shift and mask status, length, ptype and err */
      v = q1x4 & u64x4_splat ((u64) 0x7FFFF);	/* status bits 18:0 */
      v |= (q1x4 >> 6) & u64x4_splat ((u64) 0xFFFF << 32);
      v |= (q1x4 << 18) & u64x4_splat ((u64) 0xFF << 48);
      v |= err4 = (q1x4 << 37) & u64x4_splat ((u64) 0xFF << 56);

      u64x4_store_unaligned (v, ptd->rx_vector + n_rxv);

      if (!u64x4_is_all_zero (err4))
	/* move the error byte down to bits 7:0 so it lines up with the
	   scalar path and with the (1 << 3) IPE test at the end */
	or_error |= (err4[0] | err4[1] | err4[2] | err4[3]) >> 56;

      clib_memcpy_fast (bi, rxq->bufs + rxq->next, 4 * sizeof (u32));

      /* next */
      rxq->next = (rxq->next + 4) & mask;
      d = rxq->descs + rxq->next;
      n_rxv += 4;
      rxq->n_enqueued -= 4;
      bi += 4;
      continue;

    one_by_one:
#endif
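      /* scalar fallback: taken when AVX2 is unavailable, when fewer than 4
         rx vector slots remain, or when the 4-wide path would read past
         the end of the descriptor ring */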
      CLIB_PREFETCH ((void *) (rxq->descs + ((rxq->next + 8) & mask)),
		     CLIB_CACHE_LINE_BYTES, LOAD);
      if ((d->qword[1] & AVF_RX_DESC_STATUS_DD) == 0)
	break;

      rxve = ptd->rx_vector + n_rxv;
      bi[0] = rxq->bufs[rxq->next];
      rxve->status = avf_get_u64_bits ((void *) d, 8, 18, 0);
      rxve->error = avf_get_u64_bits ((void *) d, 8, 26, 19);
      rxve->ptype = avf_get_u64_bits ((void *) d, 8, 37, 30);
      rxve->length = avf_get_u64_bits ((void *) d, 8, 63, 38);
      or_error |= rxve->error;

      /* deal with chained buffers */
      while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
	{
	  /* multi-segment packets are not supported yet */
	  clib_error ("fixme");
	}

      /* next */
      rxq->next = (rxq->next + 1) & mask;
      d = rxq->descs + rxq->next;
      n_rxv++;
      rxq->n_enqueued--;
      bi++;
    }

  if (n_rxv == 0)
    goto done;

  /* refill the ring first so the device sees free descriptors as early
     as possible */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  vlib_get_buffers (vm, to_next, bufs, n_rxv);
  n_rx_packets = n_rxv;

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;

  n_rx_bytes = avf_process_rx_burst (vm, node, bt, ptd->rx_vector, bufs,
				     n_rxv);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next;
      rxve = ptd->rx_vector;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  avf_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next_index;
	  tr->hw_if_index = ad->hw_if_index;
	  clib_memcpy_fast (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	  rxve++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame_index);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ad->sw_if_index;
      ef->hw_if_index = ad->hw_if_index;

      /* bit 3 of the accumulated error bits is IPE (IPv4 checksum error)
         in the i40e/AVF descriptor layout; if it is clear for the whole
         burst, downstream nodes may trust the hardware checksum */
      if ((or_error & (1 << 3)) == 0)
	f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
    }

  n_left_to_next -= n_rx_packets;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}

VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }

  return n_rx;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};
/* *INDENT-ON* */
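
/*
 * The node starts out disabled and inherits its next nodes from
 * "device-input"; AVF device bring-up is expected to attach interfaces to
 * it (e.g. via vnet_hw_interface_set_input_node and the rx-thread
 * assignment helpers), after which the usual device-input machinery flips
 * it between polling and interrupt state. (Behavioral note based on the
 * surrounding driver, not restated from this file.)
 */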

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */