/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_ERROR, "Rx packet errors")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
    AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};

#define AVF_INPUT_REFILL_THRESHOLD 32
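
/* Write one rx descriptor: buffer address in qword[0], status qword[1]
   cleared.  With 256-bit vector support the whole 32-byte descriptor is
   written in a single unaligned store. */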
static_always_inline void
avf_rx_desc_write (avf_rx_desc_t * d, u64 addr)
{
#ifdef CLIB_HAVE_VEC256
  u64x4 v = { addr, 0, 0, 0 };
  u64x4_store_unaligned (v, (void *) d);
#else
  d->qword[0] = addr;
  d->qword[1] = 0;
#endif
}

static_always_inline void
avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
		int use_va_dma)
{
  u16 n_refill, mask, n_alloc, slot, size;
  vlib_buffer_t *b[8];
  avf_rx_desc_t *d, *first_d;
  void *p[8];

  size = rxq->size;
  mask = size - 1;
  n_refill = mask - rxq->n_enqueued;
  if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_THRESHOLD))
    return;

  slot = (rxq->next - n_refill - 1) & mask;

  n_refill &= ~7;		/* round to 8 */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, size, n_refill);

  if (PREDICT_FALSE (n_alloc != n_refill))
    {
      vlib_error_count (vm, node->node_index,
			AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
      if (n_alloc)
	vlib_buffer_free_from_ring (vm, rxq->bufs, slot, size, n_alloc);
      return;
    }

  rxq->n_enqueued += n_alloc;
  first_d = rxq->descs;

  ASSERT (slot % 8 == 0);

  while (n_alloc >= 8)
    {
      d = first_d + slot;

      if (use_va_dma)
	{
	  /* VA DMA: the device can use buffer virtual addresses directly */
	  vlib_get_buffers_with_offset (vm, rxq->bufs + slot, p, 8,
					sizeof (vlib_buffer_t));
	  avf_rx_desc_write (d + 0, pointer_to_uword (p[0]));
	  avf_rx_desc_write (d + 1, pointer_to_uword (p[1]));
	  avf_rx_desc_write (d + 2, pointer_to_uword (p[2]));
	  avf_rx_desc_write (d + 3, pointer_to_uword (p[3]));
	  avf_rx_desc_write (d + 4, pointer_to_uword (p[4]));
	  avf_rx_desc_write (d + 5, pointer_to_uword (p[5]));
	  avf_rx_desc_write (d + 6, pointer_to_uword (p[6]));
	  avf_rx_desc_write (d + 7, pointer_to_uword (p[7]));
	}
      else
	{
	  /* otherwise descriptors need buffer physical addresses */
	  vlib_get_buffers (vm, rxq->bufs + slot, b, 8);
	  avf_rx_desc_write (d + 0, vlib_buffer_get_pa (vm, b[0]));
	  avf_rx_desc_write (d + 1, vlib_buffer_get_pa (vm, b[1]));
	  avf_rx_desc_write (d + 2, vlib_buffer_get_pa (vm, b[2]));
	  avf_rx_desc_write (d + 3, vlib_buffer_get_pa (vm, b[3]));
	  avf_rx_desc_write (d + 4, vlib_buffer_get_pa (vm, b[4]));
	  avf_rx_desc_write (d + 5, vlib_buffer_get_pa (vm, b[5]));
	  avf_rx_desc_write (d + 6, vlib_buffer_get_pa (vm, b[6]));
	  avf_rx_desc_write (d + 7, vlib_buffer_get_pa (vm, b[7]));
	}

      /* next */
      slot = (slot + 8) & mask;
      n_alloc -= 8;
    }

  /* all descriptor writes must be globally visible before the tail
     pointer (which the device polls) is advanced */
  CLIB_MEMORY_STORE_BARRIER ();
  *(rxq->qrx_tail) = slot;
}
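
/* Walk the rx vector in blocks of 4: extract packet lengths from the
   descriptor status qwords, stamp rx/tx sw_if_index from the per-thread
   buffer template and return the total byte count for counters. */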
static_always_inline uword
avf_process_rx_burst (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_buffer_t * bt, u64 * qw1,
		      vlib_buffer_t ** b, u32 n_left)
{
  uword n_rx_bytes = 0;

  while (n_left >= 4)
    {
      if (n_left >= 12)
	{
	  /* prefetch buffer headers two quad-blocks ahead */
	  vlib_prefetch_buffer_header (b[8], LOAD);
	  vlib_prefetch_buffer_header (b[9], LOAD);
	  vlib_prefetch_buffer_header (b[10], LOAD);
	  vlib_prefetch_buffer_header (b[11], LOAD);
	}

      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[1]->current_length = qw1[1] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[2]->current_length = qw1[2] >> AVF_RXD_LEN_SHIFT;
      n_rx_bytes += b[3]->current_length = qw1[3] >> AVF_RXD_LEN_SHIFT;

      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[1])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[2])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));
      clib_memcpy_fast (vnet_buffer (b[3])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      qw1 += 4;
      b += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      n_rx_bytes += b[0]->current_length = qw1[0] >> AVF_RXD_LEN_SHIFT;

      clib_memcpy_fast (vnet_buffer (b[0])->sw_if_index,
			vnet_buffer (bt)->sw_if_index, 2 * sizeof (u32));

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      qw1 += 1;
      b += 1;
      n_left -= 1;
    }
  return n_rx_bytes;
}
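
/* Per-queue rx path: scan the descriptor ring for completed packets,
   hand their buffer indices to the next node frame, refill the ring and
   update interface counters.  Chained (multi-descriptor) packets are not
   handled yet. */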
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid)
{
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
  u16 n_desc = 0;
  u64 or_qw1 = 0;
  u32 *bi, *to_next, n_left_to_next;
  vlib_buffer_t *bufs[AVF_RX_VECTOR_SZ];
  vlib_buffer_t *bt = &ptd->buffer_template;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u64 qw1s[AVF_RX_VECTOR_SZ];
  u16 next = rxq->next;
  u16 size = rxq->size;
  u16 mask = size - 1;
  avf_rx_desc_t *d, *fd = rxq->descs;
#ifdef CLIB_HAVE_VEC256
  u64x4 q1x4, or_q1x4 = { 0 };
  u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif
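
  /* the device sets DD (descriptor done) in qword[1] once it has written
     a packet; EOP marks the last descriptor of a packet */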
  /* is there anything on the ring */
  d = fd + next;
  if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
    goto done;

  if (PREDICT_FALSE (ad->per_interface_next_index != ~0))
    next_index = ad->per_interface_next_index;
  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  bi = to_next;

  while (n_desc < AVF_RX_VECTOR_SZ)
    {
      if (next + 11 < size)
	{
	  int stride = 8;
	  CLIB_PREFETCH ((void *) (fd + (next + stride)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 1)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 2)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH ((void *) (fd + (next + stride + 3)),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

#ifdef CLIB_HAVE_VEC256
      /* vector path: gather 4 status qwords and test DD and EOP for all
         of them with a single compare; fall back to scalar near the end
         of the vector or at the ring wrap point */
      if (n_desc >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
	goto one_by_one;

      q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
			   (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

      /* not all packets are ready or at least one of them is chained */
      if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
	goto one_by_one;

      or_q1x4 |= q1x4;
      u64x4_store_unaligned (q1x4, qw1s + n_desc);
      clib_memcpy_fast (bi, rxq->bufs + next, 4 * sizeof (u32));

      /* next */
      next = (next + 4) & mask;
      d = fd + next;
      n_desc += 4;
      bi += 4;
      continue;

    one_by_one:
#endif
      CLIB_PREFETCH ((void *) (fd + ((next + 8) & mask)),
		     CLIB_CACHE_LINE_BYTES, LOAD);
      if ((d->qword[1] & AVF_RXD_STATUS_DD) == 0)
	break;

      or_qw1 |= qw1s[n_desc] = d[0].qword[1];
      bi[0] = rxq->bufs[next];

      /* deal with chained buffers */
      while (PREDICT_FALSE ((d->qword[1] & AVF_RXD_STATUS_EOP) == 0))
	{
	  clib_error ("fixme");
	}

      /* next */
      next = (next + 1) & mask;
      d = fd + next;
      n_desc += 1;
      bi += 1;
    }

  if (n_desc == 0)
    goto done;

  rxq->next = next;
  rxq->n_enqueued -= n_desc;

#ifdef CLIB_HAVE_VEC256
  or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif

  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_va_dma */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_va_dma */ );

  vlib_get_buffers (vm, to_next, bufs, n_desc);
  n_rx_packets = n_desc;

  vnet_buffer (bt)->sw_if_index[VLIB_RX] = ad->sw_if_index;
  vnet_buffer (bt)->sw_if_index[VLIB_TX] = ~0;

  n_rx_bytes = avf_process_rx_burst (vm, node, bt, qw1s, bufs, n_desc);

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next;
      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  avf_input_trace_t *tr;
	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next_index;
	  tr->hw_if_index = ad->hw_if_index;

	  /* next */
	  n_trace--;
	  n_left--;
	  bi++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  /* all packets in this frame share one sw_if_index, so pass hints that
     let ethernet-input skip per-packet checks */
  if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
    {
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
      f = vlib_get_frame (vm, nf->frame_index);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = ad->sw_if_index;
      ef->hw_if_index = ad->hw_if_index;

      if ((or_qw1 & AVF_RXD_ERROR_IPE) == 0)
	f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
    }

  n_left_to_next -= n_rx_packets;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);

done:
  return n_rx_packets;
}
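
/* Input node dispatch: poll every device/queue pair assigned to this
   worker thread and sum the rx packet counts. */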
VLIB_NODE_FN (avf_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id);
  }

  return n_rx;
}

VLIB_REGISTER_NODE (avf_input_node) = {
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */