/*
 *------------------------------------------------------------------
 * af_packet.c - linux kernel packet interface
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <linux/if_packet.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>
#include <vnet/ethernet/packet.h>

#include <af_packet/af_packet.h>
#include <vnet/devices/virtio/virtio_std.h>

#define foreach_af_packet_input_error                                         \
  _ (PARTIAL_PKT, "partial packet")                                           \
  _ (TIMEDOUT_BLK, "timed out block")                                         \
  _ (TOTAL_RECV_BLK, "total received block")

typedef enum
{
#define _(f,s) AF_PACKET_INPUT_ERROR_##f,
  foreach_af_packet_input_error
#undef _
    AF_PACKET_INPUT_N_ERROR,
} af_packet_input_error_t;

static char *af_packet_input_error_strings[] = {
#define _(n,s) s,
  foreach_af_packet_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 queue_id;
  int block;
  u32 pkt_num;
  void *block_start;
  block_desc_t bd;
  union
  {
    tpacket3_hdr_t tph3;
    tpacket2_hdr_t tph2;
  };
  vnet_virtio_net_hdr_t vnet_hdr;
  u8 is_v3;
} af_packet_input_trace_t;

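/* Trace formatter: prints the tpacket2/tpacket3 header, the block
   descriptor (v3 only) and the virtio net header recorded for a traced
   packet. */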
static u8 *
format_af_packet_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "af_packet: hw_if_index %d rx-queue %u next-index %d",
	      t->hw_if_index, t->queue_id, t->next_index);

  if (t->is_v3)
    {
      s = format (
	s, "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu pkt_num %u",
	format_white_space, indent + 2, t->block, format_white_space,
	indent + 4, t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num,
	t->pkt_num);
      s = format (
	s,
	"\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
	"\n%Usec 0x%x nsec 0x%x vlan %U"
#ifdef TP_STATUS_VLAN_TPID_VALID
	" vlan_tpid %u"
#endif
	,
	format_white_space, indent + 2, format_white_space, indent + 4,
	t->tph3.tp_status, t->tph3.tp_len, t->tph3.tp_snaplen, t->tph3.tp_mac,
	t->tph3.tp_net, format_white_space, indent + 4, t->tph3.tp_sec,
	t->tph3.tp_nsec, format_ethernet_vlan_tci, t->tph3.hv1.tp_vlan_tci
#ifdef TP_STATUS_VLAN_TPID_VALID
	,
	t->tph3.hv1.tp_vlan_tpid
#endif
      );
    }
  else
    {
      s = format (
	s,
	"\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
	"\n%Usec 0x%x nsec 0x%x vlan %U"
#ifdef TP_STATUS_VLAN_TPID_VALID
	" vlan_tpid %u"
#endif
	,
	format_white_space, indent + 2, format_white_space, indent + 4,
	t->tph2.tp_status, t->tph2.tp_len, t->tph2.tp_snaplen, t->tph2.tp_mac,
	t->tph2.tp_net, format_white_space, indent + 4, t->tph2.tp_sec,
	t->tph2.tp_nsec, format_ethernet_vlan_tci, t->tph2.tp_vlan_tci
#ifdef TP_STATUS_VLAN_TPID_VALID
	,
	t->tph2.tp_vlan_tpid
#endif
      );
    }
  s = format (s,
	      "\n%Uvnet-hdr:\n%Uflags 0x%02x gso_type 0x%02x hdr_len %u"
	      "\n%Ugso_size %u csum_start %u csum_offset %u",
	      format_white_space, indent + 2, format_white_space, indent + 4,
	      t->vnet_hdr.flags, t->vnet_hdr.gso_type, t->vnet_hdr.hdr_len,
	      format_white_space, indent + 4, t->vnet_hdr.gso_size,
	      t->vnet_hdr.csum_start, t->vnet_hdr.csum_offset);
  return s;
}

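/* Link buffer b (just filled from the ring) onto the chain headed by
   first_b: account its length in the head buffer and point the previous
   buffer's next_buffer at it. */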
static_always_inline void
buffer_add_to_chain (vlib_buffer_t *b, vlib_buffer_t *first_b,
		     vlib_buffer_t *prev_b, u32 bi)
{
  /* update first buffer */
  first_b->total_length_not_including_first_buffer += b->current_length;

  /* update previous buffer */
  prev_b->next_buffer = bi;
  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

  /* update current buffer */
  b->next_buffer = ~0;
}

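/* Mark the head buffer for GSO, using the gso_size the kernel supplied
   in the virtio net header. */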
static_always_inline void
fill_gso_offload (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
{
  b->flags |= VNET_BUFFER_F_GSO;
  vnet_buffer2 (b)->gso_size = gso_size;
  vnet_buffer2 (b)->gso_l4_hdr_sz = l4_hdr_sz;
}

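/* Parse the L2/L3 headers of a packet that needs checksum offload, set
   the l2/l3/l4 header offsets, and request TCP or UDP checksum
   computation.  In IP mode there is no ethernet header, so the IP
   version nibble of the first byte selects the ethertype. */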
static_always_inline void
fill_cksum_offload (vlib_buffer_t *b, u8 *l4_hdr_sz, u8 is_ip)
{
  vnet_buffer_oflags_t oflags = 0;
  u16 l2hdr_sz = 0;
  u16 ethertype = 0;
  u8 l4_proto = 0;

  if (is_ip)
    {
      switch (b->data[0] & 0xf0)
	{
	case 0x40:
	  ethertype = ETHERNET_TYPE_IP4;
	  break;
	case 0x60:
	  ethertype = ETHERNET_TYPE_IP6;
	  break;
	}
    }
  else
    {
      ethernet_header_t *eth = (ethernet_header_t *) b->data;
      ethertype = clib_net_to_host_u16 (eth->type);
      l2hdr_sz = sizeof (ethernet_header_t);
      if (ethernet_frame_is_tagged (ethertype))
	{
	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eth + 1);

	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	  if (ethertype == ETHERNET_TYPE_VLAN)
	    {
	      vlan++;
	      ethertype = clib_net_to_host_u16 (vlan->type);
	      l2hdr_sz += sizeof (*vlan);
	    }
	}
    }

  vnet_buffer (b)->l2_hdr_offset = 0;
  vnet_buffer (b)->l3_hdr_offset = l2hdr_sz;

  if (ethertype == ETHERNET_TYPE_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b->data + l2hdr_sz);
      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
      b->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

      l4_proto = ip4->protocol;
    }
  else if (ethertype == ETHERNET_TYPE_IP6)
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b->data + l2hdr_sz);
      b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
      u16 ip6_hdr_len = sizeof (ip6_header_t);

      if (ip6_ext_hdr (ip6->protocol))
	{
	  ip6_ext_header_t *p = (void *) (ip6 + 1);
	  ip6_hdr_len += ip6_ext_header_len (p);
	  while (ip6_ext_hdr (p->next_hdr))
	    {
	      ip6_hdr_len += ip6_ext_header_len (p);
	      p = ip6_ext_next_header (p);
	    }
	  l4_proto = p->next_hdr;
	}
      else
	l4_proto = ip6->protocol;
      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip6_hdr_len;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
      tcp_header_t *tcp =
	(tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
      *l4_hdr_sz = tcp_header_bytes (tcp);
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
      *l4_hdr_sz = sizeof (udp_header_t);
    }

  if (oflags)
    vnet_buffer_offload_flags_set (b, oflags);
}

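/*
 * TPACKET_V3 RX path: the kernel hands over a whole block of frames at
 * once.  The block descriptor gives the packet count and the offset of
 * the first frame; each tpacket3_hdr_t chains to the next frame via
 * tp_next_offset.  Once every packet in a block has been enqueued, the
 * block is returned to the kernel by writing TP_STATUS_KERNEL into its
 * status field.  If buffers run out mid-block, progress is saved in the
 * rx queue (is_rx_pending) and resumed on the next dispatch.
 */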
always_inline uword
af_packet_v3_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
			      vlib_frame_t *frame, af_packet_if_t *apif,
			      u16 queue_id, u8 is_cksum_gso_enabled)
{
  af_packet_main_t *apm = &af_packet_main;
  af_packet_queue_t *rx_queue = vec_elt_at_index (apif->rx_queues, queue_id);
  tpacket3_hdr_t *tph;
  u32 next_index;
  u32 n_free_bufs;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 timedout_blk = 0;
  u32 total = 0;
  u32 *to_next = 0;
  u32 block = rx_queue->next_rx_block;
  u32 block_nr = rx_queue->rx_req->req3.tp_block_nr;
  u8 *block_start = 0;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 thread_index = vm->thread_index;
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  u32 min_bufs = rx_queue->rx_req->req3.tp_frame_size / n_buffer_bytes;
  u32 num_pkts = 0;
  u32 rx_frame_offset = 0;
  block_desc_t *bd = 0;
  u32 sw_if_index = apif->sw_if_index;
  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);

  if (is_ip)
    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
  else
    next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  if ((((block_desc_t *) (block_start = rx_queue->rx_ring[block]))
	 ->hdr.bh1.block_status &
       TP_STATUS_USER) != 0)
    {
      u32 n_required = 0;
      bd = (block_desc_t *) block_start;

      if (PREDICT_FALSE (rx_queue->is_rx_pending))
	{
	  num_pkts = rx_queue->num_rx_pkts;
	  rx_frame_offset = rx_queue->rx_frame_offset;
	  rx_queue->is_rx_pending = 0;
	}
      else
	{
	  num_pkts = bd->hdr.bh1.num_pkts;
	  rx_frame_offset = bd->hdr.bh1.offset_to_first_pkt;
	  total++;

	  if (TP_STATUS_BLK_TMO & bd->hdr.bh1.block_status)
	    timedout_blk++;
	}

      n_required = clib_max (num_pkts, VLIB_FRAME_SIZE);
      n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
      if (PREDICT_FALSE (n_free_bufs < n_required))
	{
	  vec_validate (apm->rx_buffers[thread_index],
			n_required + n_free_bufs - 1);
	  n_free_bufs += vlib_buffer_alloc (
	    vm, &apm->rx_buffers[thread_index][n_free_bufs], n_required);
	  vec_set_len (apm->rx_buffers[thread_index], n_free_bufs);
	}

      while (num_pkts && (n_free_bufs >= min_bufs))
	{
	  u32 next0 = next_index;
	  u32 n_left_to_next;

	  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	  while (num_pkts && n_left_to_next && (n_free_bufs >= min_bufs))
	    {
	      tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);

	      if (num_pkts > 1)
		CLIB_PREFETCH (block_start + rx_frame_offset +
				 tph->tp_next_offset,
			       2 * CLIB_CACHE_LINE_BYTES, LOAD);

	      vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
	      vnet_virtio_net_hdr_t *vnet_hdr = 0;
	      u32 data_len = tph->tp_snaplen;
	      u32 offset = 0;
	      u32 bi0 = ~0, first_bi0 = ~0;
	      u8 l4_hdr_sz = 0;

	      if (is_cksum_gso_enabled)
		vnet_hdr =
		  (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
					     sizeof (vnet_virtio_net_hdr_t));

	      // save current state and return
	      if (PREDICT_FALSE (((data_len / n_buffer_bytes) + 1) >
				 vec_len (apm->rx_buffers[thread_index])))
		{
		  rx_queue->rx_frame_offset = rx_frame_offset;
		  rx_queue->num_rx_pkts = num_pkts;
		  rx_queue->is_rx_pending = 1;
		  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
		  goto done;
		}

	      while (data_len)
		{
		  /* grab free buffer */
		  u32 last_empty_buffer =
		    vec_len (apm->rx_buffers[thread_index]) - 1;
		  bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
		  vec_set_len (apm->rx_buffers[thread_index],
			       last_empty_buffer);
		  n_free_bufs--;

		  /* copy data */
		  u32 bytes_to_copy =
		    data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
		  u32 vlan_len = 0;
		  u32 bytes_copied = 0;

		  b0 = vlib_get_buffer (vm, bi0);
		  b0->current_data = 0;

		  /* Kernel removes VLAN headers, so reconstruct VLAN */
		  if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
		    {
		      if (PREDICT_TRUE (offset == 0))
			{
			  clib_memcpy_fast (vlib_buffer_get_current (b0),
					    (u8 *) tph + tph->tp_mac,
					    sizeof (ethernet_header_t));
			  ethernet_header_t *eth =
			    vlib_buffer_get_current (b0);
			  ethernet_vlan_header_t *vlan =
			    (ethernet_vlan_header_t *) (eth + 1);
			  vlan->priority_cfi_and_id =
			    clib_host_to_net_u16 (tph->hv1.tp_vlan_tci);
			  vlan->type = eth->type;
			  eth->type =
			    clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
			  vlan_len = sizeof (ethernet_vlan_header_t);
			  bytes_copied = sizeof (ethernet_header_t);
			}
		    }
		  clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
				      bytes_copied + vlan_len,
				    (u8 *) tph + tph->tp_mac + offset +
				      bytes_copied,
				    (bytes_to_copy - bytes_copied));

		  /* fill buffer header */
		  b0->current_length = bytes_to_copy + vlan_len;

		  if (offset == 0)
		    {
		      b0->total_length_not_including_first_buffer = 0;
		      b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
		      vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index;
		      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
		      first_b0 = b0;
		      first_bi0 = bi0;

		      if (is_cksum_gso_enabled)
			{
			  if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
			    fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
			  if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
						    VIRTIO_NET_HDR_GSO_TCPV6))
			    fill_gso_offload (first_b0, vnet_hdr->gso_size,
					      l4_hdr_sz);
			}
		    }
		  else
		    buffer_add_to_chain (b0, first_b0, prev_b0, bi0);

		  prev_b0 = b0;
		  offset += bytes_to_copy;
		  data_len -= bytes_to_copy;
		}
	      n_rx_packets++;
	      n_rx_bytes += tph->tp_snaplen;
	      to_next[0] = first_bi0;
	      to_next += 1;
	      n_left_to_next--;

	      /* drop partial packets */
	      if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
		{
		  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
		  first_b0->error =
		    node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
		}
	      else
		{
		  if (PREDICT_FALSE (apif->mode == AF_PACKET_IF_MODE_IP))
		    {
		      switch (first_b0->data[0] & 0xf0)
			{
			case 0x40:
			  next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
			  break;
			case 0x60:
			  next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
			  break;
			default:
			  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
			  break;
			}
		      if (PREDICT_FALSE (apif->per_interface_next_index !=
					 ~0))
			next0 = apif->per_interface_next_index;
		    }
		  else
		    {
		      next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
		      if (PREDICT_FALSE (apif->per_interface_next_index !=
					 ~0))
			next0 = apif->per_interface_next_index;
		      /* redirect if feature path enabled */
		      vnet_feature_start_device_input (sw_if_index, &next0,
						       first_b0);
		    }
		}

	      /* trace */
	      if (PREDICT_FALSE (n_trace > 0 &&
				 vlib_trace_buffer (vm, node, next0, first_b0,
						    /* follow_chain */ 0)))
		{
		  af_packet_input_trace_t *tr;
		  vlib_set_trace_count (vm, node, --n_trace);
		  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
		  tr->is_v3 = 1;
		  tr->next_index = next0;
		  tr->hw_if_index = apif->hw_if_index;
		  tr->queue_id = queue_id;
		  tr->block = block;
		  tr->block_start = bd;
		  tr->pkt_num = bd->hdr.bh1.num_pkts - num_pkts;
		  clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
		  clib_memcpy_fast (&tr->tph3, tph, sizeof (tpacket3_hdr_t));
		  if (is_cksum_gso_enabled)
		    clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
				      sizeof (vnet_virtio_net_hdr_t));
		  else
		    clib_memset_u8 (&tr->vnet_hdr, 0,
				    sizeof (vnet_virtio_net_hdr_t));
		}

	      /* enque and take next packet */
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					       n_left_to_next, first_bi0,
					       next0);

	      /* next packet */
	      num_pkts--;
	      rx_frame_offset += tph->tp_next_offset;
	    }

	  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	}

      if (PREDICT_TRUE (num_pkts == 0))
	{
	  bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
	  block = (block + 1) % block_nr;
	}
      else
	{
	  rx_queue->rx_frame_offset = rx_frame_offset;
	  rx_queue->num_rx_pkts = num_pkts;
	  rx_queue->is_rx_pending = 1;
	}
    }

done:
  rx_queue->next_rx_block = block;

  if (apm->polling_count == 0)
    {
      if ((((block_desc_t *) (block_start = rx_queue->rx_ring[block]))
	     ->hdr.bh1.block_status &
	   TP_STATUS_USER) != 0)
	vlib_node_set_state (vm, node->node_index, VLIB_NODE_STATE_POLLING);
      else
	vlib_node_set_state (vm, node->node_index, VLIB_NODE_STATE_INTERRUPT);
    }

  vlib_error_count (vm, node->node_index, AF_PACKET_INPUT_ERROR_TOTAL_RECV_BLK,
		    total);
  vlib_error_count (vm, node->node_index, AF_PACKET_INPUT_ERROR_TIMEDOUT_BLK,
		    timedout_blk);

  vlib_increment_combined_counter
    (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
  return n_rx_packets;
}

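/*
 * TPACKET_V2 RX path: frames are polled one at a time from a fixed-size
 * frame ring.  Each frame is handed back to the kernel individually by
 * writing TP_STATUS_KERNEL into its tp_status once it has been copied
 * into vlib buffers and enqueued.
 */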
always_inline uword
af_packet_v2_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
			      vlib_frame_t *frame, af_packet_if_t *apif,
			      u16 queue_id, u8 is_cksum_gso_enabled)
{
  af_packet_main_t *apm = &af_packet_main;
  af_packet_queue_t *rx_queue = vec_elt_at_index (apif->rx_queues, queue_id);
  tpacket2_hdr_t *tph;
  u32 next_index;
  u32 block = 0;
  u32 rx_frame;
  u32 n_free_bufs;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 *to_next = 0;
  u32 frame_size = rx_queue->rx_req->req.tp_frame_size;
  u32 frame_num = rx_queue->rx_req->req.tp_frame_nr;
  u8 *block_start = rx_queue->rx_ring[block];
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 thread_index = vm->thread_index;
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  u32 min_bufs = rx_queue->rx_req->req.tp_frame_size / n_buffer_bytes;
  u32 sw_if_index = apif->sw_if_index;
  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);

  if (is_ip)
    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
  else
    next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
  if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
    {
      vec_validate (apm->rx_buffers[thread_index],
		    VLIB_FRAME_SIZE + n_free_bufs - 1);
      n_free_bufs += vlib_buffer_alloc (
	vm, &apm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE);
      vec_set_len (apm->rx_buffers[thread_index], n_free_bufs);
    }

  rx_frame = rx_queue->next_rx_frame;
  tph = (tpacket2_hdr_t *) (block_start + rx_frame * frame_size);
  while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
    {
      vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
      u32 next0 = next_index;

      u32 n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
	     n_left_to_next)
	{
	  vnet_virtio_net_hdr_t *vnet_hdr = 0;
	  u32 data_len = tph->tp_snaplen;
	  u32 offset = 0;
	  u32 bi0 = 0, first_bi0 = 0;
	  u8 l4_hdr_sz = 0;

	  if (is_cksum_gso_enabled)
	    vnet_hdr =
	      (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
					 sizeof (vnet_virtio_net_hdr_t));
	  while (data_len)
	    {
	      /* grab free buffer */
	      u32 last_empty_buffer =
		vec_len (apm->rx_buffers[thread_index]) - 1;
	      bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
	      b0 = vlib_get_buffer (vm, bi0);
	      vec_set_len (apm->rx_buffers[thread_index], last_empty_buffer);
	      n_free_bufs--;

	      /* copy data */
	      u32 bytes_to_copy =
		data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
	      u32 vlan_len = 0;
	      u32 bytes_copied = 0;
	      b0->current_data = 0;
	      /* Kernel removes VLAN headers, so reconstruct VLAN */
	      if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
		{
		  if (PREDICT_TRUE (offset == 0))
		    {
		      clib_memcpy_fast (vlib_buffer_get_current (b0),
					(u8 *) tph + tph->tp_mac,
					sizeof (ethernet_header_t));
		      ethernet_header_t *eth = vlib_buffer_get_current (b0);
		      ethernet_vlan_header_t *vlan =
			(ethernet_vlan_header_t *) (eth + 1);
		      vlan->priority_cfi_and_id =
			clib_host_to_net_u16 (tph->tp_vlan_tci);
		      vlan->type = eth->type;
		      eth->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
		      vlan_len = sizeof (ethernet_vlan_header_t);
		      bytes_copied = sizeof (ethernet_header_t);
		    }
		}
	      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
				  bytes_copied + vlan_len,
				(u8 *) tph + tph->tp_mac + offset +
				  bytes_copied,
				(bytes_to_copy - bytes_copied));

	      /* fill buffer header */
	      b0->current_length = bytes_to_copy + vlan_len;

	      if (offset == 0)
		{
		  b0->total_length_not_including_first_buffer = 0;
		  b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
		  vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index;
		  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
		  first_bi0 = bi0;
		  first_b0 = vlib_get_buffer (vm, first_bi0);

		  if (is_cksum_gso_enabled)
		    {
		      if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
			fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
		      if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
						VIRTIO_NET_HDR_GSO_TCPV6))
			fill_gso_offload (first_b0, vnet_hdr->gso_size,
					  l4_hdr_sz);
		    }
		}
	      else
		buffer_add_to_chain (b0, first_b0, prev_b0, bi0);

	      prev_b0 = b0;
	      offset += bytes_to_copy;
	      data_len -= bytes_to_copy;
	    }
	  n_rx_packets++;
	  n_rx_bytes += tph->tp_snaplen;
	  to_next[0] = first_bi0;
	  to_next += 1;
	  n_left_to_next--;

	  /* drop partial packets */
	  if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
	    {
	      next0 = VNET_DEVICE_INPUT_NEXT_DROP;
	      first_b0->error =
		node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
	    }
	  else
	    {
	      if (PREDICT_FALSE (is_ip))
		{
		  switch (first_b0->data[0] & 0xf0)
		    {
		    case 0x40:
		      next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
		      break;
		    case 0x60:
		      next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
		      break;
		    default:
		      next0 = VNET_DEVICE_INPUT_NEXT_DROP;
		      break;
		    }
		  if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
		    next0 = apif->per_interface_next_index;
		}
	      else
		{
		  next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
		  if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
		    next0 = apif->per_interface_next_index;
		  /* redirect if feature path enabled */
		  vnet_feature_start_device_input (sw_if_index, &next0,
						   first_b0);
		}
	    }

	  /* trace */
	  if (PREDICT_FALSE (n_trace > 0 &&
			     vlib_trace_buffer (vm, node, next0, first_b0,
						/* follow_chain */ 0)))
	    {
	      af_packet_input_trace_t *tr;
	      vlib_set_trace_count (vm, node, --n_trace);
	      tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
	      tr->is_v3 = 0;
	      tr->next_index = next0;
	      tr->hw_if_index = apif->hw_if_index;
	      tr->queue_id = queue_id;
	      clib_memcpy_fast (&tr->tph2, tph, sizeof (struct tpacket2_hdr));
	      if (is_cksum_gso_enabled)
		clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
				  sizeof (vnet_virtio_net_hdr_t));
	      else
		clib_memset_u8 (&tr->vnet_hdr, 0,
				sizeof (vnet_virtio_net_hdr_t));
	    }

	  /* enque and take next packet */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, first_bi0, next0);

	  /* next packet */
	  tph->tp_status = TP_STATUS_KERNEL;
	  rx_frame = (rx_frame + 1) % frame_num;
	  tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  rx_queue->next_rx_frame = rx_frame;

  vlib_increment_combined_counter (
    vnet_get_main ()->interface_main.combined_sw_if_counters +
      VNET_INTERFACE_COUNTER_RX,
    vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
  return n_rx_packets;
}

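/* Dispatch to the TPACKET_V3 or TPACKET_V2 receive routine, based on the
   ring version negotiated when the interface was created. */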
always_inline uword
af_packet_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
			   vlib_frame_t *frame, af_packet_if_t *apif,
			   u16 queue_id, u8 is_cksum_gso_enabled)
{
  if (apif->version == TPACKET_V3)
    return af_packet_v3_device_input_fn (vm, node, frame, apif, queue_id,
					 is_cksum_gso_enabled);
  else
    return af_packet_v2_device_input_fn (vm, node, frame, apif, queue_id,
					 is_cksum_gso_enabled);
}

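/* Input node dispatch: walk the rx-queue poll vector supplied by the
   interface layer and service every admin-up interface, passing down
   whether checksum/GSO offload is enabled. */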
VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
				     vlib_node_runtime_t * node,
				     vlib_frame_t * frame)
{
  u32 n_rx_packets = 0;
  af_packet_main_t *apm = &af_packet_main;
  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      af_packet_if_t *apif;
      apif = vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
      if (apif->is_admin_up)
	{
	  if (apif->is_cksum_gso_enabled)
	    n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif,
						       pv[i].queue_id, 1);
	  else
	    n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif,
						       pv[i].queue_id, 0);
	}
    }
  return n_rx_packets;
}

VLIB_REGISTER_NODE (af_packet_input_node) = {
  .name = "af-packet-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_af_packet_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = AF_PACKET_INPUT_N_ERROR,
  .error_strings = af_packet_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */