-/*------------------------------------------------------------------
+/*
+ *------------------------------------------------------------------
* af_packet.c - linux kernel packet interface
*
* Copyright (c) 2016 Cisco and/or its affiliates.
*/
#include <linux/if_packet.h>
-#include <linux/virtio_net.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
+#include <vnet/ethernet/packet.h>
#include <vnet/devices/af_packet/af_packet.h>
-#define foreach_af_packet_input_error
+#define foreach_af_packet_input_error \
+ _(PARTIAL_PKT, "partial packet")
typedef enum
{
b->next_buffer = 0;
}
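+/* Frames delivered with TP_STATUS_CSUMNOTREADY carry an L4 checksum the
+ * kernel has not computed (skb CHECKSUM_PARTIAL). Mark such buffers for
+ * TCP/UDP checksum offload and clear the stale checksum field so it is
+ * recomputed later from the offload flags. */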
+static_always_inline void
+mark_tcp_udp_cksum_calc (vlib_buffer_t * b)
+{
+ ethernet_header_t *eth = vlib_buffer_get_current (b);
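+ /* assumes an untagged Ethernet header; VLAN-tagged frames fall
+ * through without any offload flags set */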
+ if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP4)
+ {
+ ip4_header_t *ip4 =
+ (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+ b->flags |= VNET_BUFFER_F_IS_IP4;
+ if (ip4->protocol == IP_PROTOCOL_TCP)
+ {
+ b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ ((tcp_header_t
+ *) (vlib_buffer_get_current (b) +
+ sizeof (ethernet_header_t) +
+ ip4_header_bytes (ip4)))->checksum = 0;
+ }
+ else if (ip4->protocol == IP_PROTOCOL_UDP)
+ {
+ b->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ ((udp_header_t
+ *) (vlib_buffer_get_current (b) +
+ sizeof (ethernet_header_t) +
+ ip4_header_bytes (ip4)))->checksum = 0;
+ }
+ vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+ vnet_buffer (b)->l4_hdr_offset =
+ sizeof (ethernet_header_t) + ip4_header_bytes (ip4);
+ }
+ else if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP6)
+ {
+ ip6_header_t *ip6 =
+ (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+ b->flags |= VNET_BUFFER_F_IS_IP6;
+ u16 ip6_hdr_len = sizeof (ip6_header_t);
+ u8 l4_proto = ip6->protocol;
+ /* walk extension headers to find the upper-layer protocol and
+ * the full IPv6 header length */
+ if (ip6_ext_hdr (l4_proto))
+ {
+ ip6_ext_header_t *p = (void *) (ip6 + 1);
+ ip6_hdr_len += ip6_ext_header_len (p);
+ l4_proto = p->next_hdr;
+ while (ip6_ext_hdr (l4_proto))
+ {
+ p = ip6_ext_next_header (p);
+ ip6_hdr_len += ip6_ext_header_len (p);
+ l4_proto = p->next_hdr;
+ }
+ }
+ if (l4_proto == IP_PROTOCOL_TCP)
+ {
+ b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ ((tcp_header_t
+ *) (vlib_buffer_get_current (b) +
+ sizeof (ethernet_header_t) + ip6_hdr_len))->checksum = 0;
+ }
+ else if (l4_proto == IP_PROTOCOL_UDP)
+ {
+ b->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ ((udp_header_t
+ *) (vlib_buffer_get_current (b) +
+ sizeof (ethernet_header_t) + ip6_hdr_len))->checksum = 0;
+ }
+ vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+ vnet_buffer (b)->l4_hdr_offset =
+ sizeof (ethernet_header_t) + ip6_hdr_len;
+ }
+}
+
always_inline uword
af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, af_packet_if_t * apif)
u32 frame_num = apif->rx_req->tp_frame_nr;
u8 *block_start = apif->rx_ring + block * block_size;
uword n_trace = vlib_get_trace_count (vm, node);
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
n_left_to_next)
{
-
- struct virtio_net_hdr *vh =
- (struct virtio_net_hdr *) (((u8 *) tph) + tph->tp_mac -
- sizeof (struct virtio_net_hdr));
u32 data_len = tph->tp_snaplen;
u32 offset = 0;
u32 bi0 = 0, first_bi0 = 0, prev_bi0;
- u32 vlan_len = 0;
- ip_csum_t wsum = 0;
- u16 *wsum_addr = NULL;
- u32 do_vnet = apm->flags & AF_PACKET_USES_VNET_HEADERS;
- u32 do_csum = tph->tp_status & TP_STATUS_CSUMNOTREADY;
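+ /* copy the frame out of the ring slot, chaining extra vlib buffers
+ * when it does not fit into a single one */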
while (data_len)
{
/* copy data */
u32 bytes_to_copy =
data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ u32 vlan_len = 0;
u32 bytes_copied = 0;
b0->current_data = 0;
/* Kernel removes VLAN headers, so reconstruct VLAN */
bytes_copied = sizeof (ethernet_header_t);
}
}
- /* Check if the incoming skb is marked as CSUM_PARTIAL,
- * If VNET Headers are enabled TP_STATUS_CSUMNOTREADY is
- * equivalent to the vnet csum flag.
- **/
- if (PREDICT_TRUE ((do_vnet != 0) && (do_csum != 0)))
- {
- wsum_addr = (u16 *) (((u8 *) vlib_buffer_get_current (b0)) +
- vlan_len + vh->csum_start +
- vh->csum_offset);
- if (bytes_copied <= vh->csum_start)
- {
- clib_memcpy (((u8 *) vlib_buffer_get_current (b0)) +
- bytes_copied + vlan_len,
- (u8 *) tph + tph->tp_mac + offset +
- bytes_copied,
- (vh->csum_start - bytes_copied));
- wsum =
- ip_csum_and_memcpy (wsum,
- ((u8 *)
- vlib_buffer_get_current (b0)) +
- vh->csum_start + vlan_len,
- (u8 *) tph + tph->tp_mac +
- offset + vh->csum_start,
- (bytes_to_copy - vh->csum_start));
- }
- else
- {
- wsum =
- ip_csum_and_memcpy (wsum,
- ((u8 *)
- vlib_buffer_get_current (b0)) +
- bytes_copied + vlan_len,
- (u8 *) tph + tph->tp_mac +
- offset + bytes_copied,
- (bytes_to_copy - bytes_copied));
- }
- }
- else
- {
- clib_memcpy (((u8 *) vlib_buffer_get_current (b0)) +
- bytes_copied + vlan_len,
- (u8 *) tph + tph->tp_mac + offset +
- bytes_copied, (bytes_to_copy - bytes_copied));
- }
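+ /* plain copy from the ring slot into the current vlib buffer */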
+ clib_memcpy (((u8 *) vlib_buffer_get_current (b0)) +
+ bytes_copied + vlan_len,
+ (u8 *) tph + tph->tp_mac + offset + bytes_copied,
+ (bytes_to_copy - bytes_copied));
/* fill buffer header */
b0->current_length = bytes_to_copy + vlan_len;
vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
first_bi0 = bi0;
first_b0 = vlib_get_buffer (vm, first_bi0);
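+ /* the kernel has not computed the L4 checksum for this frame
+ * (CHECKSUM_PARTIAL), so request checksum offload */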
+ if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
+ mark_tcp_udp_cksum_calc (first_b0);
}
else
buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
offset += bytes_to_copy;
data_len -= bytes_to_copy;
}
- if (PREDICT_TRUE ((do_vnet != 0) && (do_csum != 0)))
- {
- *wsum_addr = ~ip_csum_fold (wsum);
- }
n_rx_packets++;
n_rx_bytes += tph->tp_snaplen;
to_next[0] = first_bi0;
to_next += 1;
n_left_to_next--;
+ /* drop partial packets: the captured length differs from the
+ * frame length, i.e. the kernel truncated the frame */
+ if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
+ {
+ next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+ first_b0->error =
+ node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
+ }
+ else
+ {
+ next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0,
+ first_b0);
+ }
+
/* trace */
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
if (PREDICT_FALSE (n_trace > 0))
clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
}
- /* redirect if feature path enabled */
- vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0, b0);
-
/* enque and take next packet */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, first_bi0, next0);