X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Faf_packet%2Fnode.c;h=caddcfa416bd4bb633fba53b8dd8d2d4f82495a8;hb=7d0e30bc6;hp=99c91f388056f80f799d8938a39cccfeca30ad93;hpb=535f0bfe0274e86c5d2e00dfd66dd632c6ae20a9;p=vpp.git

diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
index 99c91f38805..caddcfa416b 100644
--- a/src/vnet/devices/af_packet/node.c
+++ b/src/vnet/devices/af_packet/node.c
@@ -23,12 +23,14 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
+#include 
 #include 
 
-#define foreach_af_packet_input_error
+#define foreach_af_packet_input_error \
+  _(PARTIAL_PKT, "partial packet")
 
 typedef enum
 {
@@ -58,7 +60,7 @@ format_af_packet_input_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   s = format (s, "af_packet: hw_if_index %d next-index %d",
               t->hw_if_index, t->next_index);
@@ -106,6 +108,90 @@ buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
   b->next_buffer = 0;
 }
 
+static_always_inline void
+fill_gso_buffer_flags (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
+{
+  b->flags |= VNET_BUFFER_F_GSO;
+  vnet_buffer2 (b)->gso_size = gso_size;
+  vnet_buffer2 (b)->gso_l4_hdr_sz = l4_hdr_sz;
+}
+
+static_always_inline void
+mark_tcp_udp_cksum_calc (vlib_buffer_t *b, u8 *l4_hdr_sz)
+{
+  ethernet_header_t *eth = vlib_buffer_get_current (b);
+  vnet_buffer_oflags_t oflags = 0;
+  if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP4)
+    {
+      ip4_header_t *ip4 =
+        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+      b->flags |= VNET_BUFFER_F_IS_IP4;
+      if (ip4->protocol == IP_PROTOCOL_TCP)
+        {
+          oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
+          tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
+                                                sizeof (ethernet_header_t) +
+                                                ip4_header_bytes (ip4));
+          tcp->checksum = 0;
+          *l4_hdr_sz = tcp_header_bytes (tcp);
+        }
+      else if (ip4->protocol == IP_PROTOCOL_UDP)
+        {
+          oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+          udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b) +
+                                                sizeof (ethernet_header_t) +
+                                                ip4_header_bytes (ip4));
+          udp->checksum = 0;
+          *l4_hdr_sz = sizeof (*udp);
+        }
+      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+      vnet_buffer (b)->l4_hdr_offset =
+        sizeof (ethernet_header_t) + ip4_header_bytes (ip4);
+      if (oflags)
+        vnet_buffer_offload_flags_set (b, oflags);
+    }
+  else if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP6)
+    {
+      ip6_header_t *ip6 =
+        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+      b->flags |= VNET_BUFFER_F_IS_IP6;
+      u16 ip6_hdr_len = sizeof (ip6_header_t);
+      if (ip6_ext_hdr (ip6->protocol))
+        {
+          ip6_ext_header_t *p = (void *) (ip6 + 1);
+          ip6_hdr_len += ip6_ext_header_len (p);
+          while (ip6_ext_hdr (p->next_hdr))
+            {
+              ip6_hdr_len += ip6_ext_header_len (p);
+              p = ip6_ext_next_header (p);
+            }
+        }
+      if (ip6->protocol == IP_PROTOCOL_TCP)
+        {
+          oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
+          tcp_header_t *tcp =
+            (tcp_header_t *) (vlib_buffer_get_current (b) +
                              sizeof (ethernet_header_t) + ip6_hdr_len);
+          tcp->checksum = 0;
+          *l4_hdr_sz = tcp_header_bytes (tcp);
+        }
+      else if (ip6->protocol == IP_PROTOCOL_UDP)
+        {
+          oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+          udp_header_t *udp =
+            (udp_header_t *) (vlib_buffer_get_current (b) +
+                              sizeof (ethernet_header_t) + ip6_hdr_len);
+          udp->checksum = 0;
+          *l4_hdr_sz = sizeof (*udp);
+        }
+      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+      vnet_buffer (b)->l4_hdr_offset =
+        sizeof (ethernet_header_t) + ip6_hdr_len;
+      if (oflags)
+        vnet_buffer_offload_flags_set (b, oflags);
+    }
+}
+
 always_inline uword
 af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, af_packet_if_t * apif)
@@ -124,14 +210,10 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 frame_num = apif->rx_req->tp_frame_nr;
   u8 *block_start = apif->rx_ring + block * block_size;
   uword n_trace = vlib_get_trace_count (vm, node);
-  u32 thread_index = vlib_get_thread_index ();
-  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
-                                                          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+  u32 thread_index = vm->thread_index;
+  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
   u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
 
-  if (apif->per_interface_next_index != ~0)
-    next_index = apif->per_interface_next_index;
-
   n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
   if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
     {
@@ -158,6 +240,7 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
       u32 data_len = tph->tp_snaplen;
       u32 offset = 0;
       u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+      u8 l4_hdr_sz = 0;
 
       while (data_len)
        {
@@ -181,9 +264,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
            {
              if (PREDICT_TRUE (offset == 0))
                {
-                 clib_memcpy (vlib_buffer_get_current (b0),
-                              (u8 *) tph + tph->tp_mac,
-                              sizeof (ethernet_header_t));
+                 clib_memcpy_fast (vlib_buffer_get_current (b0),
+                                   (u8 *) tph + tph->tp_mac,
+                                   sizeof (ethernet_header_t));
                  ethernet_header_t *eth = vlib_buffer_get_current (b0);
                  ethernet_vlan_header_t *vlan =
                    (ethernet_vlan_header_t *) (eth + 1);
@@ -195,10 +278,10 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                      bytes_copied = sizeof (ethernet_header_t);
                    }
                }
-             clib_memcpy (((u8 *) vlib_buffer_get_current (b0)) +
-                          bytes_copied + vlan_len,
-                          (u8 *) tph + tph->tp_mac + offset + bytes_copied,
-                          (bytes_to_copy - bytes_copied));
+             clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
+                                 bytes_copied + vlan_len,
+                               (u8 *) tph + tph->tp_mac + offset +
+                                 bytes_copied, (bytes_to_copy - bytes_copied));
 
              /* fill buffer header */
              b0->current_length = bytes_to_copy + vlan_len;
@@ -211,6 +294,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
                  first_bi0 = bi0;
                  first_b0 = vlib_get_buffer (vm, first_bi0);
+                 if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
+                   mark_tcp_udp_cksum_calc (first_b0, &l4_hdr_sz);
+                 if (tph->tp_snaplen > apif->host_mtu)
+                   fill_gso_buffer_flags (first_b0, apif->host_mtu,
+                                          l4_hdr_sz);
                }
              else
                buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
@@ -224,23 +312,38 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
       to_next += 1;
       n_left_to_next--;
 
+      /* drop partial packets */
+      if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
+       {
+         next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+         first_b0->error =
+           node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
+       }
+      else
+       {
+         next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+         if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+           next0 = apif->per_interface_next_index;
+
+         /* redirect if feature path enabled */
+         vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0,
+                                             first_b0);
+       }
+
       /* trace */
-      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
-      if (PREDICT_FALSE (n_trace > 0))
+      if (PREDICT_FALSE
+         (n_trace > 0 && vlib_trace_buffer (vm, node, next0, first_b0,
+                                            /* follow_chain */ 0)))
       {
         af_packet_input_trace_t *tr;
-        vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */
-                           0);
         vlib_set_trace_count (vm, node, --n_trace);
         tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
         tr->next_index = next0;
         tr->hw_if_index = apif->hw_if_index;
-        clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
+        clib_memcpy_fast (&tr->tph, tph, sizeof (struct tpacket2_hdr));
       }
 
-      /* redirect if feature path enabled */
-      vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0, b0);
-
       /* enque and take next packet */
       vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                        n_left_to_next, first_bi0, next0);
@@ -265,30 +368,28 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   return n_rx_packets;
 }
 
-static uword
-af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                    vlib_frame_t * frame)
+VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * frame)
 {
   u32 n_rx_packets = 0;
   af_packet_main_t *apm = &af_packet_main;
-  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
-  vnet_device_and_queue_t *dq;
-
-  foreach_device_and_queue (dq, rt->devices_and_queues)
-  {
-    af_packet_if_t *apif;
-    apif = vec_elt_at_index (apm->interfaces, dq->dev_instance);
-    if (apif->is_admin_up)
-      n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
-  }
+  vnet_hw_if_rxq_poll_vector_t *pv;
+  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
+  for (int i = 0; i < vec_len (pv); i++)
+    {
+      af_packet_if_t *apif;
+      apif = vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
+      if (apif->is_admin_up)
+        n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
+    }
 
   return n_rx_packets;
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (af_packet_input_node) = {
-  .function = af_packet_input_fn,
   .name = "af-packet-input",
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
   .sibling_of = "device-input",
   .format_trace = format_af_packet_input_trace,
   .type = VLIB_NODE_TYPE_INPUT,
@@ -297,9 +398,6 @@ VLIB_REGISTER_NODE (af_packet_input_node) = {
   .error_strings = af_packet_input_error_strings,
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (af_packet_input_node, af_packet_input_fn)
-/* *INDENT-ON* */
-
 /*
  * fd.io coding-style-patch-verification: ON
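
Note (not part of the patch): the per-frame decisions the updated af-packet-input node makes all key off TPACKET_V2 descriptor fields — drop the frame when the kernel truncated it (tp_len != tp_snaplen), arrange software TCP/UDP checksum when TP_STATUS_CSUMNOTREADY is set, and mark the packet for GSO when it exceeds the host MTU. The standalone sketch below mirrors that classification using only the kernel UAPI header, as a reading aid; the frame_disposition type, classify_tpacket2_frame helper, and host_mtu parameter are hypothetical stand-ins for the VPP buffer flags and apif->host_mtu used in the diff.

/* Hypothetical, self-contained sketch of the per-frame classification the
 * patched af-packet-input node performs; this is not VPP code. */
#include <linux/if_packet.h>	/* struct tpacket2_hdr, TP_STATUS_* */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  int drop_partial;		/* counterpart of AF_PACKET_INPUT_ERROR_PARTIAL_PKT */
  int need_l4_csum;		/* counterpart of marking TCP/UDP checksum offload */
  int mark_gso;			/* counterpart of VNET_BUFFER_F_GSO */
} frame_disposition;

static frame_disposition
classify_tpacket2_frame (const struct tpacket2_hdr *tph, uint32_t host_mtu)
{
  frame_disposition d = { 0, 0, 0 };

  /* kernel captured fewer bytes than the packet length: partial packet */
  if (tph->tp_len != tph->tp_snaplen)
    d.drop_partial = 1;

  /* locally generated packet whose L4 checksum was never computed */
  if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
    d.need_l4_csum = 1;

  /* larger than the host MTU: treat as a GSO super-frame */
  if (tph->tp_snaplen > host_mtu)
    d.mark_gso = 1;

  return d;
}

int
main (void)
{
  struct tpacket2_hdr tph = { 0 };
  tph.tp_len = 65226;		/* original packet length */
  tph.tp_snaplen = 65226;	/* bytes actually captured */
  tph.tp_status = TP_STATUS_USER | TP_STATUS_CSUMNOTREADY;

  frame_disposition d = classify_tpacket2_frame (&tph, 1500);
  printf ("drop=%d csum=%d gso=%d\n", d.drop_partial, d.need_l4_csum,
	  d.mark_gso);
  return 0;
}

In the actual patch these three outcomes map, respectively, to steering the buffer to VNET_DEVICE_INPUT_NEXT_DROP with the new PARTIAL_PKT error counter, calling mark_tcp_udp_cksum_calc, and calling fill_gso_buffer_flags with apif->host_mtu as the GSO size.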