X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Faf_packet%2Fnode.c;h=243a38a42b1b8eb457b9b2ca0113a441306063fd;hb=671e60e65635b8d030bf303c88411192c747b59e;hp=72004320c6719cab702fd29f31be5f2179171781;hpb=7cd468a3d7dee7d6c92f69a0bb7061ae208ec727;p=vpp.git

diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
index 72004320c67..243a38a42b1 100644
--- a/src/vnet/devices/af_packet/node.c
+++ b/src/vnet/devices/af_packet/node.c
@@ -25,10 +25,12 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
-#define foreach_af_packet_input_error
+#define foreach_af_packet_input_error \
+  _(PARTIAL_PKT, "partial packet")
 
 typedef enum
 {
@@ -58,7 +60,7 @@ format_af_packet_input_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   s = format (s, "af_packet: hw_if_index %d next-index %d",
               t->hw_if_index, t->next_index);
@@ -106,12 +108,76 @@ buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
   b->next_buffer = 0;
 }
 
+static_always_inline void
+mark_tcp_udp_cksum_calc (vlib_buffer_t * b)
+{
+  ethernet_header_t *eth = vlib_buffer_get_current (b);
+  if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP4)
+    {
+      ip4_header_t *ip4 =
+        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+      b->flags |= VNET_BUFFER_F_IS_IP4;
+      if (ip4->protocol == IP_PROTOCOL_TCP)
+        {
+          b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+          ((tcp_header_t
+            *) (vlib_buffer_get_current (b) +
+                sizeof (ethernet_header_t) +
+                ip4_header_bytes (ip4)))->checksum = 0;
+        }
+      else if (ip4->protocol == IP_PROTOCOL_UDP)
+        {
+          b->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+          ((udp_header_t
+            *) (vlib_buffer_get_current (b) +
+                sizeof (ethernet_header_t) +
+                ip4_header_bytes (ip4)))->checksum = 0;
+        }
+      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+      vnet_buffer (b)->l4_hdr_offset =
+        sizeof (ethernet_header_t) + ip4_header_bytes (ip4);
+    }
+  else if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP6)
+    {
+      ip6_header_t *ip6 =
+        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
+      b->flags |= VNET_BUFFER_F_IS_IP6;
+      u16 ip6_hdr_len = sizeof (ip6_header_t);
+      if (ip6_ext_hdr (ip6->protocol))
+        {
+          ip6_ext_header_t *p = (void *) (ip6 + 1);
+          ip6_hdr_len += ip6_ext_header_len (p);
+          while (ip6_ext_hdr (p->next_hdr))
+            {
+              ip6_hdr_len += ip6_ext_header_len (p);
+              p = ip6_ext_next_header (p);
+            }
+        }
+      if (ip6->protocol == IP_PROTOCOL_TCP)
+        {
+          b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+          ((tcp_header_t
+            *) (vlib_buffer_get_current (b) +
+                sizeof (ethernet_header_t) + ip6_hdr_len))->checksum = 0;
+        }
+      else if (ip6->protocol == IP_PROTOCOL_UDP)
+        {
+          b->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+          ((udp_header_t
+            *) (vlib_buffer_get_current (b) +
+                sizeof (ethernet_header_t) + ip6_hdr_len))->checksum = 0;
+        }
+      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
+      vnet_buffer (b)->l4_hdr_offset =
+        sizeof (ethernet_header_t) + ip6_hdr_len;
+    }
+}
+
 always_inline uword
 af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                           vlib_frame_t * frame, u32 device_idx)
+                           vlib_frame_t * frame, af_packet_if_t * apif)
 {
   af_packet_main_t *apm = &af_packet_main;
-  af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, device_idx);
   struct tpacket2_hdr *tph;
   u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
   u32 block = 0;
@@ -125,23 +191,22 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 frame_num = apif->rx_req->tp_frame_nr;
   u8 *block_start = apif->rx_ring + block * block_size;
   uword n_trace = vlib_get_trace_count (vm, node);
-  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
-                                                          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+  u32 thread_index = vm->thread_index;
+  u32 n_buffer_bytes = VLIB_BUFFER_DATA_SIZE;
   u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
-  int cpu_index = node->cpu_index;
 
   if (apif->per_interface_next_index != ~0)
     next_index = apif->per_interface_next_index;
 
-  n_free_bufs = vec_len (apm->rx_buffers[cpu_index]);
+  n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
   if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
     {
-      vec_validate (apm->rx_buffers[cpu_index],
+      vec_validate (apm->rx_buffers[thread_index],
                     VLIB_FRAME_SIZE + n_free_bufs - 1);
       n_free_bufs +=
-        vlib_buffer_alloc (vm, &apm->rx_buffers[cpu_index][n_free_bufs],
+        vlib_buffer_alloc (vm, &apm->rx_buffers[thread_index][n_free_bufs],
                            VLIB_FRAME_SIZE);
-      _vec_len (apm->rx_buffers[cpu_index]) = n_free_bufs;
+      _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
     }
 
   rx_frame = apif->next_rx_frame;
@@ -164,22 +229,45 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
         {
           /* grab free buffer */
           u32 last_empty_buffer =
-            vec_len (apm->rx_buffers[cpu_index]) - 1;
+            vec_len (apm->rx_buffers[thread_index]) - 1;
           prev_bi0 = bi0;
-          bi0 = apm->rx_buffers[cpu_index][last_empty_buffer];
+          bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
           b0 = vlib_get_buffer (vm, bi0);
-          _vec_len (apm->rx_buffers[cpu_index]) = last_empty_buffer;
+          _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
           n_free_bufs--;
 
           /* copy data */
          u32 bytes_to_copy =
            data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+          u32 vlan_len = 0;
+          u32 bytes_copied = 0;
           b0->current_data = 0;
-          clib_memcpy (vlib_buffer_get_current (b0),
-                       (u8 *) tph + tph->tp_mac + offset, bytes_to_copy);
+          /* Kernel removes VLAN headers, so reconstruct VLAN */
+          if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
+            {
+              if (PREDICT_TRUE (offset == 0))
+                {
+                  clib_memcpy_fast (vlib_buffer_get_current (b0),
+                                    (u8 *) tph + tph->tp_mac,
+                                    sizeof (ethernet_header_t));
+                  ethernet_header_t *eth = vlib_buffer_get_current (b0);
+                  ethernet_vlan_header_t *vlan =
+                    (ethernet_vlan_header_t *) (eth + 1);
+                  vlan->priority_cfi_and_id =
+                    clib_host_to_net_u16 (tph->tp_vlan_tci);
+                  vlan->type = eth->type;
+                  eth->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+                  vlan_len = sizeof (ethernet_vlan_header_t);
+                  bytes_copied = sizeof (ethernet_header_t);
+                }
+            }
+          clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
+                            bytes_copied + vlan_len,
+                            (u8 *) tph + tph->tp_mac + offset +
+                            bytes_copied, (bytes_to_copy - bytes_copied));
 
           /* fill buffer header */
-          b0->current_length = bytes_to_copy;
+          b0->current_length = bytes_to_copy + vlan_len;
 
           if (offset == 0)
             {
@@ -189,6 +277,8 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
               vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
               first_bi0 = bi0;
               first_b0 = vlib_get_buffer (vm, first_bi0);
+              if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
+                mark_tcp_udp_cksum_calc (first_b0);
             }
           else
             buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
@@ -202,6 +292,21 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
           to_next += 1;
           n_left_to_next--;
 
+          /* drop partial packets */
+          if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
+            {
+              next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+              first_b0->error =
+                node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
+            }
+          else
+            {
+              next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+              /* redirect if feature path enabled */
+              vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0,
+                                                  first_b0);
+            }
+
           /* trace */
           VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
           if (PREDICT_FALSE (n_trace > 0))
@@ -213,13 +318,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
               tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
               tr->next_index = next0;
               tr->hw_if_index = apif->hw_if_index;
-              clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
+              clib_memcpy_fast (&tr->tph, tph, sizeof (struct tpacket2_hdr));
             }
 
-          /* redirect if feature path enabled */
-          vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0, b0,
-                                              0);
-
           /* enque and take next packet */
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                            n_left_to_next, first_bi0, next0);
@@ -238,8 +339,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   vlib_increment_combined_counter
     (vnet_get_main ()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
-     os_get_cpu_number (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
+     vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
 
+  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
   return n_rx_packets;
 }
 
@@ -247,18 +349,18 @@ static uword
 af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame)
 {
-  int i;
   u32 n_rx_packets = 0;
-
   af_packet_main_t *apm = &af_packet_main;
-
-  /* *INDENT-OFF* */
-  clib_bitmap_foreach (i, apm->pending_input_bitmap,
-    ({
-      clib_bitmap_set (apm->pending_input_bitmap, i, 0);
-      n_rx_packets += af_packet_device_input_fn(vm, node, frame, i);
-    }));
-  /* *INDENT-ON* */
+  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
+  vnet_device_and_queue_t *dq;
+
+  foreach_device_and_queue (dq, rt->devices_and_queues)
+  {
+    af_packet_if_t *apif;
+    apif = vec_elt_at_index (apm->interfaces, dq->dev_instance);
+    if (apif->is_admin_up)
+      n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
+  }
 
   return n_rx_packets;
 }
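
For context on the checksum-offload hunks: when the kernel hands over a frame flagged TP_STATUS_CSUMNOTREADY, mark_tcp_udp_cksum_calc () locates the TCP/UDP header (via the IPv4 IHL or a walk over IPv6 extension headers), zeroes its checksum and sets the VNET_BUFFER_F_OFFLOAD_*_CKSUM flags so a later node can complete the checksum. Below is a rough, standalone sketch of that offset computation on a raw byte buffer; the helper name l4_header_offset and the hand-rolled constants are illustrative assumptions rather than VPP API, and unlike the patch this sketch keys the TCP/UDP test off the last next-header it finds.

/*
 * Standalone sketch (not part of the patch): find the TCP/UDP header
 * offset in a raw Ethernet frame, roughly as mark_tcp_udp_cksum_calc ()
 * does on a vlib buffer.  Returns the byte offset of the L4 header, or
 * -1 when the packet is not TCP/UDP over IPv4/IPv6.
 */
#include <stddef.h>
#include <stdint.h>

#define ETHERTYPE_IP4   0x0800
#define ETHERTYPE_IP6   0x86dd
#define PROTO_TCP       6
#define PROTO_UDP       17
#define PROTO_HOPBYHOP  0       /* IPv6 extension headers handled below */
#define PROTO_ROUTING   43
#define PROTO_DESTOPT   60

static int
is_ip6_ext_hdr (uint8_t proto)
{
  return proto == PROTO_HOPBYHOP || proto == PROTO_ROUTING
    || proto == PROTO_DESTOPT;
}

static long
l4_header_offset (const uint8_t * pkt, size_t len)
{
  if (len < 14)
    return -1;
  uint16_t type = (uint16_t) ((pkt[12] << 8) | pkt[13]);

  if (type == ETHERTYPE_IP4 && len >= 14 + 20)
    {
      uint8_t ihl = pkt[14] & 0x0f;     /* IPv4 header length, 32-bit words */
      uint8_t proto = pkt[14 + 9];
      if (ihl >= 5 && (proto == PROTO_TCP || proto == PROTO_UDP))
        return 14 + ihl * 4;
    }
  else if (type == ETHERTYPE_IP6 && len >= 14 + 40)
    {
      size_t off = 14 + 40;             /* fixed IPv6 header */
      uint8_t nh = pkt[14 + 6];         /* next-header field */
      /* walk extension headers, as the patch does with ip6_ext_hdr () */
      while (is_ip6_ext_hdr (nh) && off + 8 <= len)
        {
          nh = pkt[off];                /* ext header: next-header byte */
          off += ((size_t) pkt[off + 1] + 1) * 8;  /* length in 8-byte units */
        }
      if (nh == PROTO_TCP || nh == PROTO_UDP)
        return (long) off;
    }
  return -1;
}

In the patch itself the equivalent offsets land in vnet_buffer (b)->l3_hdr_offset / l4_hdr_offset and the checksum field is zeroed in place, so the offload flags can be honoured further down the graph.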
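
Similarly, the rx-path hunk rebuilds the 802.1Q tag that the kernel strips before a frame reaches the TPACKET_V2 ring (TP_STATUS_VLAN_VALID plus tph->tp_vlan_tci). A minimal standalone sketch of the same byte-level rewrite, outside VPP's buffer and header types, follows; rebuild_vlan_frame and the fixed 14/4-byte header lengths are illustrative assumptions.

/*
 * Standalone sketch (not part of the patch): re-insert the 802.1Q tag the
 * kernel strips before a frame reaches the TPACKET_V2 ring, much as the
 * rx hunk above does with tph->tp_vlan_tci.  dst must have room for
 * len + 4 bytes; returns the number of bytes written (0 on short input).
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htons */

#define ETH_HDR_LEN     14
#define VLAN_HDR_LEN    4
#define ETHERTYPE_8021Q 0x8100

static size_t
rebuild_vlan_frame (uint8_t * dst, const uint8_t * src, size_t len,
                    uint16_t vlan_tci)
{
  if (len < ETH_HDR_LEN)
    return 0;

  /* destination MAC, source MAC and the original ethertype */
  memcpy (dst, src, ETH_HDR_LEN);

  /* remember the original ethertype, then splice in the 802.1Q header */
  uint16_t orig_type;
  memcpy (&orig_type, src + 12, sizeof (orig_type));

  uint16_t tpid = htons (ETHERTYPE_8021Q);
  uint16_t tci = htons (vlan_tci);
  memcpy (dst + 12, &tpid, 2);          /* outer ethertype = 0x8100 */
  memcpy (dst + 14, &tci, 2);           /* priority / CFI / VLAN id */
  memcpy (dst + 16, &orig_type, 2);     /* inner (original) ethertype */

  /* the rest of the packet is copied unchanged after the new tag */
  memcpy (dst + ETH_HDR_LEN + VLAN_HDR_LEN, src + ETH_HDR_LEN,
          len - ETH_HDR_LEN);
  return len + VLAN_HDR_LEN;
}

The patch performs this rewrite in place in the vlib buffer and only for the first chunk of a frame (offset == 0), which is why vlan_len and bytes_copied feed into the follow-up clib_memcpy_fast and into b0->current_length.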