X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Fvirtio%2Fnode.c;h=fc2317ecc6924c1ecc80c63a80d0257d345bd27a;hb=72e7312af04bf8f6ecbc8ce70fe1a6e2ad8852ec;hp=686d90c3d00dbf75bb37765fdefabc43178bdd79;hpb=09a3bc50b581c72693ff6270da20a68f5781a468;p=vpp.git

diff --git a/src/vnet/devices/virtio/node.c b/src/vnet/devices/virtio/node.c
index 686d90c3d00..fc2317ecc69 100644
--- a/src/vnet/devices/virtio/node.c
+++ b/src/vnet/devices/virtio/node.c
@@ -145,56 +145,51 @@ static_always_inline void
 fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr)
 {
   u8 l4_proto = 0;
-  u8 l4_hdr_sz = 0;
   if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
     {
-      ethernet_header_t *eh = (ethernet_header_t *) b0->data;
+      ethernet_header_t *eh =
+	(ethernet_header_t *) vlib_buffer_get_current (b0);
       u16 ethertype = clib_net_to_host_u16 (eh->type);
       u16 l2hdr_sz = sizeof (ethernet_header_t);
-      vnet_buffer (b0)->l2_hdr_offset = 0;
-      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
+      if (ethernet_frame_is_tagged (ethertype))
+	{
+	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
+
+	  ethertype = clib_net_to_host_u16 (vlan->type);
+	  l2hdr_sz += sizeof (*vlan);
+	  if (ethertype == ETHERNET_TYPE_VLAN)
+	    {
+	      vlan++;
+	      ethertype = clib_net_to_host_u16 (vlan->type);
+	      l2hdr_sz += sizeof (*vlan);
+	    }
+	}
+
       if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
 	{
-	  ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l2hdr_sz);
-	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
+	  ip4_header_t *ip4 =
+	    (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
 	  l4_proto = ip4->protocol;
 	  b0->flags |=
-	    (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID
-	     | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
-	     VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
-	  b0->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+	    (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
 	}
       else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
 	{
-	  ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l2hdr_sz);
+	  ip6_header_t *ip6 =
+	    (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
 	  /* FIXME IPv6 EH traversal */
-	  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
 	  l4_proto = ip6->protocol;
-	  b0->flags |=
-	    (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID
-	     | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
-	     VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
-	  b0->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+	  b0->flags |= VNET_BUFFER_F_IS_IP6;
 	}

       if (l4_proto == IP_PROTOCOL_TCP)
 	{
 	  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
-	  tcp_header_t *tcp = (tcp_header_t *) (b0->data +
-						vnet_buffer
-						(b0)->l4_hdr_offset);
-	  l4_hdr_sz = tcp_header_bytes (tcp);
-	  tcp->checksum = 0;
 	}
       else if (l4_proto == IP_PROTOCOL_UDP)
 	{
 	  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
-	  udp_header_t *udp = (udp_header_t *) (b0->data +
-						vnet_buffer
-						(b0)->l4_hdr_offset);
-	  l4_hdr_sz = sizeof (*udp);
-	  udp->checksum = 0;
 	}
     }

@@ -202,21 +197,16 @@ fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr)
     {
       ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
       vnet_buffer2 (b0)->gso_size = hdr->gso_size;
-      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
-      b0->flags |= VNET_BUFFER_F_GSO;
-      b0->flags |= VNET_BUFFER_F_IS_IP4;
+      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
     }
   if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
     {
       ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
       vnet_buffer2 (b0)->gso_size = hdr->gso_size;
-      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
-      b0->flags |= VNET_BUFFER_F_GSO;
-      b0->flags |= VNET_BUFFER_F_IS_IP6;
+      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
     }
 }

-
 static_always_inline uword
 virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 			    vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
@@ -303,9 +293,10 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,

 	  if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
 	    next0 = vif->per_interface_next_index;
-	  else
-	    /* redirect if feature path enabled */
-	    vnet_feature_start_device_input_x1 (vif->sw_if_index, &next0, b0);
+
+	  /* redirect if feature path enabled */
+	  vnet_feature_start_device_input_x1 (vif->sw_if_index, &next0, b0);
+
 	  /* trace */
 	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

@@ -313,12 +304,12 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	    {
 	      virtio_input_trace_t *tr;
 	      vlib_trace_buffer (vm, node, next0, b0,
-				 /* follow_chain */ 0);
+				 /* follow_chain */ 1);
 	      vlib_set_trace_count (vm, node, --n_trace);
 	      tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
 	      tr->next_index = next0;
 	      tr->hw_if_index = vif->hw_if_index;
-	      tr->len = len;
+	      tr->len = len + b0->total_length_not_including_first_buffer;
 	      clib_memcpy_fast (&tr->hdr, hdr, hdr_sz);
 	    }

@@ -336,7 +327,7 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,

 	  /* next packet */
 	  n_rx_packets++;
-	  n_rx_bytes += len;
+	  n_rx_bytes += (len + b0->total_length_not_including_first_buffer);
 	}
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }

@@ -344,7 +335,7 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,

   vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
 				   + VNET_INTERFACE_COUNTER_RX, thread_index,
-				   vif->hw_if_index, n_rx_packets,
+				   vif->sw_if_index, n_rx_packets,
 				   n_rx_bytes);

 refill:

@@ -385,6 +376,7 @@ VLIB_REGISTER_NODE (virtio_input_node) = {
   .name = "virtio-input",
   .sibling_of = "device-input",
   .format_trace = format_virtio_input_trace,
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
   .type = VLIB_NODE_TYPE_INPUT,
   .state = VLIB_NODE_STATE_INTERRUPT,
   .n_errors = VIRTIO_INPUT_N_ERROR,
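Note: the sketch below is not part of the patch. It is a minimal, standalone illustration of the VLAN/QinQ walk that the reworked fill_gso_buffer_flags () performs to find the L3 ethertype and L2 header size. The helper name parse_l2_header (), the fixed 14/4-byte sizes, and the single ETHERTYPE_VLAN check are illustrative assumptions; the actual VPP code uses ethernet_frame_is_tagged () (which also matches 0x88A8/0x9100 outer tags) and the ethernet_vlan_header_t type.

/* Standalone sketch: walk up to two 802.1Q tags to locate the payload
 * ethertype, mirroring the logic added to fill_gso_buffer_flags (). */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ETHERTYPE_VLAN 0x8100	/* 802.1Q tag */

/* Hypothetical helper: returns the L3 ethertype, stores the L2 header size. */
static uint16_t
parse_l2_header (const uint8_t *pkt, uint16_t *l2hdr_sz)
{
  uint16_t ethertype;
  uint16_t sz = 14;		/* dst MAC (6) + src MAC (6) + ethertype (2) */

  memcpy (&ethertype, pkt + 12, sizeof (ethertype));
  ethertype = ntohs (ethertype);

  /* At most two VLAN tags (QinQ); each tag adds 4 bytes: TCI + inner type. */
  for (int i = 0; i < 2 && ethertype == ETHERTYPE_VLAN; i++)
    {
      memcpy (&ethertype, pkt + sz + 2, sizeof (ethertype));
      ethertype = ntohs (ethertype);
      sz += 4;
    }

  *l2hdr_sz = sz;
  return ethertype;		/* e.g. 0x0800 for IPv4, 0x86DD for IPv6 */
}

For an untagged IPv4 frame this returns 0x0800 with l2hdr_sz = 14; with one 802.1Q tag it returns the inner ethertype with l2hdr_sz = 18, which is the offset the patched code then uses to read the IPv4/IPv6 header from vlib_buffer_get_current (b0).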