#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/udp/udp_packet.h>
#include <vmxnet3/vmxnet3.h>
_(BUFFER_ALLOC, "buffer alloc error") \
_(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
_(RX_PACKET, "Rx packet error") \
+ _(RX_PACKET_EOP, "Rx packet error found on EOP") \
_(NO_BUFFER, "Rx no buffer error")
typedef enum
}
}
+/*
+ * Examine the Rx completion descriptor's offload bits and annotate the
+ * head buffer (hb) accordingly:
+ *   - l2/l3/l4 header offsets (the caller only invokes this for untagged
+ *     ethernet frames, so l3 starts right after the ethernet header)
+ *   - checksum-offload flags for checksums the device did not validate
+ *   - GSO metadata when the completion carried an LRO-coalesced packet
+ *     (gso_size != 0, extracted from the LRO completion's MSS by the caller)
+ * Packets that are neither IP4 nor IP6 are left untouched.
+ */
+static_always_inline void
+vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
+			u16 gso_size)
+{
+  u8 l4_hdr_sz = 0;
+
+  if (rx_comp->flags & VMXNET3_RXCF_IP4)
+    {
+      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
+					    sizeof (ethernet_header_t));
+
+      vnet_buffer (hb)->l2_hdr_offset = 0;
+      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
+      /* use the actual IHL so IP options are accounted for */
+      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
+	ip4_header_bytes (ip4);
+      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+	VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+	VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;
+
+      /* checksum offload: skip entirely when the device flagged the
+         completion as "checksum not computed" (CNC) */
+      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
+	{
+	  /* IPC not set -> device did not validate the IP4 checksum;
+	     clear the on-wire field and request checksum offload */
+	  if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
+	    {
+	      hb->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+	      ip4->checksum = 0;
+	    }
+	  /* TUC not set -> TCP/UDP checksum not validated by the device */
+	  if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
+	    {
+	      if (rx_comp->flags & VMXNET3_RXCF_TCP)
+		{
+		  tcp_header_t *tcp =
+		    (tcp_header_t *) (hb->data +
+				      vnet_buffer (hb)->l4_hdr_offset);
+		  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+		  tcp->checksum = 0;
+		}
+	      else if (rx_comp->flags & VMXNET3_RXCF_UDP)
+		{
+		  udp_header_t *udp =
+		    (udp_header_t *) (hb->data +
+				      vnet_buffer (hb)->l4_hdr_offset);
+		  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+		  udp->checksum = 0;
+		}
+	    }
+	}
+
+      /* LRO-coalesced packet: record MSS and l4 header size so the
+         packet can be resegmented downstream */
+      if (gso_size)
+	{
+	  if (rx_comp->flags & VMXNET3_RXCF_TCP)
+	    {
+	      tcp_header_t *tcp =
+		(tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
+	      /* tcp data offset covers TCP options */
+	      l4_hdr_sz = tcp_header_bytes (tcp);
+	    }
+	  else if (rx_comp->flags & VMXNET3_RXCF_UDP)
+	    {
+	      udp_header_t *udp =
+		(udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
+	      l4_hdr_sz = sizeof (*udp);
+	    }
+	  vnet_buffer2 (hb)->gso_size = gso_size;
+	  vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
+	  hb->flags |= VNET_BUFFER_F_GSO;
+	}
+    }
+  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
+    {
+      /* fixed 40-byte IP6 header; extension headers are not walked here */
+      vnet_buffer (hb)->l2_hdr_offset = 0;
+      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
+      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
+	sizeof (ip6_header_t);
+      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+	VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+	VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;
+
+      /* checksum offload: same as IP4 above, minus the IP header checksum
+         (IP6 has none) */
+      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
+	{
+	  if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
+	    {
+	      if (rx_comp->flags & VMXNET3_RXCF_TCP)
+		{
+		  tcp_header_t *tcp =
+		    (tcp_header_t *) (hb->data +
+				      vnet_buffer (hb)->l4_hdr_offset);
+		  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+		  tcp->checksum = 0;
+		}
+	      else if (rx_comp->flags & VMXNET3_RXCF_UDP)
+		{
+		  udp_header_t *udp =
+		    (udp_header_t *) (hb->data +
+				      vnet_buffer (hb)->l4_hdr_offset);
+		  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+		  udp->checksum = 0;
+		}
+	    }
+	}
+
+      /* LRO-coalesced packet: same GSO annotation as the IP4 branch */
+      if (gso_size)
+	{
+	  if (rx_comp->flags & VMXNET3_RXCF_TCP)
+	    {
+	      tcp_header_t *tcp =
+		(tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
+	      l4_hdr_sz = tcp_header_bytes (tcp);
+	    }
+	  else if (rx_comp->flags & VMXNET3_RXCF_UDP)
+	    {
+	      udp_header_t *udp =
+		(udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
+	      l4_hdr_sz = sizeof (*udp);
+	    }
+	  vnet_buffer2 (hb)->gso_size = gso_size;
+	  vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
+	  hb->flags |= VNET_BUFFER_F_GSO;
+	}
+    }
+}
+
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, vmxnet3_device_t * vd,
uword n_trace = vlib_get_trace_count (vm, node);
u32 n_rx_packets = 0, n_rx_bytes = 0;
vmxnet3_rx_comp *rx_comp;
- u32 comp_idx;
u32 desc_idx;
vmxnet3_rxq_t *rxq;
u32 thread_index = vm->thread_index;
u8 known_next = 0, got_packet = 0;
vmxnet3_rx_desc *rxd;
clib_error_t *error;
+ u16 gso_size = 0;
rxq = vec_elt_at_index (vd->rxqs, qid);
comp_ring = &rxq->rx_comp_ring;
bi = buffer_indices;
next = nexts;
- while (PREDICT_TRUE (n_rx_packets < VLIB_FRAME_SIZE) &&
- (comp_ring->gen ==
- (rxq->rx_comp[comp_ring->next].flags & VMXNET3_RXCF_GEN)))
+ rx_comp = &rxq->rx_comp[comp_ring->next];
+
+ while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
+ (comp_ring->gen ==
+ (rx_comp->flags & VMXNET3_RXCF_GEN))))
{
vlib_buffer_t *b0;
u32 bi0;
- comp_idx = comp_ring->next;
- rx_comp = &rxq->rx_comp[comp_idx];
-
rid = vmxnet3_find_rid (vd, rx_comp);
ring = &rxq->rx_ring[rid];
{
vlib_error_count (vm, node->node_index,
VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
+ if (hb)
+ {
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
+ hb = 0;
+ }
+ prev_b0 = 0;
break;
}
- vmxnet3_rx_comp_ring_advance_next (rxq);
desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
ring->consume = desc_idx;
rxd = &rxq->rx_desc[rid][desc_idx];
{
vlib_buffer_free_one (vm, bi0);
vlib_error_count (vm, node->node_index,
- VMXNET3_INPUT_ERROR_RX_PACKET, 1);
+ VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
if (hb && vlib_get_buffer_index (vm, hb) != bi0)
{
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
hb = 0;
}
prev_b0 = 0;
- continue;
+ goto next;
}
if (rx_comp->index & VMXNET3_RXCI_SOP)
{
ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
/* start segment */
+ if (vd->gso_enable &&
+ (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
+ {
+ vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;
+
+ gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
+ }
+
hb = b0;
bi[0] = bi0;
if (!(rx_comp->index & VMXNET3_RXCI_EOP))
vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
hb = 0;
}
- continue;
+ goto next;
}
}
else if (prev_b0) // !sop && !eop
}
else
{
- ASSERT (0);
+ vlib_error_count (vm, node->node_index,
+ VMXNET3_INPUT_ERROR_RX_PACKET, 1);
+ vlib_buffer_free_one (vm, bi0);
+ if (hb && vlib_get_buffer_index (vm, hb) != bi0)
+ {
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
+ hb = 0;
+ }
+ goto next;
}
n_rx_bytes += b0->current_length;
if (got_packet)
{
- ethernet_header_t *e = (ethernet_header_t *) hb->data;
-
if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
{
next_index = vd->per_interface_next_index;
}
if (PREDICT_FALSE (known_next))
- {
- next[0] = next_index;
- }
+ next[0] = next_index;
else
{
- if (ethernet_frame_is_tagged (e->type))
- next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- else
- {
- if (rx_comp->flags & VMXNET3_RXCF_IP4)
- {
- next[0] = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
- hb->flags |= VNET_BUFFER_F_IS_IP4;
- vlib_buffer_advance (hb,
- device_input_next_node_advance
- [next[0]]);
- }
- else if (rx_comp->flags & VMXNET3_RXCF_IP6)
- {
- next[0] = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
- hb->flags |= VNET_BUFFER_F_IS_IP6;
- vlib_buffer_advance (hb,
- device_input_next_node_advance
- [next[0]]);
- }
- else
- {
- next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- }
- }
+ ethernet_header_t *e = (ethernet_header_t *) hb->data;
+
+ next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ if (!ethernet_frame_is_tagged (e->type))
+ vmxnet3_handle_offload (rx_comp, hb, gso_size);
}
n_rx_packets++;
bi++;
hb = 0;
got_packet = 0;
+ gso_size = 0;
}
+
+ next:
+ vmxnet3_rx_comp_ring_advance_next (rxq);
+ rx_comp = &rxq->rx_comp[comp_ring->next];
}
if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
vlib_increment_combined_counter
(vnm->interface_main.combined_sw_if_counters +
VNET_INTERFACE_COUNTER_RX, thread_index,
- vd->hw_if_index, n_rx_packets, n_rx_bytes);
+ vd->sw_if_index, n_rx_packets, n_rx_bytes);
}
error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
.name = "vmxnet3-input",
.sibling_of = "device-input",
+ .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
.format_trace = format_vmxnet3_input_trace,
.type = VLIB_NODE_TYPE_INPUT,
.state = VLIB_NODE_STATE_DISABLED,