/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/udp/udp_packet.h>

#include <vmxnet3/vmxnet3.h>

#define foreach_vmxnet3_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_NO_SOP, "Rx packet error - no SOP") \
  _(RX_PACKET, "Rx packet error") \
  _(RX_PACKET_EOP, "Rx packet error found on EOP") \
  _(NO_BUFFER, "Rx no buffer error")

typedef enum
{
#define _(f,s) VMXNET3_INPUT_ERROR_##f,
  foreach_vmxnet3_input_error
#undef _
    VMXNET3_INPUT_N_ERROR,
} vmxnet3_input_error_t;

static __clib_unused char *vmxnet3_input_error_strings[] = {
#define _(n,s) s,
  foreach_vmxnet3_input_error
#undef _
};

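/*
 * Each RX queue is backed by two fill rings (ring 0 and ring 1) plus one
 * completion ring. A completion descriptor records which ring its buffer
 * was posted on; the helper below extracts that ring id (per the
 * descriptor layout in vmxnet3.h).
 */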
static_always_inline u16
vmxnet3_find_rid (vmxnet3_device_t * vd, vmxnet3_rx_comp * rx_comp)
{
  u32 rid;

  // rid is bits 16-25 (a 10-bit number)
  rid = rx_comp->index & (0xffffffff >> 6);
  rid >>= 16;
  if ((rid >= vd->num_rx_queues) && (rid < (vd->num_rx_queues << 1)))
    return 1;
  else
    return 0;
}

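/*
 * The completion ring has no head/tail register; ownership is tracked by
 * a generation bit that flips each time the ring wraps. A descriptor is
 * valid only while its generation bit matches comp_ring->gen.
 */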
static_always_inline void
vmxnet3_rx_comp_ring_advance_next (vmxnet3_rxq_t * rxq)
{
  vmxnet3_rx_comp_ring *comp_ring = &rxq->rx_comp_ring;

  comp_ring->next++;
  if (PREDICT_FALSE (comp_ring->next == rxq->size))
    {
      comp_ring->next = 0;
      comp_ring->gen ^= VMXNET3_RXCF_GEN;
    }
}

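/*
 * Translate the checksum and LRO hints from the completion descriptor
 * into vlib buffer flags and l2/l3/l4 header offsets so downstream nodes
 * need not re-parse the headers. A non-zero gso_size (taken from an LRO
 * completion) also fills in the GSO metadata.
 */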
static_always_inline void
vmxnet3_handle_offload (vmxnet3_rx_comp * rx_comp, vlib_buffer_t * hb,
			u16 gso_size)
{
  u8 l4_hdr_sz = 0;

  if (rx_comp->flags & VMXNET3_RXCF_IP4)
    {
      ip4_header_t *ip4 = (ip4_header_t *) (hb->data +
					    sizeof (ethernet_header_t));

      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
	ip4_header_bytes (ip4);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP4;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
	{
	  if (!(rx_comp->flags & VMXNET3_RXCF_IPC))
	    {
	      hb->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
	      ip4->checksum = 0;
	    }
	  if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
	    {
	      if (rx_comp->flags & VMXNET3_RXCF_TCP)
		{
		  tcp_header_t *tcp =
		    (tcp_header_t *) (hb->data +
				      vnet_buffer (hb)->l4_hdr_offset);
		  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
		  tcp->checksum = 0;
		}
	      else if (rx_comp->flags & VMXNET3_RXCF_UDP)
		{
		  udp_header_t *udp =
		    (udp_header_t *) (hb->data +
				      vnet_buffer (hb)->l4_hdr_offset);
		  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
		  udp->checksum = 0;
		}
	    }
	}

      if (gso_size)
	{
	  if (rx_comp->flags & VMXNET3_RXCF_TCP)
	    {
	      tcp_header_t *tcp =
		(tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
	      l4_hdr_sz = tcp_header_bytes (tcp);
	    }
	  else if (rx_comp->flags & VMXNET3_RXCF_UDP)
	    {
	      udp_header_t *udp =
		(udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
	      l4_hdr_sz = sizeof (*udp);
	    }
	  vnet_buffer2 (hb)->gso_size = gso_size;
	  vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
	  hb->flags |= VNET_BUFFER_F_GSO;
	}
    }
  else if (rx_comp->flags & VMXNET3_RXCF_IP6)
    {
      vnet_buffer (hb)->l2_hdr_offset = 0;
      vnet_buffer (hb)->l3_hdr_offset = sizeof (ethernet_header_t);
      vnet_buffer (hb)->l4_hdr_offset = sizeof (ethernet_header_t) +
	sizeof (ip6_header_t);
      hb->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
	VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
	VNET_BUFFER_F_L4_HDR_OFFSET_VALID | VNET_BUFFER_F_IS_IP6;

      /* checksum offload */
      if (!(rx_comp->index & VMXNET3_RXCI_CNC))
	{
	  if (!(rx_comp->flags & VMXNET3_RXCF_TUC))
	    {
	      if (rx_comp->flags & VMXNET3_RXCF_TCP)
		{
		  tcp_header_t *tcp =
		    (tcp_header_t *) (hb->data +
				      vnet_buffer (hb)->l4_hdr_offset);
		  hb->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
		  tcp->checksum = 0;
		}
	      else if (rx_comp->flags & VMXNET3_RXCF_UDP)
		{
		  udp_header_t *udp =
		    (udp_header_t *) (hb->data +
				      vnet_buffer (hb)->l4_hdr_offset);
		  hb->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
		  udp->checksum = 0;
		}
	    }
	}

      if (gso_size)
	{
	  if (rx_comp->flags & VMXNET3_RXCF_TCP)
	    {
	      tcp_header_t *tcp =
		(tcp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
	      l4_hdr_sz = tcp_header_bytes (tcp);
	    }
	  else if (rx_comp->flags & VMXNET3_RXCF_UDP)
	    {
	      udp_header_t *udp =
		(udp_header_t *) (hb->data + vnet_buffer (hb)->l4_hdr_offset);
	      l4_hdr_sz = sizeof (*udp);
	    }
	  vnet_buffer2 (hb)->gso_size = gso_size;
	  vnet_buffer2 (hb)->gso_l4_hdr_sz = l4_hdr_sz;
	  hb->flags |= VNET_BUFFER_F_GSO;
	}
    }
}

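/*
 * Per-queue receive path: poll the completion ring, re-assemble packets
 * the device split across several buffers (SOP ... EOP), choose a next
 * node per packet, enqueue the burst, then refill both fill rings.
 */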
static_always_inline uword
vmxnet3_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame, vmxnet3_device_t * vd,
			     u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  vmxnet3_rx_comp *rx_comp;
  u32 desc_idx;
  vmxnet3_rxq_t *rxq;
  u32 thread_index = vm->thread_index;
  u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vmxnet3_rx_ring *ring;
  vmxnet3_rx_comp_ring *comp_ring;
  u16 rid;
  vlib_buffer_t *prev_b0 = 0, *hb = 0;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u8 known_next = 0, got_packet = 0;
  vmxnet3_rx_desc *rxd;
  clib_error_t *error;
  u16 gso_size = 0;

  rxq = vec_elt_at_index (vd->rxqs, qid);
  comp_ring = &rxq->rx_comp_ring;
  bi = buffer_indices;
  next = nexts;
  rx_comp = &rxq->rx_comp[comp_ring->next];

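  /*
   * Poll until the generation bit no longer matches (ring empty) or a
   * full vlib frame has been collected.
   */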
  while (PREDICT_TRUE ((n_rx_packets < VLIB_FRAME_SIZE) &&
		       (comp_ring->gen ==
			(rx_comp->flags & VMXNET3_RXCF_GEN))))
    {
      vlib_buffer_t *b0;
      u32 bi0;

      rid = vmxnet3_find_rid (vd, rx_comp);
      ring = &rxq->rx_ring[rid];

      if (PREDICT_TRUE (ring->fill >= 1))
	ring->fill--;
      else
	{
	  vlib_error_count (vm, node->node_index,
			    VMXNET3_INPUT_ERROR_NO_BUFFER, 1);
	  if (hb)
	    {
	      vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
	      hb = 0;
	    }
	  prev_b0 = 0;
	  break;
	}

      desc_idx = rx_comp->index & VMXNET3_RXC_INDEX;
      ring->consume = desc_idx;
      rxd = &rxq->rx_desc[rid][desc_idx];

      bi0 = ring->bufs[desc_idx];
      ring->bufs[desc_idx] = ~0;

      b0 = vlib_get_buffer (vm, bi0);
      vnet_buffer (b0)->sw_if_index[VLIB_RX] = vd->sw_if_index;
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      vnet_buffer (b0)->feature_arc_index = 0;
      b0->current_length = rx_comp->len & VMXNET3_RXCL_LEN_MASK;
      b0->current_data = 0;
      b0->total_length_not_including_first_buffer = 0;
      b0->next_buffer = 0;
      b0->flags = 0;
      b0->error = 0;
      b0->current_config_index = 0;

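      /*
       * An EOP completion with the error bit set invalidates the whole
       * packet; drop this buffer and any partially assembled chain.
       */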
      if (PREDICT_FALSE ((rx_comp->index & VMXNET3_RXCI_EOP) &&
			 (rx_comp->len & VMXNET3_RXCL_ERROR)))
	{
	  vlib_buffer_free_one (vm, bi0);
	  vlib_error_count (vm, node->node_index,
			    VMXNET3_INPUT_ERROR_RX_PACKET_EOP, 1);
	  if (hb && vlib_get_buffer_index (vm, hb) != bi0)
	    {
	      vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
	      hb = 0;
	    }
	  prev_b0 = 0;
	  goto next;
	}

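      /*
       * Chain re-assembly state machine: SOP starts a packet (hb is the
       * head buffer, prev_b0 the most recent segment), EOP completes it,
       * and anything else either extends the chain or is an error.
       */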
      if (rx_comp->index & VMXNET3_RXCI_SOP)
	{
	  ASSERT (!(rxd->flags & VMXNET3_RXF_BTYPE));
	  /* start segment */
	  if (vd->gso_enable &&
	      (rx_comp->flags & VMXNET3_RXCF_CT) == VMXNET3_RXCOMP_TYPE_LRO)
	    {
	      vmxnet3_rx_comp_ext *lro = (vmxnet3_rx_comp_ext *) rx_comp;

	      gso_size = lro->flags & VMXNET3_RXECF_MSS_MASK;
	    }

	  hb = b0;
	  bi[0] = bi0;
	  if (!(rx_comp->index & VMXNET3_RXCI_EOP))
	    {
	      hb->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
	      prev_b0 = b0;
	    }
	  else
	    {
	      /*
	       * Both start and end of packet are set; it is a complete packet
	       */
	      prev_b0 = 0;
	      got_packet = 1;
	    }
	}
      else if (rx_comp->index & VMXNET3_RXCI_EOP)
	{
	  /* end of segment */
	  if (PREDICT_TRUE (prev_b0 != 0))
	    {
	      if (PREDICT_TRUE (b0->current_length != 0))
		{
		  prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
		  prev_b0->next_buffer = bi0;
		  hb->total_length_not_including_first_buffer +=
		    b0->current_length;
		}
	      else
		vlib_buffer_free_one (vm, bi0);
	      prev_b0 = 0;
	      got_packet = 1;
	    }
	  else
	    {
	      /* EOP without SOP, error */
	      vlib_error_count (vm, node->node_index,
				VMXNET3_INPUT_ERROR_RX_PACKET_NO_SOP, 1);
	      vlib_buffer_free_one (vm, bi0);
	      if (hb && vlib_get_buffer_index (vm, hb) != bi0)
		{
		  vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
		  hb = 0;
		}
	      goto next;
	    }
	}
      else if (prev_b0)		// !sop && !eop
	{
	  /* mid chain */
	  ASSERT (rxd->flags & VMXNET3_RXF_BTYPE);
	  prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  prev_b0->next_buffer = bi0;
	  prev_b0 = b0;
	  hb->total_length_not_including_first_buffer += b0->current_length;
	}
      else
	{
	  vlib_error_count (vm, node->node_index,
			    VMXNET3_INPUT_ERROR_RX_PACKET, 1);
	  vlib_buffer_free_one (vm, bi0);
	  if (hb && vlib_get_buffer_index (vm, hb) != bi0)
	    {
	      vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, hb));
	      hb = 0;
	    }
	  goto next;
	}

      n_rx_bytes += b0->current_length;

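      /*
       * If a complete packet was assembled (got_packet), pick its next
       * node: a per-interface redirect or a device-input feature arc
       * overrides the default ethernet-input path; offload hints are
       * parsed only on the default path and only for untagged frames.
       */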
      if (got_packet)
	{
	  if (PREDICT_FALSE (vd->per_interface_next_index != ~0))
	    {
	      next_index = vd->per_interface_next_index;
	      known_next = 1;
	    }

	  if (PREDICT_FALSE
	      (vnet_device_input_have_features (vd->sw_if_index)))
	    {
	      vnet_feature_start_device_input_x1 (vd->sw_if_index,
						  &next_index, hb);
	      known_next = 1;
	    }

	  if (PREDICT_FALSE (known_next))
	    next[0] = next_index;
	  else
	    {
	      ethernet_header_t *e = (ethernet_header_t *) hb->data;

	      next[0] = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
	      if (!ethernet_frame_is_tagged (ntohs (e->type)))
		vmxnet3_handle_offload (rx_comp, hb, gso_size);
	    }

	  n_rx_packets++;
	  next++;
	  bi++;
	  hb = 0;
	  got_packet = 0;
	  gso_size = 0;
	}

    next:
      vmxnet3_rx_comp_ring_advance_next (rxq);
      rx_comp = &rxq->rx_comp[comp_ring->next];
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;

      bi = buffer_indices;
      next = nexts;

      while (n_trace && n_left)
	{
	  vlib_buffer_t *b;
	  vmxnet3_input_trace_t *tr;

	  b = vlib_get_buffer (vm, bi[0]);
	  vlib_trace_buffer (vm, node, next[0], b, /* follow_chain */ 0);
	  tr = vlib_add_trace (vm, node, b, sizeof (*tr));
	  tr->next_index = next[0];
	  tr->hw_if_index = vd->hw_if_index;
	  tr->buffer = *b;

	  n_trace--;
	  n_left--;
	  bi++;
	  next++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (PREDICT_TRUE (n_rx_packets))
    {
      vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts,
				   n_rx_packets);
      vlib_increment_combined_counter
	(vnm->interface_main.combined_sw_if_counters +
	 VNET_INTERFACE_COUNTER_RX, thread_index,
	 vd->sw_if_index, n_rx_packets, n_rx_bytes);
    }

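  /*
   * Replenish both fill rings so the device always has buffers to DMA
   * into; an allocation failure is only counted here and retried on the
   * next poll.
   */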
  error = vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
			VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }
  error = vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
  if (PREDICT_FALSE (error != 0))
    {
      vlib_error_count (vm, node->node_index,
			VMXNET3_INPUT_ERROR_BUFFER_ALLOC, 1);
    }

  return n_rx_packets;
}

VLIB_NODE_FN (vmxnet3_input_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  u32 n_rx = 0;
  vmxnet3_main_t *vmxm = &vmxnet3_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    vmxnet3_device_t *vd;
    vd = vec_elt_at_index (vmxm->devices, dq->dev_instance);
    if ((vd->flags & VMXNET3_DEVICE_F_ADMIN_UP) == 0)
      continue;
    n_rx += vmxnet3_device_input_inline (vm, node, frame, vd, dq->queue_id);
  }
  return n_rx;
}

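/*
 * The static registration below is compiled once (outside of any
 * CLIB_MARCH_VARIANT), while VLIB_NODE_FN above emits per-CPU-variant
 * function bodies that vlib picks from at runtime.
 */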
#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vmxnet3_input_node) = {
  .name = "vmxnet3-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .format_trace = format_vmxnet3_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = VMXNET3_INPUT_N_ERROR,
  .error_strings = vmxnet3_input_error_strings,
};
/* *INDENT-ON* */
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */