/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gro_func.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

#define foreach_virtio_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) VIRTIO_INPUT_ERROR_##f,
  foreach_virtio_input_error
#undef _
    VIRTIO_INPUT_N_ERROR,
} virtio_input_error_t;

static char *virtio_input_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
  u16 len;
  virtio_net_hdr_v1_t hdr;
} virtio_input_trace_t;

static u8 *
format_virtio_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
              t->hw_if_index, t->next_index, t->ring, t->len);
  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
              "gso_size %u csum_start %u csum_offset %u num_buffers %u",
              format_white_space, indent + 2,
              t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
              t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
  return s;
}

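/*
 * Refill the split (legacy) RX vring with freshly allocated buffers in
 * batches of up to 64, publish them through the avail ring, and kick the
 * device unless it has asked not to be notified.
 */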
static_always_inline void
virtio_refill_vring_split (vlib_main_t * vm, virtio_if_t * vif,
                           virtio_if_type_t type, virtio_vring_t * vring,
                           const int hdr_sz, u32 node_index)
{
  u16 used, next, avail, n_slots, n_refill;
  u16 sz = vring->size;
  u16 mask = sz - 1;

more:
  used = vring->desc_in_use;

  if (sz - used < sz / 8)
    return;

  /* deliver free buffers in chunks of 64 */
  n_refill = clib_min (sz - used, 64);

  next = vring->desc_next;
  avail = vring->avail->idx;
  n_slots =
    vlib_buffer_alloc_to_ring_from_pool (vm, vring->buffers, next,
                                         vring->size, n_refill,
                                         vring->buffer_pool_index);

  if (PREDICT_FALSE (n_slots != n_refill))
    {
      vlib_error_count (vm, node_index,
                        VIRTIO_INPUT_ERROR_BUFFER_ALLOC, n_refill - n_slots);
      if (n_slots == 0)
        return;
    }

  while (n_slots)
    {
      vring_desc_t *d = &vring->desc[next];
      vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
      /*
       * current_data may not be 0 and may still hold a previous offset,
       * so reset it and zero the virtio header area here.
       */
      b->current_data = -hdr_sz;
      clib_memset (vlib_buffer_get_current (b), 0, hdr_sz);
      d->addr =
        ((type == VIRTIO_IF_TYPE_PCI) ?
         vlib_buffer_get_current_pa (vm, b) :
         pointer_to_uword (vlib_buffer_get_current (b)));
      d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;
      d->flags = VRING_DESC_F_WRITE;
      vring->avail->ring[avail & mask] = next;
      avail++;
      next = (next + 1) & mask;
      n_slots--;
      used++;
    }
  clib_atomic_store_seq_cst (&vring->avail->idx, avail);
  vring->desc_next = next;
  vring->desc_in_use = used;
  if ((clib_atomic_load_seq_cst (&vring->used->flags) &
       VRING_USED_F_NO_NOTIFY) == 0)
    {
      virtio_kick (vm, vring, vif);
    }
  goto more;
}

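/*
 * Same refill logic for the packed virtqueue: each descriptor carries its
 * own AVAIL/USED wrap-state flags, and the first descriptor's flags are
 * written last, behind a store barrier, so the device cannot observe a
 * partially published batch.
 */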
static_always_inline void
virtio_refill_vring_packed (vlib_main_t * vm, virtio_if_t * vif,
                            virtio_if_type_t type, virtio_vring_t * vring,
                            const int hdr_sz, u32 node_index)
{
  u16 used, next, n_slots, n_refill, flags = 0, first_desc_flags;
  u16 sz = vring->size;

more:
  used = vring->desc_in_use;

  if (sz == used)
    return;

  /* deliver free buffers in chunks of 64 */
  n_refill = clib_min (sz - used, 64);

  next = vring->desc_next;
  first_desc_flags = vring->packed_desc[next].flags;
  n_slots =
    vlib_buffer_alloc_to_ring_from_pool (vm, vring->buffers, next,
                                         sz, n_refill,
                                         vring->buffer_pool_index);

  if (PREDICT_FALSE (n_slots != n_refill))
    {
      vlib_error_count (vm, node_index,
                        VIRTIO_INPUT_ERROR_BUFFER_ALLOC, n_refill - n_slots);
      if (n_slots == 0)
        return;
    }

  while (n_slots)
    {
      vring_packed_desc_t *d = &vring->packed_desc[next];
      vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
      /*
       * current_data may not be 0 and may still hold a previous offset,
       * so reset it and zero the virtio header area here.
       */
      b->current_data = -hdr_sz;
      clib_memset (vlib_buffer_get_current (b), 0, hdr_sz);
      d->addr =
        ((type == VIRTIO_IF_TYPE_PCI) ?
         vlib_buffer_get_current_pa (vm, b) :
         pointer_to_uword (vlib_buffer_get_current (b)));
      d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;

      if (vring->avail_wrap_counter)
        flags = (VRING_DESC_F_AVAIL | VRING_DESC_F_WRITE);
      else
        flags = (VRING_DESC_F_USED | VRING_DESC_F_WRITE);

      d->id = next;
      if (vring->desc_next == next)
        first_desc_flags = flags;
      else
        d->flags = flags;

      next++;
      if (next >= sz)
        {
          next = 0;
          vring->avail_wrap_counter ^= 1;
        }
      n_slots--;
      used++;
    }
  CLIB_MEMORY_STORE_BARRIER ();
  vring->packed_desc[vring->desc_next].flags = first_desc_flags;
  vring->desc_next = next;
  vring->desc_in_use = used;
  CLIB_MEMORY_BARRIER ();
  if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
    {
      virtio_kick (vm, vring, vif);
    }

  goto more;
}

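/*
 * When the device sets VIRTIO_NET_HDR_F_NEEDS_CSUM the packet arrives with
 * an incomplete L4 checksum.  Parse the L2/L3 headers, record the header
 * offsets and request the matching checksum offload on the buffer.
 */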
static_always_inline void
virtio_needs_csum (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
                   u8 * l4_proto, u8 * l4_hdr_sz, virtio_if_type_t type)
{
  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      u16 ethertype = 0, l2hdr_sz = 0;

      if (type == VIRTIO_IF_TYPE_TUN)
        {
          switch (b0->data[0] & 0xf0)
            {
            case 0x40:
              ethertype = ETHERNET_TYPE_IP4;
              break;
            case 0x60:
              ethertype = ETHERNET_TYPE_IP6;
              break;
            }
        }
      else
        {
          ethernet_header_t *eh =
            (ethernet_header_t *) vlib_buffer_get_current (b0);
          ethertype = clib_net_to_host_u16 (eh->type);
          l2hdr_sz = sizeof (ethernet_header_t);

          if (ethernet_frame_is_tagged (ethertype))
            {
              ethernet_vlan_header_t *vlan =
                (ethernet_vlan_header_t *) (eh + 1);

              ethertype = clib_net_to_host_u16 (vlan->type);
              l2hdr_sz += sizeof (*vlan);
              if (ethertype == ETHERNET_TYPE_VLAN)
                {
                  vlan++;
                  ethertype = clib_net_to_host_u16 (vlan->type);
                  l2hdr_sz += sizeof (*vlan);
                }
            }
        }

      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;

      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
        {
          ip4_header_t *ip4 =
            (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
          *l4_proto = ip4->protocol;
          b0->flags |=
            (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
          b0->flags |=
            (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
        {
          ip6_header_t *ip6 =
            (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
          /* FIXME IPv6 EH traversal */
          *l4_proto = ip6->protocol;
          b0->flags |= (VNET_BUFFER_F_IS_IP6 |
                        VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }
      if (*l4_proto == IP_PROTOCOL_TCP)
        {
          b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
          tcp_header_t *tcp =
            (tcp_header_t *) (vlib_buffer_get_current (b0) +
                              vnet_buffer (b0)->l4_hdr_offset);
          *l4_hdr_sz = tcp_header_bytes (tcp);
        }
      else if (*l4_proto == IP_PROTOCOL_UDP)
        {
          b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
          udp_header_t *udp =
            (udp_header_t *) (vlib_buffer_get_current (b0) +
                              vnet_buffer (b0)->l4_hdr_offset);
          *l4_hdr_sz = sizeof (*udp);
        }
    }
}

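/*
 * Translate the GSO type advertised by the device into VPP's GSO buffer
 * metadata (gso_size, gso_l4_hdr_sz) so the GSO/TSO path can resegment the
 * packet later if required.
 */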
static_always_inline void
fill_gso_buffer_flags (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
                       u8 l4_proto, u8 l4_hdr_sz)
{
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
    }
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
    }
}

static_always_inline u16
virtio_n_left_to_process (virtio_vring_t * vring, const int packed)
{
  if (packed)
    return vring->desc_in_use;
  else
    return vring->used->idx - vring->last_used_idx;
}

static_always_inline u16
virtio_get_slot_id (virtio_vring_t * vring, const int packed, u16 last,
                    u16 mask)
{
  if (packed)
    return vring->packed_desc[last].id;
  else
    return vring->used->ring[last & mask].id;
}

static_always_inline u16
virtio_get_len (virtio_vring_t * vring, const int packed, const int hdr_sz,
                u16 last, u16 mask)
{
  if (packed)
    return vring->packed_desc[last].len - hdr_sz;
  else
    return vring->used->ring[last & mask].len - hdr_sz;
}

#define increment_last(last, packed, vring) \
  do                                        \
    {                                       \
      last++;                               \
      if (packed && last >= vring->size)    \
        {                                   \
          last = 0;                         \
          vring->used_wrap_counter ^= 1;    \
        }                                   \
    }                                       \
  while (0)

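/*
 * Main RX routine: drains completed descriptors from the vring, rebuilds
 * multi-segment buffer chains, applies checksum/GSO metadata, traces and
 * enqueues packets to the next node, and updates the interface RX counters.
 * Specialized at compile time for split vs. packed rings and offload modes.
 */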
static_always_inline uword
virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame, virtio_if_t * vif,
                                virtio_vring_t * vring, virtio_if_type_t type,
                                int gso_enabled, int checksum_offload_enabled,
                                int packed)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thread_index = vm->thread_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 next_index;
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u32 *to_next = 0;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 mask = vring->size - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = virtio_n_left_to_process (vring, packed);
  vlib_buffer_t bt;

  if (n_left == 0)
    return 0;

  if (type == VIRTIO_IF_TYPE_TUN)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
    }
  else
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
        next_index = vif->per_interface_next_index;

      /* only for l2, redirect if feature path enabled */
      vnet_feature_start_device_input_x1 (vif->sw_if_index, &next_index, &bt);
    }

  while (n_left)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left && n_left_to_next)
        {
          if (packed)
            {
              vring_packed_desc_t *d = &vring->packed_desc[last];
              u16 flags = d->flags;
              if ((flags & VRING_DESC_F_AVAIL) !=
                  (vring->used_wrap_counter << 7)
                  || (flags & VRING_DESC_F_USED) !=
                  (vring->used_wrap_counter << 15))
                {
                  n_left = 0;
                  break;
                }
            }
          u8 l4_proto = 0, l4_hdr_sz = 0;
          u16 num_buffers = 1;
          virtio_net_hdr_v1_t *hdr;
          u16 slot = virtio_get_slot_id (vring, packed, last, mask);
          u16 len = virtio_get_len (vring, packed, hdr_sz, last, mask);
          u32 bi0 = vring->buffers[slot];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          hdr = vlib_buffer_get_current (b0);
          if (hdr_sz == sizeof (virtio_net_hdr_v1_t))
            num_buffers = hdr->num_buffers;

          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          b0->current_data = 0;
          b0->current_length = len;

          if (checksum_offload_enabled)
            virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz, type);

          if (gso_enabled)
            fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* if multisegment packet */
          if (PREDICT_FALSE (num_buffers > 1))
            {
              vlib_buffer_t *pb, *cb;
              pb = b0;
              b0->total_length_not_including_first_buffer = 0;
              while (num_buffers > 1)
                {
                  increment_last (last, packed, vring);
                  u16 cslot = virtio_get_slot_id (vring, packed, last, mask);
                  /* hdr size is 0 after 1st packet in chain buffers */
                  u16 clen = virtio_get_len (vring, packed, 0, last, mask);
                  u32 cbi = vring->buffers[cslot];
                  cb = vlib_get_buffer (vm, cbi);

                  /* current buffer */
                  cb->current_length = clen;

                  /* previous buffer */
                  pb->next_buffer = cbi;
                  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;

                  /* update previous buffer */
                  pb = cb;
                  b0->total_length_not_including_first_buffer += clen;

                  /* update number of buffers */
                  num_buffers--;
                  n_left--;
                  vring->desc_in_use--;
                }
              len += b0->total_length_not_including_first_buffer;
            }

          if (type == VIRTIO_IF_TYPE_TUN)
            {
              switch (b0->data[0] & 0xf0)
                {
                case 0x40:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
                  break;
                case 0x60:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
                  break;
                default:
                  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
                  break;
                }

              if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
                next0 = vif->per_interface_next_index;
            }
          else
            {
              /* copy feature arc data from template */
              b0->current_config_index = bt.current_config_index;
              vnet_buffer (b0)->feature_arc_index =
                vnet_buffer (&bt)->feature_arc_index;
            }

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

          if (PREDICT_FALSE (n_trace > 0 &&
                             vlib_trace_buffer (vm, node, next0, b0,
                                                /* follow_chain */ 1)))
            {
              virtio_input_trace_t *tr;
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = vif->hw_if_index;
              tr->len = len;
              clib_memcpy_fast (&tr->hdr, hdr, hdr_sz);
            }

          /* enqueue buffer */
          to_next[0] = bi0;
          vring->desc_in_use--;
          to_next += 1;
          n_left_to_next--;
          n_left--;
          increment_last (last, packed, vring);

          /* only tun interfaces may have different next index */
          if (type == VIRTIO_IF_TYPE_TUN)
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);

          /* next packet */
          n_rx_packets++;
          n_rx_bytes += len;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vring->last_used_idx = last;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   vif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  return n_rx_packets;
}

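/*
 * Per-queue dispatch wrapper: opportunistically flushes the paired TX
 * queue's GRO coalesce / packet-buffering state, then selects the
 * specialized RX variant (packed vs. split ring, GSO vs. checksum-only
 * vs. none) and refills the ring afterwards.
 */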
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
                            virtio_if_type_t type)
{
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u16 txq_id = vm->thread_index % vif->num_txqs;
  virtio_vring_t *txq_vring = vec_elt_at_index (vif->txq_vrings, txq_id);
  uword rv;

  if (clib_spinlock_trylock_if_init (&txq_vring->lockp))
    {
      if (vif->packet_coalesce)
        vnet_gro_flow_table_schedule_node_on_dispatcher
          (vm, txq_vring->flow_table);
      else if (vif->packet_buffering)
        virtio_vring_buffering_schedule_node_on_dispatcher
          (vm, txq_vring->buffering);
      clib_spinlock_unlock_if_init (&txq_vring->lockp);
    }

  if (vif->is_packed)
    {
      if (vif->gso_enabled)
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 1, 1, 1);
      else if (vif->csum_offload_enabled)
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 0, 1, 1);
      else
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 0, 0, 1);

      virtio_refill_vring_packed (vm, vif, type, vring, hdr_sz,
                                  node->node_index);
    }
  else
    {
      if (vif->gso_enabled)
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 1, 1, 0);
      else if (vif->csum_offload_enabled)
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 0, 1, 0);
      else
        rv = virtio_device_input_gso_inline (vm, node, frame, vif, vring,
                                             type, 0, 0, 0);

      virtio_refill_vring_split (vm, vif, type, vring, hdr_sz,
                                 node->node_index);
    }

  return rv;
}

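/*
 * Input node entry point: walk the per-thread RX queue poll vector and
 * invoke the inline handler for every admin-up TAP, PCI or TUN interface.
 */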
VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_rx = 0;
  virtio_main_t *vim = &virtio_main;
  vnet_hw_if_rxq_poll_vector_t *p,
    *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  vec_foreach (p, pv)
    {
      virtio_if_t *vif;
      vif = vec_elt_at_index (vim->interfaces, p->dev_instance);
      if (vif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
        {
          if (vif->type == VIRTIO_IF_TYPE_TAP)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_TAP);
          else if (vif->type == VIRTIO_IF_TYPE_PCI)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_PCI);
          else if (vif->type == VIRTIO_IF_TYPE_TUN)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_TUN);
        }
    }

  return n_rx;
}

VLIB_REGISTER_NODE (virtio_input_node) = {
  .name = "virtio-input",
  .sibling_of = "device-input",
  .format_trace = format_virtio_input_trace,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = VIRTIO_INPUT_N_ERROR,
  .error_strings = virtio_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */