/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/virtio_inline.h>

static char *virtio_input_error_strings[] = {
#define _(n, s) s,
  foreach_virtio_input_error
#undef _
};

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
  u16 len;
  vnet_virtio_net_hdr_v1_t hdr;
} virtio_input_trace_t;
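
/*
 * Formatter used by the packet tracer ("show trace") to render one
 * virtio_input_trace_t record: interface/ring metadata first, then the
 * raw virtio net header as supplied by the device.
 */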
static u8 *
format_virtio_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
              t->hw_if_index, t->next_index, t->ring, t->len);
  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
              "gso_size %u csum_start %u csum_offset %u num_buffers %u",
              format_white_space, indent + 2,
              t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
              t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
  return s;
}
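
/*
 * When the device sets VIRTIO_NET_HDR_F_NEEDS_CSUM, the L4 checksum is
 * only partially filled in, so the buffer must be marked for checksum
 * offload. This helper parses L2/L3 just far enough to locate the L4
 * header, records the header offsets in the buffer metadata, and returns
 * the L4 protocol and header size for the GSO path below.
 */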
static_always_inline void
virtio_needs_csum (vlib_buffer_t *b0, vnet_virtio_net_hdr_v1_t *hdr,
                   u8 *l4_proto, u8 *l4_hdr_sz, virtio_if_type_t type)
{
  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      u16 ethertype = 0, l2hdr_sz = 0;
      vnet_buffer_oflags_t oflags = 0;

      if (type == VIRTIO_IF_TYPE_TUN)
        {
          /* tun interfaces are L3: infer the ethertype from the IP version */
          switch (b0->data[0] & 0xf0)
            {
            case 0x40:
              ethertype = ETHERNET_TYPE_IP4;
              break;
            case 0x60:
              ethertype = ETHERNET_TYPE_IP6;
              break;
            }
        }
      else
        {
          ethernet_header_t *eh = (ethernet_header_t *) b0->data;
          ethertype = clib_net_to_host_u16 (eh->type);
          l2hdr_sz = sizeof (ethernet_header_t);

          if (ethernet_frame_is_tagged (ethertype))
            {
              ethernet_vlan_header_t *vlan =
                (ethernet_vlan_header_t *) (eh + 1);

              ethertype = clib_net_to_host_u16 (vlan->type);
              l2hdr_sz += sizeof (*vlan);
              if (ethertype == ETHERNET_TYPE_VLAN)
                {
                  vlan++;
                  ethertype = clib_net_to_host_u16 (vlan->type);
                  l2hdr_sz += sizeof (*vlan);
                }
            }
        }

      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;

      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
        {
          ip4_header_t *ip4 = (ip4_header_t *) (b0->data + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
          *l4_proto = ip4->protocol;
          oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
          b0->flags |=
            (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
        {
          ip6_header_t *ip6 = (ip6_header_t *) (b0->data + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
          /* FIXME IPv6 EH traversal */
          *l4_proto = ip6->protocol;
          b0->flags |=
            (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
             VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }
      if (*l4_proto == IP_PROTOCOL_TCP)
        {
          oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
          tcp_header_t *tcp =
            (tcp_header_t *) (b0->data + vnet_buffer (b0)->l4_hdr_offset);
          *l4_hdr_sz = tcp_header_bytes (tcp);
        }
      else if (*l4_proto == IP_PROTOCOL_UDP)
        {
          oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
          *l4_hdr_sz = sizeof (udp_header_t);
        }
      if (oflags)
        vnet_buffer_offload_flags_set (b0, oflags);
    }
}
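
/*
 * For coalesced (GSO) packets the device reports the per-segment payload
 * size; stash it in the buffer metadata so a later node or a capable
 * output driver can resegment the packet.
 */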
static_always_inline void
fill_gso_buffer_flags (vlib_buffer_t *b0, vnet_virtio_net_hdr_v1_t *hdr,
                       u8 l4_proto, u8 l4_hdr_sz)
{
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
    }
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
    }
}
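
/*
 * Small helpers that hide the difference between split and packed
 * virtqueues: how many used descriptors are pending, which slot a used
 * descriptor refers to, and how many bytes the device wrote (minus the
 * virtio net header, which precedes the payload in the same buffer).
 */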
static_always_inline u16
virtio_n_left_to_process (vnet_virtio_vring_t *vring, const int packed)
{
  if (packed)
    return vring->desc_in_use;
  else
    return vring->used->idx - vring->last_used_idx;
}

static_always_inline u16
virtio_get_slot_id (vnet_virtio_vring_t *vring, const int packed, u16 last,
                    u16 mask)
{
  if (packed)
    return vring->packed_desc[last].id;
  else
    return vring->used->ring[last & mask].id;
}

static_always_inline u16
virtio_get_len (vnet_virtio_vring_t *vring, const int packed, const int hdr_sz,
                u16 last, u16 mask)
{
  if (packed)
    return vring->packed_desc[last].len - hdr_sz;
  else
    return vring->used->ring[last & mask].len - hdr_sz;
}
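
/*
 * Advance the used-ring cursor. Split rings use a free-running 16-bit
 * index masked on access, while packed rings wrap at queue_size and flip
 * the wrap counter that is matched against the AVAIL/USED descriptor flag
 * bits (bits 7 and 15) in the receive loop below.
 */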
#define increment_last(last, packed, vring) \
  do { \
    last++; \
    if (packed && last >= vring->queue_size) \
      { \
        last = 0; \
        vring->used_wrap_counter ^= 1; \
      } \
  } while (0)
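
/*
 * If the whole frame is headed to ethernet-input, tag it as coming from a
 * single sw_if_index so ethernet-input can take its fast path, and mark
 * the frame no-append so no other node adds buffers behind our back.
 */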
static_always_inline void
virtio_device_input_ethernet (vlib_main_t *vm, vlib_node_runtime_t *node,
                              const u32 next_index, const u32 sw_if_index,
                              const u32 hw_if_index)
{
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  ethernet_input_frame_t *ef;

  if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
    return;

  nf = vlib_node_runtime_get_next_frame (
    vm, node, VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
  f = vlib_get_frame (vm, nf->frame);
  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

  ef = vlib_frame_scalar_args (f);
  ef->sw_if_index = sw_if_index;
  ef->hw_if_index = hw_if_index;
  vlib_frame_no_append (f);
}
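
/*
 * Main receive loop, specialized at compile time on interface type,
 * GSO/checksum support and ring layout (all constant-folded through the
 * callers in virtio_device_input_inline below).
 */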
static_always_inline uword
virtio_device_input_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                vlib_frame_t *frame, virtio_if_t *vif,
                                vnet_virtio_vring_t *vring,
                                virtio_if_type_t type, int gso_enabled,
                                int checksum_offload_enabled, int packed)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thread_index = vm->thread_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 next_index;
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u32 *to_next = 0;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 mask = vring->queue_size - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = virtio_n_left_to_process (vring, packed);
  vlib_buffer_t bt = {};

  if (n_left == 0)
    return 0;

  if (type == VIRTIO_IF_TYPE_TUN)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
    }
  else
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
        next_index = vif->per_interface_next_index;

      /* only for l2, redirect if feature path enabled */
      vnet_feature_start_device_input_x1 (vif->sw_if_index, &next_index, &bt);
    }
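
  /*
   * bt is a template buffer: vnet_feature_start_device_input_x1 stored the
   * feature arc config in it once, and it is copied into each packet below
   * instead of redoing the per-buffer feature lookup.
   */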
  while (n_left)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;

      vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left && n_left_to_next)
        {
          if (packed)
            {
              vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[last];
              u16 flags = d->flags;
              if ((flags & VRING_DESC_F_AVAIL) !=
                  (vring->used_wrap_counter << 7)
                  || (flags & VRING_DESC_F_USED) !=
                  (vring->used_wrap_counter << 15))
                {
                  n_left = 0;
                  break;
                }
            }
          u8 l4_proto = 0, l4_hdr_sz = 0;
          u16 num_buffers = 1;
          vnet_virtio_net_hdr_v1_t *hdr;
          u16 slot = virtio_get_slot_id (vring, packed, last, mask);
          u16 len = virtio_get_len (vring, packed, hdr_sz, last, mask);
          u32 bi0 = vring->buffers[slot];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          hdr = vlib_buffer_get_current (b0);
          if (hdr_sz == sizeof (vnet_virtio_net_hdr_v1_t))
            num_buffers = hdr->num_buffers;

          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          b0->current_data = 0;
          b0->current_length = len;

          if (checksum_offload_enabled)
            virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz, type);

          if (gso_enabled)
            fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* if multisegment packet */
          if (PREDICT_FALSE (num_buffers > 1))
            {
              vlib_buffer_t *pb, *cb;
              pb = b0;
              b0->total_length_not_including_first_buffer = 0;
              while (num_buffers > 1)
                {
                  increment_last (last, packed, vring);
                  u16 cslot = virtio_get_slot_id (vring, packed, last, mask);
                  /* hdr size is 0 after 1st packet in chain buffers */
                  u16 clen = virtio_get_len (vring, packed, 0, last, mask);
                  u32 cbi = vring->buffers[cslot];
                  cb = vlib_get_buffer (vm, cbi);

                  /* current buffer */
                  cb->current_length = clen;

                  /* previous buffer */
                  pb->next_buffer = cbi;
                  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;

                  /* first buffer */
                  b0->total_length_not_including_first_buffer += clen;

                  pb = cb;
                  vring->desc_in_use--;
                  num_buffers--;
                  n_left--;
                }
              len += b0->total_length_not_including_first_buffer;
            }

          if (type == VIRTIO_IF_TYPE_TUN)
            {
              switch (b0->data[0] & 0xf0)
                {
                case 0x40:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
                  break;
                case 0x60:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
                  break;
                default:
                  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
                  break;
                }

              if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
                next0 = vif->per_interface_next_index;
            }
          else
            {
              /* copy feature arc data from template */
              b0->current_config_index = bt.current_config_index;
              vnet_buffer (b0)->feature_arc_index =
                vnet_buffer (&bt)->feature_arc_index;
            }

          /* trace */
          if (PREDICT_FALSE (n_trace > 0 &&
                             vlib_trace_buffer (vm, node, next0, b0,
                                                /* follow_chain */ 1)))
            {
              virtio_input_trace_t *tr;
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = vif->hw_if_index;
              tr->len = len;
              clib_memcpy_fast (&tr->hdr, hdr, (hdr_sz == 12) ? 12 : 10);
            }

          /* enqueue buffer */
          to_next[0] = bi0;
          vring->desc_in_use--;
          to_next += 1;
          n_left_to_next--;
          n_left--;
          increment_last (last, packed, vring);

          /* only tun interfaces may have different next index */
          if (type == VIRTIO_IF_TYPE_TUN)
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, next0);

          /* next packet */
          n_rx_packets++;
          n_rx_bytes += len;
        }
      virtio_device_input_ethernet (vm, node, next_index, vif->sw_if_index,
                                    vif->hw_if_index);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vring->last_used_idx = last;

  vring->total_packets += n_rx_packets;
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   vif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  return n_rx_packets;
}
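
/*
 * Dispatch wrapper: expands virtio_device_input_gso_inline once per
 * (gso, csum, ring-layout) combination so the fast path carries no
 * per-packet feature branches, then refills the vring with fresh buffers.
 */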
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
                            virtio_if_type_t type)
{
  uword n_rx = 0;
  vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
  const int hdr_sz = vif->virtio_net_hdr_sz;

  if (vif->is_packed)
    {
      if (vif->gso_enabled)
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          1, 1, 1);
      else if (vif->csum_offload_enabled)
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          0, 1, 1);
      else
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          0, 0, 1);

      virtio_refill_vring_packed (vm, vif, type, vring, hdr_sz,
                                  node->node_index);
    }
  else
    {
      if (vif->gso_enabled)
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          1, 1, 0);
      else if (vif->csum_offload_enabled)
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          0, 1, 0);
      else
        n_rx =
          virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
                                          0, 0, 0);

      virtio_refill_vring_split (vm, vif, type, vring, hdr_sz,
                                 node->node_index);
    }
  return n_rx;
}
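
/*
 * Node entry point: the rx-queue infra hands us a poll vector of
 * (interface, queue) pairs scheduled on this thread; each admin-up
 * interface is processed with its type pinned so the inline above
 * expands to the right specialization.
 */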
VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_rx = 0;
  virtio_main_t *vim = &virtio_main;
  virtio_if_t *vif;
  vnet_hw_if_rxq_poll_vector_t *p,
    *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  vec_foreach (p, pv)
    {
      vif = vec_elt_at_index (vim->interfaces, p->dev_instance);
      if (vif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
        {
          if (vif->type == VIRTIO_IF_TYPE_TAP)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_TAP);
          else if (vif->type == VIRTIO_IF_TYPE_PCI)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_PCI);
          else if (vif->type == VIRTIO_IF_TYPE_TUN)
            n_rx += virtio_device_input_inline (
              vm, node, frame, vif, p->queue_id, VIRTIO_IF_TYPE_TUN);
        }
    }

  return n_rx;
}
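
/*
 * Registered as a sibling of device-input so it shares that node's next
 * arcs; it starts in interrupt mode and can be switched to polling per
 * rx-queue. To inspect packets through this node (standard VPP debug CLI):
 *   vpp# trace add virtio-input 8
 *   vpp# show trace
 */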
VLIB_REGISTER_NODE (virtio_input_node) = {
  .name = "virtio-input",
  .sibling_of = "device-input",
  .format_trace = format_virtio_input_trace,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = VIRTIO_INPUT_N_ERROR,
  .error_strings = virtio_input_error_strings,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */