/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
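
/*
 * virtio-input: RX node shared by virtio-backed tap, tun and PCI
 * interfaces. It drains the used ring, attaches checksum-offload and GSO
 * metadata taken from the virtio net header, and hands packets to
 * ethernet-input (tap/PCI) or ip4/ip6-input (tun).
 */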
#include <sys/types.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>
#define foreach_virtio_input_error \
  _(UNKNOWN, "unknown")

typedef enum
{
#define _(f,s) VIRTIO_INPUT_ERROR_##f,
  foreach_virtio_input_error
#undef _
    VIRTIO_INPUT_N_ERROR,
} virtio_input_error_t;

static char *virtio_input_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_input_error
#undef _
};
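
/* Per-packet trace record: next node, interface, ring/len and a copy of
 * the raw virtio net header as received from the backend. */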
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
  u16 len;
  struct virtio_net_hdr_v1 hdr;
} virtio_input_trace_t;
static u8 *
format_virtio_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
              t->hw_if_index, t->next_index, t->ring, t->len);
  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
              "gso_size %u csum_start %u csum_offset %u num_buffers %u",
              format_white_space, indent + 2,
              t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
              t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
  return s;
}
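
/* Replenish the RX ring: allocate buffers in chunks of up to 64, post them
 * as writable descriptors on the avail ring, and kick the backend unless
 * interrupts are masked. Refilling stops once fewer than 1/8 of the
 * descriptors are free. */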
static_always_inline void
virtio_refill_vring (vlib_main_t * vm, virtio_if_t * vif,
                     virtio_if_type_t type, virtio_vring_t * vring,
                     const int hdr_sz)
{
  u16 used, next, avail, n_slots;
  u16 sz = vring->size;
  u16 mask = sz - 1;

more:
  used = vring->desc_in_use;

  if (sz - used < sz / 8)
    return;

  /* deliver free buffers in chunks of 64 */
  n_slots = clib_min (sz - used, 64);

  next = vring->desc_next;
  avail = vring->avail->idx;
  n_slots =
    vlib_buffer_alloc_to_ring_from_pool (vm, vring->buffers, next,
                                         vring->size, n_slots,
                                         vring->buffer_pool_index);

  if (n_slots == 0)
    return;

  while (n_slots)
    {
      struct vring_desc *d = &vring->desc[next];
      vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
      /*
       * current_data may still hold an offset from the buffer's previous
       * use; reset it to -hdr_sz so the zeroed virtio header sits
       * immediately in front of the packet data.
       */
      b->current_data = -hdr_sz;
      memset (vlib_buffer_get_current (b), 0, hdr_sz);
      d->addr = ((type == VIRTIO_IF_TYPE_PCI) ?
                 vlib_buffer_get_current_pa (vm, b) :
                 pointer_to_uword (vlib_buffer_get_current (b)));
      d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;
      d->flags = VRING_DESC_F_WRITE;
      vring->avail->ring[avail & mask] = next;
      avail++;
      next = (next + 1) & mask;
      n_slots--;
      used++;
    }

  CLIB_MEMORY_STORE_BARRIER ();
  vring->avail->idx = avail;
  vring->desc_next = next;
  vring->desc_in_use = used;

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
    virtio_kick (vm, vring, vif);

  goto more;
}
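
/* When the backend sets VIRTIO_NET_HDR_F_NEEDS_CSUM, the packet arrives
 * with an incomplete L4 checksum. Locate the L3/L4 headers (the raw IP
 * version nibble for tun, the ethernet/VLAN header otherwise) and mark the
 * buffer for TCP/UDP checksum offload so a later node can complete it. */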
static_always_inline void
virtio_needs_csum (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
                   u8 * l4_proto, u8 * l4_hdr_sz, virtio_if_type_t type)
{
  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      u16 ethertype = 0, l2hdr_sz = 0;

      if (type == VIRTIO_IF_TYPE_TUN)
        {
          switch (b0->data[0] & 0xf0)
            {
            case 0x40:
              ethertype = ETHERNET_TYPE_IP4;
              break;
            case 0x60:
              ethertype = ETHERNET_TYPE_IP6;
              break;
            }
        }
      else
        {
          ethernet_header_t *eh =
            (ethernet_header_t *) vlib_buffer_get_current (b0);
          ethertype = clib_net_to_host_u16 (eh->type);
          l2hdr_sz = sizeof (ethernet_header_t);

          if (ethernet_frame_is_tagged (ethertype))
            {
              ethernet_vlan_header_t *vlan =
                (ethernet_vlan_header_t *) (eh + 1);

              ethertype = clib_net_to_host_u16 (vlan->type);
              l2hdr_sz += sizeof (*vlan);
              if (ethertype == ETHERNET_TYPE_VLAN)
                {
                  vlan++;
                  ethertype = clib_net_to_host_u16 (vlan->type);
                  l2hdr_sz += sizeof (*vlan);
                }
            }
        }

      vnet_buffer (b0)->l2_hdr_offset = 0;
      vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;

      if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
        {
          ip4_header_t *ip4 =
            (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
          *l4_proto = ip4->protocol;
          b0->flags |=
            (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
          b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }
      else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
        {
          ip6_header_t *ip6 =
            (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
          vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
          /* FIXME IPv6 EH traversal */
          *l4_proto = ip6->protocol;
          b0->flags |= (VNET_BUFFER_F_IS_IP6 |
                        VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
                        VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
        }

      if (*l4_proto == IP_PROTOCOL_TCP)
        {
          b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
          tcp_header_t *tcp =
            (tcp_header_t *) (vlib_buffer_get_current (b0) +
                              vnet_buffer (b0)->l4_hdr_offset);
          *l4_hdr_sz = tcp_header_bytes (tcp);
        }
      else if (*l4_proto == IP_PROTOCOL_UDP)
        {
          b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
          udp_header_t *udp =
            (udp_header_t *) (vlib_buffer_get_current (b0) +
                              vnet_buffer (b0)->l4_hdr_offset);
          *l4_hdr_sz = sizeof (*udp);
        }
    }
}
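
/* Copy GSO metadata from the virtio net header into the buffer so the
 * segmentation path downstream can use it: the gso_size, the L4 header
 * size computed above, and an IP4/IP6 GSO flag. */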
static_always_inline void
fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
                       u8 l4_proto, u8 l4_hdr_sz)
{
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
    }
  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
    }
}
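
/* Per-queue RX path, specialized at compile time on the interface type and
 * on whether GSO / checksum offload are enabled. It walks the used ring,
 * rebuilds chained vlib buffers for packets spanning several descriptors,
 * picks the next node (ethernet-input, ip4/ip6-input or a per-interface
 * override), traces and enqueues the packets, bumps the RX combined
 * counter and finally refills the ring. */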
static_always_inline uword
virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame, virtio_if_t * vif,
                                u16 qid, virtio_if_type_t type,
                                int gso_enabled, int checksum_offload_enabled)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 thread_index = vm->thread_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u32 *to_next = 0;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 mask = vring->size - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      vring->last_kick_avail_idx != vring->avail->idx)
    virtio_kick (vm, vring, vif);

  if (n_left == 0)
    return 0;

  if (type == VIRTIO_IF_TYPE_TUN)
    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;

  while (n_left)
    {
      u32 n_left_to_next;
      u32 next0 = next_index;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left && n_left_to_next)
        {
          u8 l4_proto = 0, l4_hdr_sz = 0;
          u16 num_buffers = 1;
          struct vring_used_elem *e = &vring->used->ring[last & mask];
          struct virtio_net_hdr_v1 *hdr;
          u16 slot = e->id;
          u16 len = e->len - hdr_sz;
          u32 bi0 = vring->buffers[slot];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          hdr = vlib_buffer_get_current (b0);
          if (hdr_sz == sizeof (struct virtio_net_hdr_v1))
            num_buffers = hdr->num_buffers;

          b0->current_data = 0;
          b0->current_length = len;

          if (checksum_offload_enabled)
            virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz, type);

          if (gso_enabled)
            fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;

          /* if multisegment packet */
          if (PREDICT_FALSE (num_buffers > 1))
            {
              vlib_buffer_t *pb, *cb;
              pb = b0;
              b0->total_length_not_including_first_buffer = 0;
              b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
              while (num_buffers > 1)
                {
                  last++;
                  e = &vring->used->ring[last & mask];
                  u32 cbi = vring->buffers[e->id];
                  cb = vlib_get_buffer (vm, cbi);

                  /* current buffer */
                  cb->current_length = e->len;

                  /* previous buffer */
                  pb->next_buffer = cbi;
                  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;

                  /* first buffer */
                  b0->total_length_not_including_first_buffer += e->len;

                  pb = cb;
                  vring->desc_in_use--;
                  num_buffers--;
                  n_left--;
                }
              len += b0->total_length_not_including_first_buffer;
            }

          if (type == VIRTIO_IF_TYPE_TUN)
            {
              switch (b0->data[0] & 0xf0)
                {
                case 0x40:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
                  break;
                case 0x60:
                  next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
                  break;
                default:
                  next0 = VNET_DEVICE_INPUT_NEXT_DROP;
                  break;
                }
            }

          if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
            next0 = vif->per_interface_next_index;

          if (type != VIRTIO_IF_TYPE_TUN)
            {
              /* only for l2, redirect if feature path enabled */
              vnet_feature_start_device_input_x1 (vif->sw_if_index, &next0,
                                                  b0);
            }

          /* trace */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

          if (PREDICT_FALSE (n_trace > 0))
            {
              virtio_input_trace_t *tr;
              vlib_trace_buffer (vm, node, next0, b0,
                                 /* follow_chain */ 1);
              vlib_set_trace_count (vm, node, --n_trace);
              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->hw_if_index = vif->hw_if_index;
              tr->len = len;
              clib_memcpy_fast (&tr->hdr, hdr, hdr_sz);
            }

          /* enqueue buffer */
          to_next[0] = bi0;
          vring->desc_in_use--;
          to_next += 1;
          n_left_to_next--;
          n_left--;
          last++;

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

          /* next packet */
          n_rx_packets++;
          n_rx_bytes += len;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vring->last_used_idx = last;

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   vif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  virtio_refill_vring (vm, vif, type, vring, hdr_sz);

  return n_rx_packets;
}
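
/* Select the specialization above from the interface's runtime GSO and
 * checksum-offload settings; passing compile-time constants lets the
 * compiler drop the unused branches in each instantiation. */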
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
                            virtio_if_type_t type)
{
  if (vif->gso_enabled)
    return virtio_device_input_gso_inline (vm, node, frame, vif, qid, type,
                                           1 /* gso */ , 1 /* csum */ );
  else if (vif->csum_offload_enabled)
    return virtio_device_input_gso_inline (vm, node, frame, vif, qid, type,
                                           0 /* gso */ , 1 /* csum */ );
  else
    return virtio_device_input_gso_inline (vm, node, frame, vif, qid, type,
                                           0 /* gso */ , 0 /* csum */ );
}
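
/* Node entry point: iterate over the (interface, queue) pairs assigned to
 * this thread by the device-input infrastructure and poll every admin-up
 * virtio interface, dispatching on its type (tap, PCI or tun). */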
VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_rx = 0;
  virtio_main_t *nm = &virtio_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    virtio_if_t *vif;
    vif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
    if (vif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
      {
        if (vif->type == VIRTIO_IF_TYPE_TAP)
          n_rx += virtio_device_input_inline (vm, node, frame, vif,
                                              dq->queue_id,
                                              VIRTIO_IF_TYPE_TAP);
        else if (vif->type == VIRTIO_IF_TYPE_PCI)
          n_rx += virtio_device_input_inline (vm, node, frame, vif,
                                              dq->queue_id,
                                              VIRTIO_IF_TYPE_PCI);
        else if (vif->type == VIRTIO_IF_TYPE_TUN)
          n_rx += virtio_device_input_inline (vm, node, frame, vif,
                                              dq->queue_id,
                                              VIRTIO_IF_TYPE_TUN);
      }
  }

  return n_rx;
}
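
/* Input node registration: runs as a sibling of device-input (sharing its
 * next-node arcs), supports tracing and starts in interrupt state. */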
VLIB_REGISTER_NODE (virtio_input_node) = {
  .name = "virtio-input",
  .sibling_of = "device-input",
  .format_trace = format_virtio_input_trace,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = VIRTIO_INPUT_N_ERROR,
  .error_strings = virtio_input_error_strings,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */