/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d",
		  t->gso_size, t->gso_l4_hdr_sz);
    }

  return s;
}

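/*
 * A resulting trace line looks like "gso_sz 1448 gso_l4_hdr_sz 20"
 * (illustrative values: a common TCP MSS and an option-less TCP header,
 * not taken from a real capture).
 */
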
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
		   vnet_interface_per_thread_data_t * ptd,
		   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
		   u16 gso_size)
{
  u16 size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz);

  /* rounded-up division */
  u16 n_bufs = (n_bytes_b0 - l234_sz + (size - 1)) / size;
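  /*
   * Illustrative numbers (not from the code): for a 9014-byte chain with
   * l234_sz = 66 and gso_size = 1448, the TCP payload is 9014 - 66 = 8948
   * bytes, so n_bufs = (8948 + 1447) / 1448 = 7 segment buffers.
   */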
  u16 n_alloc;

  ASSERT (n_bufs > 0);
  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      /* free the partial allocation and signal failure to the caller */
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
				 u32 flags, u16 length)
{
  nb0->current_data = b0->current_data;
  nb0->total_length_not_including_first_buffer = 0;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->trace_handle = b0->trace_handle;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
		    vlib_buffer_get_current (b0), length);
  nb0->current_length = length;
}

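/*
 * Note: the template copy above duplicates both the packet prefix
 * ('length' bytes of headers plus any first-segment payload) and the
 * opaque metadata, so every segment inherits the feature and interface
 * state of the original buffer. The caller is expected to pass 'flags'
 * with VNET_BUFFER_F_GSO and VLIB_BUFFER_NEXT_PRESENT already cleared
 * (see default_bflags in tso_segment_buffer below).
 */
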
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
			    vlib_buffer_t * b0, u16 template_data_sz,
			    u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
			    u32 next_tcp_seq, u32 flags,
			    gso_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
	      vlib_buffer_get_default_data_size (vm) - (template_data_sz +
							nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

static_always_inline void
tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6,
			 gso_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    ip6->payload_length =
      clib_host_to_net_u16 (b0->current_length -
			    (gho->l4_hdr_offset - gho->l2_hdr_offset));
  else
    ip4->length =
      clib_host_to_net_u16 (b0->current_length -
			    (gho->l3_hdr_offset - gho->l2_hdr_offset));
}

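/*
 * Quick check of the length math above (illustrative numbers): with
 * l2_hdr_offset = 0, l3_hdr_offset = 14 and current_length = 1514, the
 * IPv4 total length becomes 1514 - 14 = 1500. For IPv6 with
 * l4_hdr_offset = 54 (14 bytes of L2 plus the 40-byte IPv6 header) and
 * current_length = 1534, payload_length becomes 1534 - 54 = 1480,
 * excluding the fixed IPv6 header as that field requires.
 */
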
/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * them.
 *
 * Return the cumulative number of bytes sent or zero
 * if allocation failed.
 */
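/*
 * Sketch of a typical run (assumed numbers, for orientation only): an
 * 8948-byte TCP payload with gso_size = 1448 yields 7 buffers. The
 * first reuses the original headers plus 1448 bytes of payload; all but
 * the last segment have FIN/PSH cleared; the last carries the remaining
 * 260 bytes with the original TCP flags restored.
 */
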
static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
		    u32 sbi0, vlib_buffer_t * sb0, gso_header_offset_t * gho,
		    u32 n_bytes_b0, int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  int l4_hdr_sz = gho->l4_hdr_sz;
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->l4_hdr_offset + l4_hdr_sz - gho->l2_hdr_offset;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
				   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
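  /*
   * Continuing the illustrative numbers from above: 8948 bytes of
   * payload minus first_data_size = 1448 leaves total_src_left = 7500
   * bytes still to be copied into fresh segment buffers.
   */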
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;		/* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6, gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
				  &dst_left, next_tcp_seq, default_bflags,
				  gho);

      /* an arbitrary large number to catch the runaway loops */
      int nloops = 2000;
      while (total_src_left)
	{
	  if (nloops-- <= 0)
	    clib_panic ("infinite loop detected");
	  u16 bytes_to_copy = clib_min (src_left, dst_left);

	  clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

	  src_left -= bytes_to_copy;
	  src_ptr += bytes_to_copy;
	  total_src_left -= bytes_to_copy;
	  dst_left -= bytes_to_copy;
	  dst_ptr += bytes_to_copy;
	  next_tcp_seq += bytes_to_copy;
	  cdb0->current_length += bytes_to_copy;

	  if (0 == src_left)
	    {
	      int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
	      u32 next_bi = csb0->next_buffer;

	      /* init src to the next buffer in chain */
	      if (has_next)
		{
		  csbi0 = next_bi;
		  csb0 = vlib_get_buffer (vm, csbi0);
		  src_left = csb0->current_length;
		  src_ptr = vlib_buffer_get_current (csb0);
		}
	      else
		{
		  ASSERT (total_src_left == 0);
		  break;
		}
	    }
	  if (0 == dst_left && total_src_left)
	    {
	      n_tx_bytes += cdb0->current_length;
	      ASSERT (dbi < vec_len (ptd->split_buffers));
	      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
	      tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
					  gso_size, &dst_ptr, &dst_left,
					  next_tcp_seq, default_bflags, gho);
	    }
	}

      tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

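/*
 * Note: on success the segments are left in ptd->split_buffers; the
 * caller enqueues them, resets the vector length and frees the original
 * buffer (see the segmentation path in vnet_gso_node_inline below).
 */
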
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
			   vlib_node_runtime_t * node, u32 * pbi0,
			   u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
		      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
			   /* buffer stride */ 1,
			   /* n_buffers */ 1,
			   VNET_INTERFACE_OUTPUT_NEXT_DROP,
			   node->node_index, drop_error_code);
}

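/*
 * The helper above bumps the per-thread TX_ERROR counter for the
 * interface and hands the buffer to the error-drop path in one step; it
 * is used below when no buffers can be allocated for segmentation.
 */
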
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
		      vlib_node_runtime_t * node,
		      vlib_frame_t * frame,
		      vnet_main_t * vnm,
		      vnet_hw_interface_t * hi,
		      int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      if (!do_segmentation)
	while (from + 8 <= from_end && n_left_to_next >= 4)
	  {
	    u32 bi0, bi1, bi2, bi3;
	    u32 next0, next1, next2, next3;
	    u32 swif0, swif1, swif2, swif3;
	    gso_trace_t *t0, *t1, *t2, *t3;
	    vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

	    /* Prefetch next iteration. */
	    vlib_prefetch_buffer_header (b[4], LOAD);
	    vlib_prefetch_buffer_header (b[5], LOAD);
	    vlib_prefetch_buffer_header (b[6], LOAD);
	    vlib_prefetch_buffer_header (b[7], LOAD);

	    bi0 = from[0];
	    bi1 = from[1];
	    bi2 = from[2];
	    bi3 = from[3];
	    to_next[0] = bi0;
	    to_next[1] = bi1;
	    to_next[2] = bi2;
	    to_next[3] = bi3;

	    swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	    swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
	    swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
	    swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

	    if (PREDICT_FALSE (hi->sw_if_index != swif0))
	      {
		hi0 = vnet_get_sup_hw_interface (vnm, swif0);
		if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		    (b[0]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif1))
	      {
		hi1 = vnet_get_sup_hw_interface (vnm, swif1);
		if ((hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		    (b[1]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif2))
	      {
		hi2 = vnet_get_sup_hw_interface (vnm, swif2);
		if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		    (b[2]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif3))
	      {
		hi3 = vnet_get_sup_hw_interface (vnm, swif3);
		if ((hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		    (b[3]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
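	    /*
	     * A GSO buffer destined to a sub-interface whose hardware
	     * lacks GSO support cannot be passed through by this quad
	     * loop; the breaks above drop to the single-buffer loop
	     * below, which re-examines the remaining buffers one by one
	     * and segments where needed.
	     */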
	    if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
		t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
		t0->gso_size = vnet_buffer2 (b[0])->gso_size;
		t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
	      }
	    if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
		t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
		t1->gso_size = vnet_buffer2 (b[1])->gso_size;
		t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
	      }
	    if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
		t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
		t2->gso_size = vnet_buffer2 (b[2])->gso_size;
		t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
	      }
	    if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
		t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
		t3->gso_size = vnet_buffer2 (b[3])->gso_size;
		t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
	      }

	    from += 4;
	    to_next += 4;
	    n_left_to_next -= 4;
	    n_left_from -= 4;

	    vnet_feature_next (&next0, b[0]);
	    vnet_feature_next (&next1, b[1]);
	    vnet_feature_next (&next2, b[2]);
	    vnet_feature_next (&next3, b[3]);

	    vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					     n_left_to_next, bi0, bi1, bi2,
					     bi3, next0, next1, next2, next3);
	    b += 4;
	  }
      while (from + 1 <= from_end && n_left_to_next > 0)
	{
	  u32 bi0, swif0;
	  gso_trace_t *t0;
	  vnet_hw_interface_t *hi0;
	  u32 next0 = 0;
	  u32 do_segmentation0 = do_segmentation;

	  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	  if (PREDICT_FALSE (hi->sw_if_index != swif0))
	    {
	      hi0 = vnet_get_sup_hw_interface (vnm, swif0);
	      if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
		  (b[0]->flags & VNET_BUFFER_F_GSO))
		do_segmentation0 = 1;
	    }

	  /* speculatively enqueue b0 to the current next frame */
	  to_next[0] = bi0 = from[0];
	  to_next += 1;
	  n_left_to_next -= 1;
	  from += 1;
	  n_left_from -= 1;

	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
	      t0->gso_size = vnet_buffer2 (b[0])->gso_size;
	      t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
	    }

	  if (do_segmentation0)
	    if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
	      {
		/*
		 * Undo the enqueue of the b0 - it is not going anywhere,
		 * and will be freed either after it's segmented or
		 * when dropped, if there are no buffers to segment into.
		 */
		to_next -= 1;
		n_left_to_next += 1;
		/* undo the counting. */
		gso_header_offset_t gho;
		u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
		u32 n_tx_bytes = 0;

		gho = vnet_gso_header_offset_parser (b[0], is_ip6);
		n_tx_bytes =
		  tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
				      is_ip6);

		if (PREDICT_FALSE (n_tx_bytes == 0))
		  {
		    drop_one_buffer_and_count (vm, vnm, node, from - 1,
					       hi->sw_if_index,
					       VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
		    b += 1;
		    continue;
		  }

		u16 n_tx_bufs = vec_len (ptd->split_buffers);
		u32 *from_seg = ptd->split_buffers;

		while (n_tx_bufs > 0)
		  {
		    u32 sbi0;
		    vlib_buffer_t *sb0;
		    if (n_tx_bufs >= n_left_to_next)
		      {
			while (n_left_to_next > 0)
			  {
			    sbi0 = to_next[0] = from_seg[0];
			    sb0 = vlib_get_buffer (vm, sbi0);
			    to_next += 1;
			    n_left_to_next -= 1;
			    from_seg += 1;
			    n_tx_bufs -= 1;
			    vnet_feature_next (&next0, sb0);
			    vlib_validate_buffer_enqueue_x1 (vm, node,
							     next_index,
							     to_next,
							     n_left_to_next,
							     sbi0, next0);
			  }
			vlib_put_next_frame (vm, node, next_index,
					     n_left_to_next);
			vlib_get_new_next_frame (vm, node, next_index,
						 to_next, n_left_to_next);
		      }
		    while (n_tx_bufs > 0)
		      {
			sbi0 = to_next[0] = from_seg[0];
			sb0 = vlib_get_buffer (vm, sbi0);
			to_next += 1;
			n_left_to_next -= 1;
			from_seg += 1;
			n_tx_bufs -= 1;
			vnet_feature_next (&next0, sb0);
			vlib_validate_buffer_enqueue_x1 (vm, node,
							 next_index, to_next,
							 n_left_to_next,
							 sbi0, next0);
		      }
		  }
		/* The buffers were enqueued. Reset the length */
		_vec_len (ptd->split_buffers) = 0;
		/* Free the now segmented buffer */
		vlib_buffer_free_one (vm, bi0);
		b += 1;
		continue;
	      }

	  vnet_feature_next (&next0, b[0]);
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	  b += 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
		 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
				      vnet_buffer (b)->sw_if_index[VLIB_TX]);
      /*
       * The 3-headed "if" is here because we want to err on the side
       * of not impacting the non-GSO performance - so for the more
       * common case of no GSO interfaces we want to prevent the
       * segmentation codepath from being there altogether.
       */
      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_ip6, /* do_segmentation */ 0);
      else
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_ip6, /* do_segmentation */ 1);
    }
  return 0;
}

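/*
 * Because do_segmentation is a constant argument to the always-inlined
 * worker, the branch above effectively specializes the node into two
 * copies: a pass-through variant for GSO-capable interfaces and a
 * segmenting variant, keeping the common non-GSO path free of
 * segmentation code.
 */
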
VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
}

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_after = VNET_FEATURES ("ipsec4-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_after = VNET_FEATURES ("ipsec6-output-feature"),
  .runs_before = VNET_FEATURES ("interface-output"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */