/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
#define foreach_gso_error                                                     \
  _ (NO_BUFFERS, "no buffers to segment GSO")                                 \
  _ (UNHANDLED_TYPE, "unhandled gso type")

static char *gso_error_strings[] = {
#define _(sym, string) string,
  foreach_gso_error
#undef _
};

typedef enum
{
#define _(sym, str) GSO_ERROR_##sym,
  foreach_gso_error
#undef _
    GSO_N_ERROR,
} gso_error_t;

typedef enum
{
  GSO_NEXT_DROP,
  GSO_N_NEXT,
} gso_next_t;

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;
static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
		  t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
		  &t->gho);
    }
  else
    {
      s = format (s, "non-gso buffer\n%U", format_generic_header_offset,
		  &t->gho);
    }

  return s;
}
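/*
 * After an IP-in-IP tunneled packet is segmented, each segment carries
 * a copy of the outer header whose length fields are stale. Walk
 * ptd->split_buffers, rewrite the outer IPv4 total length (plus header
 * checksum) or the outer IPv6 payload length, and return the number of
 * bytes the replicated outer headers add to the wire.
 */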
static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
			       vnet_interface_per_thread_data_t * ptd,
			       vlib_buffer_t * sb0,
			       generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);

      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) +
			  gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) +
			  gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
	{
	  ip4->length =
	    clib_host_to_net_u16 (b0->current_length -
				  gho->outer_l3_hdr_offset);
	  ip4->checksum = ip4_header_checksum (ip4);
	}
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
	{
	  ip6->payload_length =
	    clib_host_to_net_u16 (b0->current_length -
				  gho->outer_l4_hdr_offset);
	}

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }

  return n_tx_bytes;
}
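/*
 * Patch the outer encapsulation of a single VXLAN segment: the outer
 * IPv4/IPv6 length fields (and the outer IPv4 header checksum) plus the
 * outer UDP length, then recompute the outer UDP checksum in software,
 * since the value copied from the template no longer matches the
 * segment's payload.
 */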
static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
					generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
	clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
	clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }

  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
	clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
	{
	  udp->checksum =
	    ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
	}
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
	{
	  udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
	}
      /* FIXME: it should be OUTER_UDP_CKSUM */
      vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
    }
}
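/*
 * Apply the VXLAN outer-header fixup above to every segment produced
 * from one GSO packet, accumulating the outer-header overhead into the
 * returned byte count.
 */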
static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
				vnet_interface_per_thread_data_t * ptd,
				vlib_buffer_t * sb0,
				generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }

  return n_tx_bytes;
}
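/*
 * Size the split_buffers vector and allocate it. Worked example with
 * illustrative numbers (not taken from this file): gso_size = 1448,
 * l234_sz = 66, a 10000-byte chain and 2048-byte buffers give
 * first_data_size = 1448, so 10000 - (66 + 1448) = 8486 payload bytes
 * remain and need ceil (8486 / 1448) = 6 more buffers, 7 in total.
 */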
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
		   vnet_interface_per_thread_data_t * ptd,
		   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
		   u16 gso_size, u16 first_data_size,
		   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer except the 1st
   * segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is hdr_sz from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
	      - gho->l2_hdr_offset);

  /*
   * First segmented buffer length is calculated separately, as it may
   * contain less data than gso_size (when gso_size is greater than
   * current_length of the 1st buffer from the GSO chained buffers)
   * and/or less than the size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * Total packet length minus the first packet length (which includes
   * the l234 header), divided by size with rounding up.
   */
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}
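/*
 * Clone buffer metadata and the first 'length' bytes of payload from
 * the template b0 into the freshly allocated nb0; the copy is split to
 * mirror the vlib_buffer_t cacheline layout.
 */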
static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
				 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
		    vlib_buffer_get_current (b0), length);
}
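/*
 * Prepare the next destination segment: copy the header template,
 * compute how much payload it can take (bounded by gso_size and the
 * buffer data size), hand back the write cursor, and stamp the running
 * TCP sequence number into the copied header.
 */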
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
			    vlib_buffer_t * b0, u16 template_data_sz,
			    u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
			    u32 next_tcp_seq, u32 flags,
			    generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
	      vlib_buffer_get_default_data_size (vm) - (template_data_sz +
							nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}
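/*
 * Finalize one segment: set its TCP flags (FIN/PSH are withheld until
 * the last segment), rewrite the IP length fields, compute the IP/TCP
 * checksums in software, clear the now-satisfied offload flags and,
 * for routed non-tunnel output, run any midchain adjacency fixup.
 */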
static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
			 int is_l2, int is_ip6, generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
	clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
	{
	  int bogus = 0;
	  tcp->checksum = 0;
	  tcp->checksum =
	    ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
	  vnet_buffer_offload_flags_clear (b0,
					   VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
	}
    }
  else
    {
      ip4->length =
	clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
	ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
	{
	  tcp->checksum = 0;
	  tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
	}
      vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
					    VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
    }

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
	  adj0->sub_type.midchain.fixup_func)
	/* calls e.g. ipip44_fixup */
	adj0->sub_type.midchain.fixup_func
	  (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}
/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * there.
 *
 * Return the cumulative number of bytes sent or zero
 * if allocation failed.
 */
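/*
 * The copy loop below advances two cursors in lockstep: the source
 * cursor walks the (possibly chained) original buffer, the destination
 * cursor walks ptd->split_buffers, and each destination segment is
 * header-fixed as soon as it fills.
 */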
static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
		    u32 sbi0, vlib_buffer_t * sb0,
		    generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
		    int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
				   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;		/* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
			       gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
				  &dst_left, next_tcp_seq, default_bflags,
				  gho);

      /* an arbitrary large number to catch the runaway loops */
      int nloops = 2000;
      while (total_src_left)
	{
	  if (nloops-- <= 0)
	    clib_panic ("infinite loop detected");
	  u16 bytes_to_copy = clib_min (src_left, dst_left);

	  clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

	  src_left -= bytes_to_copy;
	  src_ptr += bytes_to_copy;
	  total_src_left -= bytes_to_copy;
	  dst_left -= bytes_to_copy;
	  dst_ptr += bytes_to_copy;
	  next_tcp_seq += bytes_to_copy;
	  cdb0->current_length += bytes_to_copy;

	  if (0 == src_left)
	    {
	      int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
	      u32 next_bi = csb0->next_buffer;

	      /* init src to the next buffer in chain */
	      if (has_next)
		{
		  csbi0 = next_bi;
		  csb0 = vlib_get_buffer (vm, csbi0);
		  src_left = csb0->current_length;
		  src_ptr = vlib_buffer_get_current (csb0);
		}
	      else
		{
		  ASSERT (total_src_left == 0);
		  break;
		}
	    }
	  if (0 == dst_left && total_src_left)
	    {
	      n_tx_bytes += cdb0->current_length;
	      tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
				       is_ip6, gho);
	      ASSERT (dbi < vec_len (ptd->split_buffers));
	      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
	      tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
					  gso_size, &dst_ptr, &dst_left,
					  next_tcp_seq, default_bflags, gho);
	    }
	}

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}
static_always_inline u32
gso_segment_buffer (vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd,
		    u32 bi, vlib_buffer_t *b, generic_header_offset_t *gho,
		    u32 n_bytes_b, u8 is_l2, u8 is_ip6)
{
  return tso_segment_buffer (vm, ptd, bi, b, gho, n_bytes_b, is_l2, is_ip6);
}
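/*
 * Bump the interface TX-error counter and hand the buffer to
 * error-drop tagged with the given error code.
 */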
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
			   vlib_node_runtime_t * node, u32 * pbi0,
			   u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
		      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
			   /* buffer stride */ 1,
			   /* n_buffers */ 1, GSO_NEXT_DROP, node->node_index,
			   drop_error_code);
}
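/*
 * Node body. With do_segmentation == 0 the quad loop passes buffers
 * through four at a time and falls back to the scalar loop as soon as
 * a GSO buffer targets an interface without hardware GSO support. The
 * scalar loop segments such buffers, enqueues the resulting
 * split_buffers (refilling frames as they fill up) and frees the
 * original buffer.
 */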
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
		      vlib_node_runtime_t * node,
		      vlib_frame_t * frame,
		      vnet_main_t * vnm,
		      vnet_hw_interface_t * hi,
		      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      if (!do_segmentation)
	while (from + 8 <= from_end && n_left_to_next >= 4)
	  {
	    u32 bi0, bi1, bi2, bi3;
	    u32 next0, next1, next2, next3;
	    u32 swif0, swif1, swif2, swif3;
	    gso_trace_t *t0, *t1, *t2, *t3;
	    vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

	    /* Prefetch next iteration. */
	    vlib_prefetch_buffer_header (b[4], LOAD);
	    vlib_prefetch_buffer_header (b[5], LOAD);
	    vlib_prefetch_buffer_header (b[6], LOAD);
	    vlib_prefetch_buffer_header (b[7], LOAD);

	    bi0 = from[0];
	    bi1 = from[1];
	    bi2 = from[2];
	    bi3 = from[3];
	    to_next[0] = bi0;
	    to_next[1] = bi1;
	    to_next[2] = bi2;
	    to_next[3] = bi3;
	    swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	    swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
	    swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
	    swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

	    if (PREDICT_FALSE (hi->sw_if_index != swif0))
	      {
		hi0 = vnet_get_sup_hw_interface (vnm, swif0);
		if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
		    (b[0]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif1))
	      {
		hi1 = vnet_get_sup_hw_interface (vnm, swif1);
		if ((hi1->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
		    (b[1]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif2))
	      {
		hi2 = vnet_get_sup_hw_interface (vnm, swif2);
		if ((hi2->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
		    (b[2]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (PREDICT_FALSE (hi->sw_if_index != swif3))
	      {
		hi3 = vnet_get_sup_hw_interface (vnm, swif3);
		if ((hi3->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
		    (b[3]->flags & VNET_BUFFER_F_GSO))
		  break;
	      }
	    if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
		t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
		t0->gso_size = vnet_buffer2 (b[0])->gso_size;
		t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
		clib_memset (&t0->gho, 0, sizeof (t0->gho));
		vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
						   is_ip4, is_ip6);
	      }
	    if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
		t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
		t1->gso_size = vnet_buffer2 (b[1])->gso_size;
		t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
		clib_memset (&t1->gho, 0, sizeof (t1->gho));
		vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
						   is_ip4, is_ip6);
	      }
	    if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
		t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
		t2->gso_size = vnet_buffer2 (b[2])->gso_size;
		t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
		clib_memset (&t2->gho, 0, sizeof (t2->gho));
		vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
						   is_ip4, is_ip6);
	      }
	    if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	      {
		t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
		t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
		t3->gso_size = vnet_buffer2 (b[3])->gso_size;
		t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
		clib_memset (&t3->gho, 0, sizeof (t3->gho));
		vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
						   is_ip4, is_ip6);
	      }

	    from += 4;
	    to_next += 4;
	    n_left_to_next -= 4;
	    n_left_from -= 4;

	    next0 = next1 = 0;
	    next2 = next3 = 0;
	    vnet_feature_next (&next0, b[0]);
	    vnet_feature_next (&next1, b[1]);
	    vnet_feature_next (&next2, b[2]);
	    vnet_feature_next (&next3, b[3]);
	    vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					     n_left_to_next, bi0, bi1, bi2,
					     bi3, next0, next1, next2, next3);
	    b += 4;
	  }
      while (from + 1 <= from_end && n_left_to_next > 0)
	{
	  u32 bi0, swif0;
	  gso_trace_t *t0;
	  vnet_hw_interface_t *hi0;
	  u32 next0 = 0;
	  u32 do_segmentation0 = 0;

	  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
	  if (PREDICT_FALSE (hi->sw_if_index != swif0))
	    {
	      hi0 = vnet_get_sup_hw_interface (vnm, swif0);
	      if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
		  (b[0]->flags & VNET_BUFFER_F_GSO))
		do_segmentation0 = 1;
	    }
	  else
	    do_segmentation0 = do_segmentation;

	  /* speculatively enqueue b0 to the current next frame */
	  to_next[0] = bi0 = from[0];
	  to_next += 1;
	  n_left_to_next -= 1;
	  from += 1;
	  n_left_from -= 1;

	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
	      t0->gso_size = vnet_buffer2 (b[0])->gso_size;
	      t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
	      clib_memset (&t0->gho, 0, sizeof (t0->gho));
	      vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
						 is_ip4, is_ip6);
	    }

	  if (do_segmentation0)
	    {
	      if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
		{
		  /*
		   * Undo the enqueue of the b0 - it is not going anywhere,
		   * and will be freed either after it's segmented or
		   * when dropped, if there is no buffers to segment into.
		   */
		  to_next -= 1;
		  n_left_to_next += 1;
		  /* undo the counting. */
		  generic_header_offset_t gho = { 0 };
		  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
		  u32 n_tx_bytes = 0;
		  u32 inner_is_ip6 = is_ip6;

		  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
						     is_ip4, is_ip6);

		  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
		    {
		      if (PREDICT_FALSE
			  (gho.gho_flags & (GHO_F_GRE_TUNNEL |
					    GHO_F_GENEVE_TUNNEL)))
			{
			  /* not supported yet */
			  drop_one_buffer_and_count (vm, vnm, node, from - 1,
						     hi->sw_if_index,
						     GSO_ERROR_UNHANDLED_TYPE);
			  b += 1;
			  continue;
			}

		      inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
		    }

		  n_tx_bytes = gso_segment_buffer (vm, ptd, bi0, b[0], &gho,
						   n_bytes_b0, is_l2,
						   inner_is_ip6);

		  if (PREDICT_FALSE (n_tx_bytes == 0))
		    {
		      drop_one_buffer_and_count (vm, vnm, node, from - 1,
						 hi->sw_if_index,
						 GSO_ERROR_NO_BUFFERS);
		      b += 1;
		      continue;
		    }
		  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
		    {
		      n_tx_bytes +=
			tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
		    }
		  else if (PREDICT_FALSE
			   (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
					     GHO_F_IPIP6_TUNNEL)))
		    {
		      n_tx_bytes +=
			tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
		    }

		  u16 n_tx_bufs = vec_len (ptd->split_buffers);
		  u32 *from_seg = ptd->split_buffers;

		  while (n_tx_bufs > 0)
		    {
		      u32 sbi0;
		      vlib_buffer_t *sb0;
		      while (n_tx_bufs > 0 && n_left_to_next > 0)
			{
			  sbi0 = to_next[0] = from_seg[0];
			  sb0 = vlib_get_buffer (vm, sbi0);
			  ASSERT (sb0->current_length > 0);
			  to_next += 1;
			  from_seg += 1;
			  n_left_to_next -= 1;
			  n_tx_bufs -= 1;
			  next0 = 0;
			  vnet_feature_next (&next0, sb0);
			  vlib_validate_buffer_enqueue_x1 (vm, node,
							   next_index,
							   to_next,
							   n_left_to_next,
							   sbi0, next0);
			}

		      vlib_put_next_frame (vm, node, next_index,
					   n_left_to_next);

		      vlib_get_next_frame (vm, node, next_index,
					   to_next, n_left_to_next);
		    }

		  /* The buffers were enqueued. Reset the length */
		  vec_set_len (ptd->split_buffers, 0);
		  /* Free the now segmented buffer */
		  vlib_buffer_free_one (vm, bi0);
		  b += 1;
		  continue;
		}
	    }
	  vnet_feature_next (&next0, b[0]);
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	  b += 1;
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
		 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
		 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
				      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->caps & (VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_VXLAN_TNL_GSO))
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_l2, is_ip4, is_ip6,
				     /* do_segmentation */ 0);
      else
	return vnet_gso_node_inline (vm, node, frame, vnm, hi,
				     is_l2, is_ip4, is_ip6,
				     /* do_segmentation */ 1);
    }
  return 0;
}
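/*
 * Per-arc entry points: the same inline body specialized for the
 * l2/ip4/ip6 flag combinations registered below.
 */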
VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
			  0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
			  1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
			  0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
			  1 /* ip6 */ );
}
VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gso_error_strings),
  .error_strings = gso_error_strings,
  .n_next_nodes = GSO_N_NEXT,
  .next_nodes = {
    [GSO_NEXT_DROP] = "error-drop",
  },
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gso_error_strings),
  .error_strings = gso_error_strings,
  .n_next_nodes = GSO_N_NEXT,
  .next_nodes = {
    [GSO_NEXT_DROP] = "error-drop",
  },
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gso_error_strings),
  .error_strings = gso_error_strings,
  .n_next_nodes = GSO_N_NEXT,
  .next_nodes = {
    [GSO_NEXT_DROP] = "error-drop",
  },
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gso_error_strings),
  .error_strings = gso_error_strings,
  .n_next_nodes = GSO_N_NEXT,
  .next_nodes = {
    [GSO_NEXT_DROP] = "error-drop",
  },
  .name = "gso-ip6",
};
VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */