gso: add support for IP-IP
src/vnet/gso/node.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d %U",
                  t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
                  &t->gho);
    }
  else
    {
      s =
        format (s, "non-gso buffer %U", format_generic_header_offset,
                &t->gho);
    }

  return s;
}

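/*
 * Copy the outer IP-IP header from the original buffer sb0 onto each
 * segment in ptd->split_buffers, then patch the outer IPv4 total length
 * and checksum (or the outer IPv6 payload length) of every segment.
 * Returns the cumulative number of outer-header bytes added.
 */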
static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
                               vnet_interface_per_thread_data_t * ptd,
                               vlib_buffer_t * sb0,
                               generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          ip4->length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l3_hdr_offset);
          ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          ip6->payload_length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l4_hdr_offset);
        }

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

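/*
 * Rewrite the outer headers of a single VXLAN-encapsulated segment: fix
 * the outer IPv4 length and checksum (or IPv6 payload length), then
 * recompute the outer UDP length and checksum and clear the segment's
 * UDP checksum-offload flag.
 */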
static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
                                        generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }
  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          udp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }
}

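/*
 * Copy the outer VXLAN encapsulation from the original buffer sb0 onto
 * each segment in ptd->split_buffers and repair the outer headers in
 * place via tso_segment_vxlan_tunnel_headers_fixup(). Returns the
 * cumulative number of outer-header bytes added.
 */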
static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
                                vnet_interface_per_thread_data_t * ptd,
                                vlib_buffer_t * sb0,
                                generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

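/*
 * Compute the per-segment payload size and the number of segments needed
 * to carry n_bytes_b0 bytes, then allocate that many buffers into
 * ptd->split_buffers. Returns the number of buffers allocated, or zero
 * (with any partial allocation freed) on failure.
 */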
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size,
                   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer except the 1st
   * segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is hdr_sz from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /*
   * The first segmented buffer's length is calculated separately, as it
   * may carry less data than gso_size (when gso_size exceeds the
   * current_length of the first buffer in the GSO chain) or less than
   * the size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * The remaining bytes (total packet length minus the first packet's
   * length, which includes the l234 header) are spread over the other
   * buffers using rounded-up division.
   */
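  /*
   * A worked example with hypothetical numbers: n_bytes_b0 = 4414,
   * l234_sz = 54 and gso_size = size = 1448 give first_packet_length =
   * 1502, so n_bufs = 1 + ceil ((4414 - 1502) / 1448) = 1 + 3 = 4.
   */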
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

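/*
 * Initialize a fresh segment nb0 from the original buffer b0: clone the
 * relevant buffer metadata (flags, opaque, error, trace handle) and copy
 * the first 'length' bytes of data.
 */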
static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
}

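/*
 * Initialize a subsequent segment from the template buffer: clone the
 * metadata and the l234 header, compute the destination pointer and the
 * payload space left for the copy loop, and stamp the segment's TCP
 * sequence number.
 */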
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

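/*
 * Finalize one segment: set the TCP flags, fix the inner IP length
 * fields, recompute the IP/TCP checksums, clear the corresponding
 * offload flags and, for non-tunneled packets, run the midchain
 * adjacency fixup so locally generated encapsulations (e.g. IP-IP)
 * remain consistent.
 */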
static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
                         int is_ip6, generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
        clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
        {
          int bogus = 0;
          tcp->checksum = 0;
          tcp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
          b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
        }
    }
  else
    {
      ip4->length =
        clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
        ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
        {
          tcp->checksum = 0;
          tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
        }
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }

  if ((gho->gho_flags & GHO_F_TUNNEL) == 0)
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
          adj0->sub_type.midchain.fixup_func)
        /* calls e.g. ipip44_fixup */
        adj0->sub_type.midchain.fixup_func
          (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

/**
 * Allocate the necessary number of ptd->split_buffers, and segment the
 * possibly chained buffer(s) from sb0 into them.
 *
 * Returns the cumulative number of bytes sent, or zero if the buffer
 * allocation failed.
 */

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0,
                    generic_header_offset_t * gho, u32 n_bytes_b0, int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_ip6, gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

      /* an arbitrarily large bound to catch runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_ip6,
                                       gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

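/*
 * Increment the TX error counter of the egress interface and send the
 * buffer to the drop next index with the given error code.
 */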
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

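/*
 * Main GSO dispatch. When segmentation is not requested, the quad loop
 * passes buffers straight through, bailing out to the scalar loop if a
 * buffer's egress interface lacks GSO support. The scalar loop parses
 * header offsets, segments GSO buffers with tso_segment_buffer(),
 * applies the VXLAN or IP-IP outer-header fixups, enqueues the
 * resulting segments and frees the original buffer.
 */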
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if (!(hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
                    (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if (!(hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
                    (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
                                                   is_ip4, is_ip6);
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                 is_ip4, is_ip6);
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of the b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or when
                   * dropped, if there are no buffers to segment into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  generic_header_offset_t gho = { 0 };
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;

                  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
                                                     is_ip4, is_ip6);

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
                    {
                      if (PREDICT_FALSE
                          (gho.gho_flags & (GHO_F_GRE_TUNNEL |
                                            GHO_F_GENEVE_TUNNEL)))
                        {
                          /* not supported yet */
                          drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                     hi->sw_if_index,
                                                     VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
                          b += 1;
                          continue;
                        }

                      vnet_get_inner_header (b[0], &gho);

                      n_bytes_b0 -= gho.outer_hdr_sz;
                      is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
                    }

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
                    }
                  else
                    if (PREDICT_FALSE
                        (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
                                          GHO_F_IPIP6_TUNNEL)))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      while (n_tx_bufs > 0 && n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          ASSERT (sb0->current_length > 0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          next0 = 0;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                      vlib_put_next_frame (vm, node, next_index,
                                           n_left_to_next);
                      if (n_tx_bufs > 0)
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

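/*
 * Choose between pass-through and software segmentation based on whether
 * the egress hw interface of the first buffer in the frame supports GSO;
 * frames with mixed egress interfaces are handled per buffer inside
 * vnet_gso_node_inline().
 */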
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
                 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("esp4-encrypt-tun", "ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("esp6-encrypt-tun", "ipsec6-output-feature"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */