/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
                  t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
                  &t->gho);
    }
  else
    {
      s = format (s, "non-gso buffer\n%U", format_generic_header_offset,
                  &t->gho);
    }

  return s;
}
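
/*
 * Illustrative trace output for a GSO buffer (values assumed, not from
 * a real capture; the second line is whatever format_generic_header_offset
 * renders for the parsed offsets):
 *
 *   gso_sz 1448 gso_l4_hdr_sz 20
 *   <l2/l3/l4 header offsets as formatted by format_generic_header_offset>
 */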

static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
                               vnet_interface_per_thread_data_t * ptd,
                               vlib_buffer_t * sb0,
                               generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          ip4->length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l3_hdr_offset);
          ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          ip6->payload_length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l4_hdr_offset);
        }

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
                                        generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }
  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          udp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }
}

static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
                                vnet_interface_per_thread_data_t * ptd,
                                vlib_buffer_t * sb0,
                                generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size,
                   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer, except for the
   * 1st segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is the header size from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /*
   * The first segmented buffer's length is calculated separately, as
   * it may contain less data than gso_size (when gso_size is greater
   * than the current_length of the 1st buffer in the GSO buffer chain)
   * and/or the size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * Total packet length minus the first packet length (which includes
   * the l234 header), divided by size with rounding up.
   */
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);
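
  /*
   * Worked example with assumed values: n_bytes_b0 = 9054,
   * l234_sz = 54, gso_size = size = 1448. Then first_packet_length =
   * 54 + 1448 = 1502, and n_bufs = 1 + ceil ((9054 - 1502) / 1448) =
   * 1 + 6 = 7 buffers.
   */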

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
}

static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
                         int is_l2, int is_ip6, generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
        clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
        {
          int bogus = 0;
          tcp->checksum = 0;
          tcp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
          b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
        }
    }
  else
    {
      ip4->length =
        clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
        ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
        {
          tcp->checksum = 0;
          tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
        }
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
          adj0->sub_type.midchain.fixup_func)
        /* calls e.g. ipip44_fixup */
        adj0->sub_type.midchain.fixup_func
          (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

/**
 * Allocate the necessary number of ptd->split_buffers, and segment
 * the possibly chained buffer(s) from b0 into them.
 *
 * Return the cumulative number of bytes sent, or zero if the
 * allocation failed.
 */
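
/*
 * A sketch of the effect, with assumed values: a 9014-byte single-chain
 * GSO buffer with 54 bytes of L2/L3/L4 headers and gso_size = 1448 is
 * copied into ceil ((9014 - 54) / 1448) = 7 split_buffers, each carrying
 * its own copy of the headers with the IP length, TCP sequence number
 * and checksums fixed up per segment by tso_fixup_segmented_buf.
 */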

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0,
                    generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
                    int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
                               gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

      /* an arbitrarily large number, to catch runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
                                       is_ip6, gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if (!(hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
                    (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if (!(hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
                    (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
                                                   is_ip4, is_ip6);
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                 is_ip4, is_ip6);
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there are no buffers to segment
                   * into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  generic_header_offset_t gho = { 0 };
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;
                  u32 inner_is_ip6 = is_ip6;

                  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
                                                     is_ip4, is_ip6);

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
                    {
                      if (PREDICT_FALSE
                          (gho.gho_flags & (GHO_F_GRE_TUNNEL |
                                            GHO_F_GENEVE_TUNNEL)))
                        {
                          /* not supported yet */
                          drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                     hi->sw_if_index,
                                                     VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
                          b += 1;
                          continue;
                        }

                      vnet_get_inner_header (b[0], &gho);

                      n_bytes_b0 -= gho.outer_hdr_sz;
                      inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
                    }

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_l2, inner_is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
                    }
                  else
                    if (PREDICT_FALSE
                        (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
                                          GHO_F_IPIP6_TUNNEL)))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      while (n_tx_bufs > 0 && n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          ASSERT (sb0->current_length > 0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          next0 = 0;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                      vlib_put_next_frame (vm, node, next_index,
                                           n_left_to_next);
                      if (n_tx_bufs > 0)
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
                 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
};

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */