c11 safe string handling support
[vpp.git] / src / vnet / vxlan-gpe / decap.c
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/pg/pg.h>
25 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26
27 vlib_node_registration_t vxlan_gpe_input_node;
28
29 /**
30  * @brief Struct for VXLAN GPE decap packet tracing
31  *
32  */
33 typedef struct
34 {
35   u32 next_index;
36   u32 tunnel_index;
37   u32 error;
38 } vxlan_gpe_rx_trace_t;
39
40 /**
41  * @brief Tracing function for VXLAN GPE packet decapsulation
42  *
43  * @param *s
44  * @param *args
45  *
46  * @return *s
47  *
48  */
49 static u8 *
50 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
51 {
52   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
53   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
54   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
55
56   if (t->tunnel_index != ~0)
57     {
58       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
59                   t->next_index, t->error);
60     }
61   else
62     {
63       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
64                   t->error);
65     }
66   return s;
67 }
68
69 /**
70  * @brief Tracing function for VXLAN GPE packet decapsulation including length
71  *
72  * @param *s
73  * @param *args
74  *
75  * @return *s
76  *
77  */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  /* Consume the standard format_buffer arguments. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  /* Stub: registered as .format_buffer on the input nodes but appends
   * nothing to the output vector. */
  return s;
}
87
88 /**
89  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
90  *
91  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
92  * tunnels are "terminate local". This means that there is no "TX" interface for this
93  * decap case, so that field in the buffer_metadata can be "used for something else".
94  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
95  * FIB index used to look up the inner-packet's adjacency.
96  *
97  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
98  *
99  * @param *vm
100  * @param *node
101  * @param *from_frame
102  * @param is_ip4
103  *
104  * @return from_frame->n_vectors
105  *
106  */
107 always_inline uword
108 vxlan_gpe_input (vlib_main_t * vm,
109                  vlib_node_runtime_t * node,
110                  vlib_frame_t * from_frame, u8 is_ip4)
111 {
112   u32 n_left_from, next_index, *from, *to_next;
113   vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
114   vnet_main_t *vnm = nngm->vnet_main;
115   vnet_interface_main_t *im = &vnm->interface_main;
116   u32 last_tunnel_index = ~0;
117   vxlan4_gpe_tunnel_key_t last_key4;
118   vxlan6_gpe_tunnel_key_t last_key6;
119   u32 pkts_decapsulated = 0;
120   u32 thread_index = vm->thread_index;
121   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
122
123   if (is_ip4)
124     clib_memset (&last_key4, 0xff, sizeof (last_key4));
125   else
126     clib_memset (&last_key6, 0xff, sizeof (last_key6));
127
128   from = vlib_frame_vector_args (from_frame);
129   n_left_from = from_frame->n_vectors;
130
131   next_index = node->cached_next_index;
132   stats_sw_if_index = node->runtime_data[0];
133   stats_n_packets = stats_n_bytes = 0;
134
135   while (n_left_from > 0)
136     {
137       u32 n_left_to_next;
138
139       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
140
141       while (n_left_from >= 4 && n_left_to_next >= 2)
142         {
143           u32 bi0, bi1;
144           vlib_buffer_t *b0, *b1;
145           u32 next0, next1;
146           ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
147           ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
148           uword *p0, *p1;
149           u32 tunnel_index0, tunnel_index1;
150           vxlan_gpe_tunnel_t *t0, *t1;
151           vxlan4_gpe_tunnel_key_t key4_0, key4_1;
152           vxlan6_gpe_tunnel_key_t key6_0, key6_1;
153           u32 error0, error1;
154           u32 sw_if_index0, sw_if_index1, len0, len1;
155
156           /* Prefetch next iteration. */
157           {
158             vlib_buffer_t *p2, *p3;
159
160             p2 = vlib_get_buffer (vm, from[2]);
161             p3 = vlib_get_buffer (vm, from[3]);
162
163             vlib_prefetch_buffer_header (p2, LOAD);
164             vlib_prefetch_buffer_header (p3, LOAD);
165
166             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
167             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
168           }
169
170           bi0 = from[0];
171           bi1 = from[1];
172           to_next[0] = bi0;
173           to_next[1] = bi1;
174           from += 2;
175           to_next += 2;
176           n_left_to_next -= 2;
177           n_left_from -= 2;
178
179           b0 = vlib_get_buffer (vm, bi0);
180           b1 = vlib_get_buffer (vm, bi1);
181
182           if (is_ip4)
183             {
184               /* udp leaves current_data pointing at the vxlan-gpe header */
185               vlib_buffer_advance (b0,
186                                    -(word) (sizeof (udp_header_t) +
187                                             sizeof (ip4_header_t)));
188               vlib_buffer_advance (b1,
189                                    -(word) (sizeof (udp_header_t) +
190                                             sizeof (ip4_header_t)));
191
192               iuvn4_0 = vlib_buffer_get_current (b0);
193               iuvn4_1 = vlib_buffer_get_current (b1);
194
195               /* pop (ip, udp, vxlan) */
196               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
197               vlib_buffer_advance (b1, sizeof (*iuvn4_1));
198             }
199           else
200             {
201               /* udp leaves current_data pointing at the vxlan-gpe header */
202               vlib_buffer_advance (b0,
203                                    -(word) (sizeof (udp_header_t) +
204                                             sizeof (ip6_header_t)));
205               vlib_buffer_advance (b1,
206                                    -(word) (sizeof (udp_header_t) +
207                                             sizeof (ip6_header_t)));
208
209               iuvn6_0 = vlib_buffer_get_current (b0);
210               iuvn6_1 = vlib_buffer_get_current (b1);
211
212               /* pop (ip, udp, vxlan) */
213               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
214               vlib_buffer_advance (b1, sizeof (*iuvn6_1));
215             }
216
217           tunnel_index0 = ~0;
218           tunnel_index1 = ~0;
219           error0 = 0;
220           error1 = 0;
221
222           if (is_ip4)
223             {
224               next0 =
225                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
226                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
227                 VXLAN_GPE_INPUT_NEXT_DROP;
228               next1 =
229                 (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
230                 nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
231                 VXLAN_GPE_INPUT_NEXT_DROP;
232
233               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
234               key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
235
236               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
237               key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
238
239               key4_0.vni = iuvn4_0->vxlan.vni_res;
240               key4_1.vni = iuvn4_1->vxlan.vni_res;
241
242               key4_0.pad = 0;
243               key4_1.pad = 0;
244             }
245           else                  /* is_ip6 */
246             {
247               next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
248                 iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
249               next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
250                 iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
251
252               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
253               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
254               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
255               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
256
257               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
258               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
259               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
260               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
261
262               key6_0.vni = iuvn6_0->vxlan.vni_res;
263               key6_1.vni = iuvn6_1->vxlan.vni_res;
264             }
265
266           /* Processing packet 0 */
267           if (is_ip4)
268             {
269               /* Processing for key4_0 */
270               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
271                                  || (key4_0.as_u64[1] !=
272                                      last_key4.as_u64[1])))
273                 {
274                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
275
276                   if (p0 == 0)
277                     {
278                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
279                       goto trace0;
280                     }
281
282                   last_key4.as_u64[0] = key4_0.as_u64[0];
283                   last_key4.as_u64[1] = key4_0.as_u64[1];
284                   tunnel_index0 = last_tunnel_index = p0[0];
285                 }
286               else
287                 tunnel_index0 = last_tunnel_index;
288             }
289           else                  /* is_ip6 */
290             {
291               next0 =
292                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
293                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
294                 VXLAN_GPE_INPUT_NEXT_DROP;
295               next1 =
296                 (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
297                 nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
298                 VXLAN_GPE_INPUT_NEXT_DROP;
299
300               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
301               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
302               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
303               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
304
305               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
306               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
307               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
308               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
309
310               key6_0.vni = iuvn6_0->vxlan.vni_res;
311               key6_1.vni = iuvn6_1->vxlan.vni_res;
312
313               /* Processing for key6_0 */
314               if (PREDICT_FALSE
315                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
316                 {
317                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
318
319                   if (p0 == 0)
320                     {
321                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
322                       goto trace0;
323                     }
324
325                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
326                   tunnel_index0 = last_tunnel_index = p0[0];
327                 }
328               else
329                 tunnel_index0 = last_tunnel_index;
330             }
331
332           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
333
334
335           sw_if_index0 = t0->sw_if_index;
336           len0 = vlib_buffer_length_in_chain (vm, b0);
337
338           /* Required to make the l2 tag push / pop code work on l2 subifs */
339           vnet_update_l2_len (b0);
340
341           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
342           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
343
344       /**
345        * ip[46] lookup in the configured FIB
346        */
347           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
348
349           pkts_decapsulated++;
350           stats_n_packets += 1;
351           stats_n_bytes += len0;
352
353           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
354             {
355               stats_n_packets -= 1;
356               stats_n_bytes -= len0;
357               if (stats_n_packets)
358                 vlib_increment_combined_counter (im->combined_sw_if_counters +
359                                                  VNET_INTERFACE_COUNTER_RX,
360                                                  thread_index,
361                                                  stats_sw_if_index,
362                                                  stats_n_packets,
363                                                  stats_n_bytes);
364               stats_n_packets = 1;
365               stats_n_bytes = len0;
366               stats_sw_if_index = sw_if_index0;
367             }
368
369         trace0:b0->error = error0 ? node->errors[error0] : 0;
370
371           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
372             {
373               vxlan_gpe_rx_trace_t *tr =
374                 vlib_add_trace (vm, node, b0, sizeof (*tr));
375               tr->next_index = next0;
376               tr->error = error0;
377               tr->tunnel_index = tunnel_index0;
378             }
379
380           /* Process packet 1 */
381           if (is_ip4)
382             {
383               /* Processing for key4_1 */
384               if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
385                                  || (key4_1.as_u64[1] !=
386                                      last_key4.as_u64[1])))
387                 {
388                   p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
389
390                   if (p1 == 0)
391                     {
392                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
393                       goto trace1;
394                     }
395
396                   last_key4.as_u64[0] = key4_1.as_u64[0];
397                   last_key4.as_u64[1] = key4_1.as_u64[1];
398                   tunnel_index1 = last_tunnel_index = p1[0];
399                 }
400               else
401                 tunnel_index1 = last_tunnel_index;
402             }
403           else                  /* is_ip6 */
404             {
405               /* Processing for key6_1 */
406               if (PREDICT_FALSE
407                   (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
408                 {
409                   p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
410
411                   if (p1 == 0)
412                     {
413                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
414                       goto trace1;
415                     }
416
417                   memcpy (&last_key6, &key6_1, sizeof (key6_1));
418                   tunnel_index1 = last_tunnel_index = p1[0];
419                 }
420               else
421                 tunnel_index1 = last_tunnel_index;
422             }
423
424           t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
425
426           sw_if_index1 = t1->sw_if_index;
427           len1 = vlib_buffer_length_in_chain (vm, b1);
428
429           /* Required to make the l2 tag push / pop code work on l2 subifs */
430           vnet_update_l2_len (b1);
431
432           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
433           vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
434
435           /*
436            * ip[46] lookup in the configured FIB
437            */
438           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
439
440           pkts_decapsulated++;
441           stats_n_packets += 1;
442           stats_n_bytes += len1;
443
444           /* Batch stats increment on the same vxlan tunnel so counter
445              is not incremented per packet */
446           if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
447             {
448               stats_n_packets -= 1;
449               stats_n_bytes -= len1;
450               if (stats_n_packets)
451                 vlib_increment_combined_counter (im->combined_sw_if_counters +
452                                                  VNET_INTERFACE_COUNTER_RX,
453                                                  thread_index,
454                                                  stats_sw_if_index,
455                                                  stats_n_packets,
456                                                  stats_n_bytes);
457               stats_n_packets = 1;
458               stats_n_bytes = len1;
459               stats_sw_if_index = sw_if_index1;
460             }
461           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
462
463         trace1:b1->error = error1 ? node->errors[error1] : 0;
464
465           if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
466             {
467               vxlan_gpe_rx_trace_t *tr =
468                 vlib_add_trace (vm, node, b1, sizeof (*tr));
469               tr->next_index = next1;
470               tr->error = error1;
471               tr->tunnel_index = tunnel_index1;
472             }
473
474           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
475                                            n_left_to_next, bi0, bi1, next0,
476                                            next1);
477         }
478
479       while (n_left_from > 0 && n_left_to_next > 0)
480         {
481           u32 bi0;
482           vlib_buffer_t *b0;
483           u32 next0;
484           ip4_vxlan_gpe_header_t *iuvn4_0;
485           ip6_vxlan_gpe_header_t *iuvn6_0;
486           uword *p0;
487           u32 tunnel_index0;
488           vxlan_gpe_tunnel_t *t0;
489           vxlan4_gpe_tunnel_key_t key4_0;
490           vxlan6_gpe_tunnel_key_t key6_0;
491           u32 error0;
492           u32 sw_if_index0, len0;
493
494           bi0 = from[0];
495           to_next[0] = bi0;
496           from += 1;
497           to_next += 1;
498           n_left_from -= 1;
499           n_left_to_next -= 1;
500
501           b0 = vlib_get_buffer (vm, bi0);
502
503           if (is_ip4)
504             {
505               /* udp leaves current_data pointing at the vxlan-gpe header */
506               vlib_buffer_advance (b0,
507                                    -(word) (sizeof (udp_header_t) +
508                                             sizeof (ip4_header_t)));
509
510               iuvn4_0 = vlib_buffer_get_current (b0);
511
512               /* pop (ip, udp, vxlan) */
513               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
514             }
515           else
516             {
517               /* udp leaves current_data pointing at the vxlan-gpe header */
518               vlib_buffer_advance (b0,
519                                    -(word) (sizeof (udp_header_t) +
520                                             sizeof (ip6_header_t)));
521
522               iuvn6_0 = vlib_buffer_get_current (b0);
523
524               /* pop (ip, udp, vxlan) */
525               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
526             }
527
528           tunnel_index0 = ~0;
529           error0 = 0;
530
531           if (is_ip4)
532             {
533               next0 =
534                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
535                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
536                 VXLAN_GPE_INPUT_NEXT_DROP;
537
538               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
539               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
540               key4_0.vni = iuvn4_0->vxlan.vni_res;
541               key4_0.pad = 0;
542
543               /* Processing for key4_0 */
544               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
545                                  || (key4_0.as_u64[1] !=
546                                      last_key4.as_u64[1])))
547                 {
548                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
549
550                   if (p0 == 0)
551                     {
552                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
553                       goto trace00;
554                     }
555
556                   last_key4.as_u64[0] = key4_0.as_u64[0];
557                   last_key4.as_u64[1] = key4_0.as_u64[1];
558                   tunnel_index0 = last_tunnel_index = p0[0];
559                 }
560               else
561                 tunnel_index0 = last_tunnel_index;
562             }
563           else                  /* is_ip6 */
564             {
565               next0 =
566                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
567                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
568                 VXLAN_GPE_INPUT_NEXT_DROP;
569
570               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
571               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
572               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
573               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
574               key6_0.vni = iuvn6_0->vxlan.vni_res;
575
576               /* Processing for key6_0 */
577               if (PREDICT_FALSE
578                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
579                 {
580                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
581
582                   if (p0 == 0)
583                     {
584                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
585                       goto trace00;
586                     }
587
588                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
589                   tunnel_index0 = last_tunnel_index = p0[0];
590                 }
591               else
592                 tunnel_index0 = last_tunnel_index;
593             }
594
595           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
596
597
598           sw_if_index0 = t0->sw_if_index;
599           len0 = vlib_buffer_length_in_chain (vm, b0);
600
601           /* Required to make the l2 tag push / pop code work on l2 subifs */
602           vnet_update_l2_len (b0);
603
604           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
605           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
606
607           /*
608            * ip[46] lookup in the configured FIB
609            */
610           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
611
612           pkts_decapsulated++;
613           stats_n_packets += 1;
614           stats_n_bytes += len0;
615
616           /* Batch stats increment on the same vxlan-gpe tunnel so counter
617              is not incremented per packet */
618           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
619             {
620               stats_n_packets -= 1;
621               stats_n_bytes -= len0;
622               if (stats_n_packets)
623                 vlib_increment_combined_counter (im->combined_sw_if_counters +
624                                                  VNET_INTERFACE_COUNTER_RX,
625                                                  thread_index,
626                                                  stats_sw_if_index,
627                                                  stats_n_packets,
628                                                  stats_n_bytes);
629               stats_n_packets = 1;
630               stats_n_bytes = len0;
631               stats_sw_if_index = sw_if_index0;
632             }
633
634         trace00:b0->error = error0 ? node->errors[error0] : 0;
635
636           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
637             {
638               vxlan_gpe_rx_trace_t *tr =
639                 vlib_add_trace (vm, node, b0, sizeof (*tr));
640               tr->next_index = next0;
641               tr->error = error0;
642               tr->tunnel_index = tunnel_index0;
643             }
644           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
645                                            n_left_to_next, bi0, next0);
646         }
647
648       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
649     }
650   vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
651                                VXLAN_GPE_ERROR_DECAPSULATED,
652                                pkts_decapsulated);
653   /* Increment any remaining batch stats */
654   if (stats_n_packets)
655     {
656       vlib_increment_combined_counter (im->combined_sw_if_counters +
657                                        VNET_INTERFACE_COUNTER_RX,
658                                        thread_index, stats_sw_if_index,
659                                        stats_n_packets, stats_n_bytes);
660       node->runtime_data[0] = stats_sw_if_index;
661     }
662   return from_frame->n_vectors;
663 }
664
665 /**
666  * @brief Graph processing dispatch function for IPv4 VXLAN GPE
667  *
668  * @node vxlan4-gpe-input
669  * @param *vm
670  * @param *node
671  * @param *from_frame
672  *
673  * @return from_frame->n_vectors
674  *
675  */
static uword
vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  /* IPv4 flavour of the shared decap path. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
682
683
684 void
685 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
686 {
687   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
688   hm->decap_next_node_list[protocol_id] = next_node_index;
689   return;
690 }
691
692 void
693 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
694 {
695   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
696   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
697   return;
698 }
699
700
701 /**
702  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
703  *
704  * @node vxlan6-gpe-input
705  * @param *vm
706  * @param *node
707  * @param *from_frame
708  *
709  * @return from_frame->n_vectors - uword
710  *
711  */
static uword
vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  /* IPv6 flavour of the shared decap path. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
718
/**
 * @brief VXLAN GPE error strings
 *
 * Expanded from vxlan_gpe_error.def: one string per VXLAN_GPE_ERROR_* code,
 * in definition order, used by the input node registrations below.
 */
static char *vxlan_gpe_error_strings[] = {
#define vxlan_gpe_error(n,s) s,
#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
#undef vxlan_gpe_error
/* NOTE(review): no "_" macro is defined above; this #undef is a harmless
 * no-op, likely copy-paste residue. */
#undef _
};
728
/* Graph node registration: IPv4 VXLAN-GPE decap.  Next nodes come from the
 * foreach_vxlan_gpe_input_next list; errors from vxlan_gpe_error.def. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .function = vxlan4_gpe_input,
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
753
/* Graph node registration: IPv6 VXLAN-GPE decap.  Mirrors the IPv4 node
 * above, sharing the same next-node list and error strings. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .function = vxlan6_gpe_input,
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
/* Next-node indices for the ip[46]-vxlan-gpe-bypass feature nodes.
 * NOTE(review): the tag "ip_vxan_bypass_next_t" looks like a typo for
 * "ip_vxlan_..."; renaming could break users outside this chunk, so it is
 * left as-is. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxan_bypass_next_t;
784
785 always_inline uword
786 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
787                             vlib_node_runtime_t * node,
788                             vlib_frame_t * frame, u32 is_ip4)
789 {
790   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
791   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
792   vlib_node_runtime_t *error_node =
793     vlib_node_get_runtime (vm, ip4_input_node.index);
794   ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
795   ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */
796
797   from = vlib_frame_vector_args (frame);
798   n_left_from = frame->n_vectors;
799   next_index = node->cached_next_index;
800
801   if (node->flags & VLIB_NODE_FLAG_TRACE)
802     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
803
804   if (is_ip4)
805     addr4.data_u32 = ~0;
806   else
807     ip6_address_set_zero (&addr6);
808
809   while (n_left_from > 0)
810     {
811       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
812
813       while (n_left_from >= 4 && n_left_to_next >= 2)
814         {
815           vlib_buffer_t *b0, *b1;
816           ip4_header_t *ip40, *ip41;
817           ip6_header_t *ip60, *ip61;
818           udp_header_t *udp0, *udp1;
819           u32 bi0, ip_len0, udp_len0, flags0, next0;
820           u32 bi1, ip_len1, udp_len1, flags1, next1;
821           i32 len_diff0, len_diff1;
822           u8 error0, good_udp0, proto0;
823           u8 error1, good_udp1, proto1;
824
825           /* Prefetch next iteration. */
826           {
827             vlib_buffer_t *p2, *p3;
828
829             p2 = vlib_get_buffer (vm, from[2]);
830             p3 = vlib_get_buffer (vm, from[3]);
831
832             vlib_prefetch_buffer_header (p2, LOAD);
833             vlib_prefetch_buffer_header (p3, LOAD);
834
835             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
836             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
837           }
838
839           bi0 = to_next[0] = from[0];
840           bi1 = to_next[1] = from[1];
841           from += 2;
842           n_left_from -= 2;
843           to_next += 2;
844           n_left_to_next -= 2;
845
846           b0 = vlib_get_buffer (vm, bi0);
847           b1 = vlib_get_buffer (vm, bi1);
848           if (is_ip4)
849             {
850               ip40 = vlib_buffer_get_current (b0);
851               ip41 = vlib_buffer_get_current (b1);
852             }
853           else
854             {
855               ip60 = vlib_buffer_get_current (b0);
856               ip61 = vlib_buffer_get_current (b1);
857             }
858
859           /* Setup packet for next IP feature */
860           vnet_feature_next (&next0, b0);
861           vnet_feature_next (&next1, b1);
862
863           if (is_ip4)
864             {
865               proto0 = ip40->protocol;
866               proto1 = ip41->protocol;
867             }
868           else
869             {
870               proto0 = ip60->protocol;
871               proto1 = ip61->protocol;
872             }
873
874           /* Process packet 0 */
875           if (proto0 != IP_PROTOCOL_UDP)
876             goto exit0;         /* not UDP packet */
877
878           if (is_ip4)
879             udp0 = ip4_next_header (ip40);
880           else
881             udp0 = ip6_next_header (ip60);
882
883           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
884             goto exit0;         /* not VXLAN packet */
885
886           /* Validate DIP against VTEPs */
887           if (is_ip4)
888             {
889               if (addr4.as_u32 != ip40->dst_address.as_u32)
890                 {
891                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
892                     goto exit0; /* no local VTEP for VXLAN packet */
893                   addr4 = ip40->dst_address;
894                 }
895             }
896           else
897             {
898               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
899                 {
900                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
901                     goto exit0; /* no local VTEP for VXLAN packet */
902                   addr6 = ip60->dst_address;
903                 }
904             }
905
906           flags0 = b0->flags;
907           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
908
909           /* Don't verify UDP checksum for packets with explicit zero checksum. */
910           good_udp0 |= udp0->checksum == 0;
911
912           /* Verify UDP length */
913           if (is_ip4)
914             ip_len0 = clib_net_to_host_u16 (ip40->length);
915           else
916             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
917           udp_len0 = clib_net_to_host_u16 (udp0->length);
918           len_diff0 = ip_len0 - udp_len0;
919
920           /* Verify UDP checksum */
921           if (PREDICT_FALSE (!good_udp0))
922             {
923               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
924                 {
925                   if (is_ip4)
926                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
927                   else
928                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
929                   good_udp0 =
930                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
931                 }
932             }
933
934           if (is_ip4)
935             {
936               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
937               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
938             }
939           else
940             {
941               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
942               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
943             }
944
945           next0 = error0 ?
946             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
947           b0->error = error0 ? error_node->errors[error0] : 0;
948
949           /* vxlan_gpe-input node expect current at VXLAN header */
950           if (is_ip4)
951             vlib_buffer_advance (b0,
952                                  sizeof (ip4_header_t) +
953                                  sizeof (udp_header_t));
954           else
955             vlib_buffer_advance (b0,
956                                  sizeof (ip6_header_t) +
957                                  sizeof (udp_header_t));
958
959         exit0:
960           /* Process packet 1 */
961           if (proto1 != IP_PROTOCOL_UDP)
962             goto exit1;         /* not UDP packet */
963
964           if (is_ip4)
965             udp1 = ip4_next_header (ip41);
966           else
967             udp1 = ip6_next_header (ip61);
968
969           if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
970             goto exit1;         /* not VXLAN packet */
971
972           /* Validate DIP against VTEPs */
973           if (is_ip4)
974             {
975               if (addr4.as_u32 != ip41->dst_address.as_u32)
976                 {
977                   if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
978                     goto exit1; /* no local VTEP for VXLAN packet */
979                   addr4 = ip41->dst_address;
980                 }
981             }
982           else
983             {
984               if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
985                 {
986                   if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
987                     goto exit1; /* no local VTEP for VXLAN packet */
988                   addr6 = ip61->dst_address;
989                 }
990             }
991
992           flags1 = b1->flags;
993           good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
994
995           /* Don't verify UDP checksum for packets with explicit zero checksum. */
996           good_udp1 |= udp1->checksum == 0;
997
998           /* Verify UDP length */
999           if (is_ip4)
1000             ip_len1 = clib_net_to_host_u16 (ip41->length);
1001           else
1002             ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
1003           udp_len1 = clib_net_to_host_u16 (udp1->length);
1004           len_diff1 = ip_len1 - udp_len1;
1005
1006           /* Verify UDP checksum */
1007           if (PREDICT_FALSE (!good_udp1))
1008             {
1009               if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1010                 {
1011                   if (is_ip4)
1012                     flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1013                   else
1014                     flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1015                   good_udp1 =
1016                     (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1017                 }
1018             }
1019
1020           if (is_ip4)
1021             {
1022               error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1023               error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1024             }
1025           else
1026             {
1027               error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1028               error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1029             }
1030
1031           next1 = error1 ?
1032             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1033           b1->error = error1 ? error_node->errors[error1] : 0;
1034
1035           /* vxlan_gpe-input node expect current at VXLAN header */
1036           if (is_ip4)
1037             vlib_buffer_advance (b1,
1038                                  sizeof (ip4_header_t) +
1039                                  sizeof (udp_header_t));
1040           else
1041             vlib_buffer_advance (b1,
1042                                  sizeof (ip6_header_t) +
1043                                  sizeof (udp_header_t));
1044
1045         exit1:
1046           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1047                                            to_next, n_left_to_next,
1048                                            bi0, bi1, next0, next1);
1049         }
1050
1051       while (n_left_from > 0 && n_left_to_next > 0)
1052         {
1053           vlib_buffer_t *b0;
1054           ip4_header_t *ip40;
1055           ip6_header_t *ip60;
1056           udp_header_t *udp0;
1057           u32 bi0, ip_len0, udp_len0, flags0, next0;
1058           i32 len_diff0;
1059           u8 error0, good_udp0, proto0;
1060
1061           bi0 = to_next[0] = from[0];
1062           from += 1;
1063           n_left_from -= 1;
1064           to_next += 1;
1065           n_left_to_next -= 1;
1066
1067           b0 = vlib_get_buffer (vm, bi0);
1068           if (is_ip4)
1069             ip40 = vlib_buffer_get_current (b0);
1070           else
1071             ip60 = vlib_buffer_get_current (b0);
1072
1073           /* Setup packet for next IP feature */
1074           vnet_feature_next (&next0, b0);
1075
1076           if (is_ip4)
1077             proto0 = ip40->protocol;
1078           else
1079             proto0 = ip60->protocol;
1080
1081           if (proto0 != IP_PROTOCOL_UDP)
1082             goto exit;          /* not UDP packet */
1083
1084           if (is_ip4)
1085             udp0 = ip4_next_header (ip40);
1086           else
1087             udp0 = ip6_next_header (ip60);
1088
1089           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
1090             goto exit;          /* not VXLAN packet */
1091
1092           /* Validate DIP against VTEPs */
1093           if (is_ip4)
1094             {
1095               if (addr4.as_u32 != ip40->dst_address.as_u32)
1096                 {
1097                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
1098                     goto exit;  /* no local VTEP for VXLAN packet */
1099                   addr4 = ip40->dst_address;
1100                 }
1101             }
1102           else
1103             {
1104               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
1105                 {
1106                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
1107                     goto exit;  /* no local VTEP for VXLAN packet */
1108                   addr6 = ip60->dst_address;
1109                 }
1110             }
1111
1112           flags0 = b0->flags;
1113           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1114
1115           /* Don't verify UDP checksum for packets with explicit zero checksum. */
1116           good_udp0 |= udp0->checksum == 0;
1117
1118           /* Verify UDP length */
1119           if (is_ip4)
1120             ip_len0 = clib_net_to_host_u16 (ip40->length);
1121           else
1122             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1123           udp_len0 = clib_net_to_host_u16 (udp0->length);
1124           len_diff0 = ip_len0 - udp_len0;
1125
1126           /* Verify UDP checksum */
1127           if (PREDICT_FALSE (!good_udp0))
1128             {
1129               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1130                 {
1131                   if (is_ip4)
1132                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1133                   else
1134                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1135                   good_udp0 =
1136                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1137                 }
1138             }
1139
1140           if (is_ip4)
1141             {
1142               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1143               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1144             }
1145           else
1146             {
1147               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1148               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1149             }
1150
1151           next0 = error0 ?
1152             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1153           b0->error = error0 ? error_node->errors[error0] : 0;
1154
1155           /* vxlan_gpe-input node expect current at VXLAN header */
1156           if (is_ip4)
1157             vlib_buffer_advance (b0,
1158                                  sizeof (ip4_header_t) +
1159                                  sizeof (udp_header_t));
1160           else
1161             vlib_buffer_advance (b0,
1162                                  sizeof (ip6_header_t) +
1163                                  sizeof (udp_header_t));
1164
1165         exit:
1166           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1167                                            to_next, n_left_to_next,
1168                                            bi0, next0);
1169         }
1170
1171       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1172     }
1173
1174   return frame->n_vectors;
1175 }
1176
1177 static uword
1178 ip4_vxlan_gpe_bypass (vlib_main_t * vm,
1179                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1180 {
1181   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1182 }
1183
/* *INDENT-OFF* */
/**
 * @brief Registration for the "ip4-vxlan-gpe-bypass" graph node.
 *
 * Packets recognized as VXLAN-GPE destined to a local VTEP are sent to
 * "vxlan4-gpe-input"; packets failing validation are dropped via
 * "error-drop" (see IP_VXLAN_BYPASS_NEXT_* next indices).
 */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .function = ip4_vxlan_gpe_bypass,
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  /* Buffers entering this node carry an IPv4 header at current data. */
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1200
VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
/* Dummy init function to get us linked in; all real setup is done by the
 * VLIB_REGISTER_NODE / VLIB_NODE_FUNCTION_MULTIARCH macros above.
 * NOTE: odd indentation below is an artifact of the multiarch macro not
 * ending in a semicolon — do not "fix" it in isolation. */
     clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;			/* nothing to initialize */
}

VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
1209
1210 static uword
1211 ip6_vxlan_gpe_bypass (vlib_main_t * vm,
1212                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1213 {
1214   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1215 }
1216
/* *INDENT-OFF* */
/**
 * @brief Registration for the "ip6-vxlan-gpe-bypass" graph node.
 *
 * Packets recognized as VXLAN-GPE destined to a local VTEP are sent to
 * "vxlan6-gpe-input"; packets failing validation are dropped via
 * "error-drop" (see IP_VXLAN_BYPASS_NEXT_* next indices).
 */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .function = ip6_vxlan_gpe_bypass,
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  /* Buffers entering this node carry an IPv6 header at current data. */
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1233
VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
/* Dummy init function to get us linked in; all real setup is done by the
 * VLIB_REGISTER_NODE / VLIB_NODE_FUNCTION_MULTIARCH macros above.
 * NOTE: odd indentation below is an artifact of the multiarch macro not
 * ending in a semicolon — do not "fix" it in isolation. */
     clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;			/* nothing to initialize */
}

VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
1242
1243 /*
1244  * fd.io coding-style-patch-verification: ON
1245  *
1246  * Local Variables:
1247  * eval: (c-set-style "gnu")
1248  * End:
1249  */