/* VPP-275 Coding standards cleanup - vnet/vnet/vxlan-gpe
 * [vpp.git] / src / vnet / vxlan-gpe / decap.c */
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/pg/pg.h>
25 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26
/* Node registration used below when incrementing per-node counters.
 * NOTE(review): no VLIB_REGISTER_NODE for this symbol appears in this
 * view (only vxlan4/vxlan6 variants are registered) — confirm it is
 * registered elsewhere, or the counter lands on the wrong node. */
vlib_node_registration_t vxlan_gpe_input_node;

/**
 * @brief Struct for VXLAN GPE decap packet tracing
 *
 * One record is appended per traced packet in vxlan_gpe_input().
 */
typedef struct
{
  u32 next_index;		/**< graph arc the packet was dispatched to */
  u32 tunnel_index;		/**< index into the tunnel pool; ~0 if no tunnel matched */
  u32 error;			/**< node error code recorded for this packet (0 = none) */
} vxlan_gpe_rx_trace_t;
39
40 /**
41  * @brief Tracing function for VXLAN GPE packet decapsulation
42  *
43  * @param *s
44  * @param *args
45  *
46  * @return *s
47  *
48  */
49 static u8 *
50 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
51 {
52   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
53   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
54   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
55
56   if (t->tunnel_index != ~0)
57     {
58       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
59                   t->next_index, t->error);
60     }
61   else
62     {
63       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
64                   t->error);
65     }
66   return s;
67 }
68
/**
 * @brief Tracing function for VXLAN GPE packet decapsulation including length
 *
 * Registered as .format_buffer on the vxlan[46]-gpe-input nodes below.
 * NOTE(review): the body consumes the vm/node va_args but appends nothing
 * to the string — this looks like an unfinished stub; confirm whether
 * header/length formatting was intended here.
 *
 * @param *s
 * @param *args
 *
 * @return *s (unchanged)
 *
 */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);


  return s;
}
87
88 /**
89  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
90  *
91  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
92  * tunnels are "terminate local". This means that there is no "TX" interface for this
93  * decap case, so that field in the buffer_metadata can be "used for something else".
94  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
95  * FIB index used to look up the inner-packet's adjacency.
96  *
97  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
98  *
99  * @param *vm
100  * @param *node
101  * @param *from_frame
102  * @param is_ip4
103  *
104  * @return from_frame->n_vectors
105  *
106  */
107 always_inline uword
108 vxlan_gpe_input (vlib_main_t * vm,
109                  vlib_node_runtime_t * node,
110                  vlib_frame_t * from_frame, u8 is_ip4)
111 {
112   u32 n_left_from, next_index, *from, *to_next;
113   vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
114   vnet_main_t *vnm = nngm->vnet_main;
115   vnet_interface_main_t *im = &vnm->interface_main;
116   u32 last_tunnel_index = ~0;
117   vxlan4_gpe_tunnel_key_t last_key4;
118   vxlan6_gpe_tunnel_key_t last_key6;
119   u32 pkts_decapsulated = 0;
120   u32 thread_index = vlib_get_thread_index ();
121   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
122
123   if (is_ip4)
124     memset (&last_key4, 0xff, sizeof (last_key4));
125   else
126     memset (&last_key6, 0xff, sizeof (last_key6));
127
128   from = vlib_frame_vector_args (from_frame);
129   n_left_from = from_frame->n_vectors;
130
131   next_index = node->cached_next_index;
132   stats_sw_if_index = node->runtime_data[0];
133   stats_n_packets = stats_n_bytes = 0;
134
135   while (n_left_from > 0)
136     {
137       u32 n_left_to_next;
138
139       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
140
141       while (n_left_from >= 4 && n_left_to_next >= 2)
142         {
143           u32 bi0, bi1;
144           vlib_buffer_t *b0, *b1;
145           u32 next0, next1;
146           ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
147           ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
148           uword *p0, *p1;
149           u32 tunnel_index0, tunnel_index1;
150           vxlan_gpe_tunnel_t *t0, *t1;
151           vxlan4_gpe_tunnel_key_t key4_0, key4_1;
152           vxlan6_gpe_tunnel_key_t key6_0, key6_1;
153           u32 error0, error1;
154           u32 sw_if_index0, sw_if_index1, len0, len1;
155
156           /* Prefetch next iteration. */
157           {
158             vlib_buffer_t *p2, *p3;
159
160             p2 = vlib_get_buffer (vm, from[2]);
161             p3 = vlib_get_buffer (vm, from[3]);
162
163             vlib_prefetch_buffer_header (p2, LOAD);
164             vlib_prefetch_buffer_header (p3, LOAD);
165
166             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
167             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
168           }
169
170           bi0 = from[0];
171           bi1 = from[1];
172           to_next[0] = bi0;
173           to_next[1] = bi1;
174           from += 2;
175           to_next += 2;
176           n_left_to_next -= 2;
177           n_left_from -= 2;
178
179           b0 = vlib_get_buffer (vm, bi0);
180           b1 = vlib_get_buffer (vm, bi1);
181
182           if (is_ip4)
183             {
184               /* udp leaves current_data pointing at the vxlan-gpe header */
185               vlib_buffer_advance (b0,
186                                    -(word) (sizeof (udp_header_t) +
187                                             sizeof (ip4_header_t)));
188               vlib_buffer_advance (b1,
189                                    -(word) (sizeof (udp_header_t) +
190                                             sizeof (ip4_header_t)));
191
192               iuvn4_0 = vlib_buffer_get_current (b0);
193               iuvn4_1 = vlib_buffer_get_current (b1);
194
195               /* pop (ip, udp, vxlan) */
196               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
197               vlib_buffer_advance (b1, sizeof (*iuvn4_1));
198             }
199           else
200             {
201               /* udp leaves current_data pointing at the vxlan-gpe header */
202               vlib_buffer_advance (b0,
203                                    -(word) (sizeof (udp_header_t) +
204                                             sizeof (ip6_header_t)));
205               vlib_buffer_advance (b1,
206                                    -(word) (sizeof (udp_header_t) +
207                                             sizeof (ip6_header_t)));
208
209               iuvn6_0 = vlib_buffer_get_current (b0);
210               iuvn6_1 = vlib_buffer_get_current (b1);
211
212               /* pop (ip, udp, vxlan) */
213               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
214               vlib_buffer_advance (b1, sizeof (*iuvn6_1));
215             }
216
217           tunnel_index0 = ~0;
218           tunnel_index1 = ~0;
219           error0 = 0;
220           error1 = 0;
221
222           if (is_ip4)
223             {
224               next0 =
225                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
226                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
227                 VXLAN_GPE_INPUT_NEXT_DROP;
228               next1 =
229                 (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
230                 nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
231                 VXLAN_GPE_INPUT_NEXT_DROP;
232
233               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
234               key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
235
236               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
237               key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
238
239               key4_0.vni = iuvn4_0->vxlan.vni_res;
240               key4_1.vni = iuvn4_1->vxlan.vni_res;
241
242               key4_0.pad = 0;
243               key4_1.pad = 0;
244             }
245           else                  /* is_ip6 */
246             {
247               next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
248                 iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
249               next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
250                 iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
251
252               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
253               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
254               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
255               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
256
257               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
258               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
259               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
260               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
261
262               key6_0.vni = iuvn6_0->vxlan.vni_res;
263               key6_1.vni = iuvn6_1->vxlan.vni_res;
264             }
265
266           /* Processing packet 0 */
267           if (is_ip4)
268             {
269               /* Processing for key4_0 */
270               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
271                                  || (key4_0.as_u64[1] !=
272                                      last_key4.as_u64[1])))
273                 {
274                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
275
276                   if (p0 == 0)
277                     {
278                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
279                       goto trace0;
280                     }
281
282                   last_key4.as_u64[0] = key4_0.as_u64[0];
283                   last_key4.as_u64[1] = key4_0.as_u64[1];
284                   tunnel_index0 = last_tunnel_index = p0[0];
285                 }
286               else
287                 tunnel_index0 = last_tunnel_index;
288             }
289           else                  /* is_ip6 */
290             {
291               next0 =
292                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
293                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
294                 VXLAN_GPE_INPUT_NEXT_DROP;
295               next1 =
296                 (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
297                 nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
298                 VXLAN_GPE_INPUT_NEXT_DROP;
299
300               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
301               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
302               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
303               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
304
305               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
306               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
307               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
308               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
309
310               key6_0.vni = iuvn6_0->vxlan.vni_res;
311               key6_1.vni = iuvn6_1->vxlan.vni_res;
312
313               /* Processing for key6_0 */
314               if (PREDICT_FALSE
315                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
316                 {
317                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
318
319                   if (p0 == 0)
320                     {
321                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
322                       goto trace0;
323                     }
324
325                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
326                   tunnel_index0 = last_tunnel_index = p0[0];
327                 }
328               else
329                 tunnel_index0 = last_tunnel_index;
330             }
331
332           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
333
334
335           sw_if_index0 = t0->sw_if_index;
336           len0 = vlib_buffer_length_in_chain (vm, b0);
337
338           /* Required to make the l2 tag push / pop code work on l2 subifs */
339           vnet_update_l2_len (b0);
340
341       /**
342        * ip[46] lookup in the configured FIB
343        */
344           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
345
346           pkts_decapsulated++;
347           stats_n_packets += 1;
348           stats_n_bytes += len0;
349
350           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
351             {
352               stats_n_packets -= 1;
353               stats_n_bytes -= len0;
354               if (stats_n_packets)
355                 vlib_increment_combined_counter (im->combined_sw_if_counters +
356                                                  VNET_INTERFACE_COUNTER_RX,
357                                                  thread_index,
358                                                  stats_sw_if_index,
359                                                  stats_n_packets,
360                                                  stats_n_bytes);
361               stats_n_packets = 1;
362               stats_n_bytes = len0;
363               stats_sw_if_index = sw_if_index0;
364             }
365
366         trace0:b0->error = error0 ? node->errors[error0] : 0;
367
368           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
369             {
370               vxlan_gpe_rx_trace_t *tr =
371                 vlib_add_trace (vm, node, b0, sizeof (*tr));
372               tr->next_index = next0;
373               tr->error = error0;
374               tr->tunnel_index = tunnel_index0;
375             }
376
377           /* Process packet 1 */
378           if (is_ip4)
379             {
380               /* Processing for key4_1 */
381               if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
382                                  || (key4_1.as_u64[1] !=
383                                      last_key4.as_u64[1])))
384                 {
385                   p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
386
387                   if (p1 == 0)
388                     {
389                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
390                       goto trace1;
391                     }
392
393                   last_key4.as_u64[0] = key4_1.as_u64[0];
394                   last_key4.as_u64[1] = key4_1.as_u64[1];
395                   tunnel_index1 = last_tunnel_index = p1[0];
396                 }
397               else
398                 tunnel_index1 = last_tunnel_index;
399             }
400           else                  /* is_ip6 */
401             {
402               /* Processing for key6_1 */
403               if (PREDICT_FALSE
404                   (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
405                 {
406                   p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
407
408                   if (p1 == 0)
409                     {
410                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
411                       goto trace1;
412                     }
413
414                   memcpy (&last_key6, &key6_1, sizeof (key6_1));
415                   tunnel_index1 = last_tunnel_index = p1[0];
416                 }
417               else
418                 tunnel_index1 = last_tunnel_index;
419             }
420
421           t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
422
423           sw_if_index1 = t1->sw_if_index;
424           len1 = vlib_buffer_length_in_chain (vm, b1);
425
426           /* Required to make the l2 tag push / pop code work on l2 subifs */
427           vnet_update_l2_len (b1);
428
429           /*
430            * ip[46] lookup in the configured FIB
431            */
432           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
433
434           pkts_decapsulated++;
435           stats_n_packets += 1;
436           stats_n_bytes += len1;
437
438           /* Batch stats increment on the same vxlan tunnel so counter
439              is not incremented per packet */
440           if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
441             {
442               stats_n_packets -= 1;
443               stats_n_bytes -= len1;
444               if (stats_n_packets)
445                 vlib_increment_combined_counter (im->combined_sw_if_counters +
446                                                  VNET_INTERFACE_COUNTER_RX,
447                                                  thread_index,
448                                                  stats_sw_if_index,
449                                                  stats_n_packets,
450                                                  stats_n_bytes);
451               stats_n_packets = 1;
452               stats_n_bytes = len1;
453               stats_sw_if_index = sw_if_index1;
454             }
455           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
456
457         trace1:b1->error = error1 ? node->errors[error1] : 0;
458
459           if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
460             {
461               vxlan_gpe_rx_trace_t *tr =
462                 vlib_add_trace (vm, node, b1, sizeof (*tr));
463               tr->next_index = next1;
464               tr->error = error1;
465               tr->tunnel_index = tunnel_index1;
466             }
467
468           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
469                                            n_left_to_next, bi0, bi1, next0,
470                                            next1);
471         }
472
473       while (n_left_from > 0 && n_left_to_next > 0)
474         {
475           u32 bi0;
476           vlib_buffer_t *b0;
477           u32 next0;
478           ip4_vxlan_gpe_header_t *iuvn4_0;
479           ip6_vxlan_gpe_header_t *iuvn6_0;
480           uword *p0;
481           u32 tunnel_index0;
482           vxlan_gpe_tunnel_t *t0;
483           vxlan4_gpe_tunnel_key_t key4_0;
484           vxlan6_gpe_tunnel_key_t key6_0;
485           u32 error0;
486           u32 sw_if_index0, len0;
487
488           bi0 = from[0];
489           to_next[0] = bi0;
490           from += 1;
491           to_next += 1;
492           n_left_from -= 1;
493           n_left_to_next -= 1;
494
495           b0 = vlib_get_buffer (vm, bi0);
496
497           if (is_ip4)
498             {
499               /* udp leaves current_data pointing at the vxlan-gpe header */
500               vlib_buffer_advance (b0,
501                                    -(word) (sizeof (udp_header_t) +
502                                             sizeof (ip4_header_t)));
503
504               iuvn4_0 = vlib_buffer_get_current (b0);
505
506               /* pop (ip, udp, vxlan) */
507               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
508             }
509           else
510             {
511               /* udp leaves current_data pointing at the vxlan-gpe header */
512               vlib_buffer_advance (b0,
513                                    -(word) (sizeof (udp_header_t) +
514                                             sizeof (ip6_header_t)));
515
516               iuvn6_0 = vlib_buffer_get_current (b0);
517
518               /* pop (ip, udp, vxlan) */
519               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
520             }
521
522           tunnel_index0 = ~0;
523           error0 = 0;
524
525           if (is_ip4)
526             {
527               next0 =
528                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
529                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
530                 VXLAN_GPE_INPUT_NEXT_DROP;
531
532               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
533               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
534               key4_0.vni = iuvn4_0->vxlan.vni_res;
535               key4_0.pad = 0;
536
537               /* Processing for key4_0 */
538               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
539                                  || (key4_0.as_u64[1] !=
540                                      last_key4.as_u64[1])))
541                 {
542                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
543
544                   if (p0 == 0)
545                     {
546                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
547                       goto trace00;
548                     }
549
550                   last_key4.as_u64[0] = key4_0.as_u64[0];
551                   last_key4.as_u64[1] = key4_0.as_u64[1];
552                   tunnel_index0 = last_tunnel_index = p0[0];
553                 }
554               else
555                 tunnel_index0 = last_tunnel_index;
556             }
557           else                  /* is_ip6 */
558             {
559               next0 =
560                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
561                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
562                 VXLAN_GPE_INPUT_NEXT_DROP;
563
564               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
565               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
566               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
567               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
568               key6_0.vni = iuvn6_0->vxlan.vni_res;
569
570               /* Processing for key6_0 */
571               if (PREDICT_FALSE
572                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
573                 {
574                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
575
576                   if (p0 == 0)
577                     {
578                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
579                       goto trace00;
580                     }
581
582                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
583                   tunnel_index0 = last_tunnel_index = p0[0];
584                 }
585               else
586                 tunnel_index0 = last_tunnel_index;
587             }
588
589           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
590
591
592           sw_if_index0 = t0->sw_if_index;
593           len0 = vlib_buffer_length_in_chain (vm, b0);
594
595           /* Required to make the l2 tag push / pop code work on l2 subifs */
596           vnet_update_l2_len (b0);
597
598           /*
599            * ip[46] lookup in the configured FIB
600            */
601           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
602
603           pkts_decapsulated++;
604           stats_n_packets += 1;
605           stats_n_bytes += len0;
606
607           /* Batch stats increment on the same vxlan-gpe tunnel so counter
608              is not incremented per packet */
609           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
610             {
611               stats_n_packets -= 1;
612               stats_n_bytes -= len0;
613               if (stats_n_packets)
614                 vlib_increment_combined_counter (im->combined_sw_if_counters +
615                                                  VNET_INTERFACE_COUNTER_RX,
616                                                  thread_index,
617                                                  stats_sw_if_index,
618                                                  stats_n_packets,
619                                                  stats_n_bytes);
620               stats_n_packets = 1;
621               stats_n_bytes = len0;
622               stats_sw_if_index = sw_if_index0;
623             }
624
625         trace00:b0->error = error0 ? node->errors[error0] : 0;
626
627           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
628             {
629               vxlan_gpe_rx_trace_t *tr =
630                 vlib_add_trace (vm, node, b0, sizeof (*tr));
631               tr->next_index = next0;
632               tr->error = error0;
633               tr->tunnel_index = tunnel_index0;
634             }
635           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
636                                            n_left_to_next, bi0, next0);
637         }
638
639       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
640     }
641   vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
642                                VXLAN_GPE_ERROR_DECAPSULATED,
643                                pkts_decapsulated);
644   /* Increment any remaining batch stats */
645   if (stats_n_packets)
646     {
647       vlib_increment_combined_counter (im->combined_sw_if_counters +
648                                        VNET_INTERFACE_COUNTER_RX,
649                                        thread_index, stats_sw_if_index,
650                                        stats_n_packets, stats_n_bytes);
651       node->runtime_data[0] = stats_sw_if_index;
652     }
653   return from_frame->n_vectors;
654 }
655
656 /**
657  * @brief Graph processing dispatch function for IPv4 VXLAN GPE
658  *
659  * @node vxlan4-gpe-input
660  * @param *vm
661  * @param *node
662  * @param *from_frame
663  *
664  * @return from_frame->n_vectors
665  *
666  */
667 static uword
668 vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
669                   vlib_frame_t * from_frame)
670 {
671   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
672 }
673
674
675 void
676 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
677 {
678   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
679   hm->decap_next_node_list[protocol_id] = next_node_index;
680   return;
681 }
682
683 void
684 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
685 {
686   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
687   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
688   return;
689 }
690
691
692 /**
693  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
694  *
695  * @node vxlan6-gpe-input
696  * @param *vm
697  * @param *node
698  * @param *from_frame
699  *
700  * @return from_frame->n_vectors - uword
701  *
702  */
703 static uword
704 vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
705                   vlib_frame_t * from_frame)
706 {
707   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
708 }
709
710 /**
711  * @brief VXLAN GPE error strings
712  */
713 static char *vxlan_gpe_error_strings[] = {
714 #define vxlan_gpe_error(n,s) s,
715 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
716 #undef vxlan_gpe_error
717 #undef _
718 };
719
/* Node registration for IPv4 VXLAN GPE decap. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .function = vxlan4_gpe_input,
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  /* Next-node arcs expanded from foreach_vxlan_gpe_input_next */
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

/* Generate per-CPU-architecture variants of the dispatch function. */
VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
744
/* Node registration for IPv6 VXLAN GPE decap. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .function = vxlan6_gpe_input,
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  /* Next-node arcs expanded from foreach_vxlan_gpe_input_next */
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

/* Generate per-CPU-architecture variants of the dispatch function. */
VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
/* Next nodes for the ip[46]-vxlan-gpe-bypass feature.
 * NOTE(review): the tag "ip_vxan_bypass_next_t" has a typo ("vxan");
 * kept as-is since code outside this view may reference it. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,	/* drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,	/* bypass to VXLAN(-GPE) input */
  IP_VXLAN_BYPASS_N_NEXT,	/* number of next nodes */
} ip_vxan_bypass_next_t;
775
776 always_inline uword
777 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
778                             vlib_node_runtime_t * node,
779                             vlib_frame_t * frame, u32 is_ip4)
780 {
781   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
782   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
783   vlib_node_runtime_t *error_node =
784     vlib_node_get_runtime (vm, ip4_input_node.index);
785   ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
786   ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */
787
788   from = vlib_frame_vector_args (frame);
789   n_left_from = frame->n_vectors;
790   next_index = node->cached_next_index;
791
792   if (node->flags & VLIB_NODE_FLAG_TRACE)
793     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
794
795   if (is_ip4)
796     addr4.data_u32 = ~0;
797   else
798     ip6_address_set_zero (&addr6);
799
800   while (n_left_from > 0)
801     {
802       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
803
804       while (n_left_from >= 4 && n_left_to_next >= 2)
805         {
806           vlib_buffer_t *b0, *b1;
807           ip4_header_t *ip40, *ip41;
808           ip6_header_t *ip60, *ip61;
809           udp_header_t *udp0, *udp1;
810           u32 bi0, ip_len0, udp_len0, flags0, next0;
811           u32 bi1, ip_len1, udp_len1, flags1, next1;
812           i32 len_diff0, len_diff1;
813           u8 error0, good_udp0, proto0;
814           u8 error1, good_udp1, proto1;
815
816           /* Prefetch next iteration. */
817           {
818             vlib_buffer_t *p2, *p3;
819
820             p2 = vlib_get_buffer (vm, from[2]);
821             p3 = vlib_get_buffer (vm, from[3]);
822
823             vlib_prefetch_buffer_header (p2, LOAD);
824             vlib_prefetch_buffer_header (p3, LOAD);
825
826             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
827             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
828           }
829
830           bi0 = to_next[0] = from[0];
831           bi1 = to_next[1] = from[1];
832           from += 2;
833           n_left_from -= 2;
834           to_next += 2;
835           n_left_to_next -= 2;
836
837           b0 = vlib_get_buffer (vm, bi0);
838           b1 = vlib_get_buffer (vm, bi1);
839           if (is_ip4)
840             {
841               ip40 = vlib_buffer_get_current (b0);
842               ip41 = vlib_buffer_get_current (b1);
843             }
844           else
845             {
846               ip60 = vlib_buffer_get_current (b0);
847               ip61 = vlib_buffer_get_current (b1);
848             }
849
850           /* Setup packet for next IP feature */
851           vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
852                              b0);
853           vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
854                              b1);
855
856           if (is_ip4)
857             {
858               proto0 = ip40->protocol;
859               proto1 = ip41->protocol;
860             }
861           else
862             {
863               proto0 = ip60->protocol;
864               proto1 = ip61->protocol;
865             }
866
867           /* Process packet 0 */
868           if (proto0 != IP_PROTOCOL_UDP)
869             goto exit0;         /* not UDP packet */
870
871           if (is_ip4)
872             udp0 = ip4_next_header (ip40);
873           else
874             udp0 = ip6_next_header (ip60);
875
876           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
877             goto exit0;         /* not VXLAN packet */
878
879           /* Validate DIP against VTEPs */
880           if (is_ip4)
881             {
882               if (addr4.as_u32 != ip40->dst_address.as_u32)
883                 {
884                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
885                     goto exit0; /* no local VTEP for VXLAN packet */
886                   addr4 = ip40->dst_address;
887                 }
888             }
889           else
890             {
891               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
892                 {
893                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
894                     goto exit0; /* no local VTEP for VXLAN packet */
895                   addr6 = ip60->dst_address;
896                 }
897             }
898
899           flags0 = b0->flags;
900           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
901
902           /* Don't verify UDP checksum for packets with explicit zero checksum. */
903           good_udp0 |= udp0->checksum == 0;
904
905           /* Verify UDP length */
906           if (is_ip4)
907             ip_len0 = clib_net_to_host_u16 (ip40->length);
908           else
909             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
910           udp_len0 = clib_net_to_host_u16 (udp0->length);
911           len_diff0 = ip_len0 - udp_len0;
912
913           /* Verify UDP checksum */
914           if (PREDICT_FALSE (!good_udp0))
915             {
916               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
917                 {
918                   if (is_ip4)
919                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
920                   else
921                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
922                   good_udp0 =
923                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
924                 }
925             }
926
927           if (is_ip4)
928             {
929               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
930               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
931             }
932           else
933             {
934               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
935               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
936             }
937
938           next0 = error0 ?
939             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
940           b0->error = error0 ? error_node->errors[error0] : 0;
941
942           /* vxlan_gpe-input node expect current at VXLAN header */
943           if (is_ip4)
944             vlib_buffer_advance (b0,
945                                  sizeof (ip4_header_t) +
946                                  sizeof (udp_header_t));
947           else
948             vlib_buffer_advance (b0,
949                                  sizeof (ip6_header_t) +
950                                  sizeof (udp_header_t));
951
952         exit0:
953           /* Process packet 1 */
954           if (proto1 != IP_PROTOCOL_UDP)
955             goto exit1;         /* not UDP packet */
956
957           if (is_ip4)
958             udp1 = ip4_next_header (ip41);
959           else
960             udp1 = ip6_next_header (ip61);
961
962           if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
963             goto exit1;         /* not VXLAN packet */
964
965           /* Validate DIP against VTEPs */
966           if (is_ip4)
967             {
968               if (addr4.as_u32 != ip41->dst_address.as_u32)
969                 {
970                   if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
971                     goto exit1; /* no local VTEP for VXLAN packet */
972                   addr4 = ip41->dst_address;
973                 }
974             }
975           else
976             {
977               if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
978                 {
979                   if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
980                     goto exit1; /* no local VTEP for VXLAN packet */
981                   addr6 = ip61->dst_address;
982                 }
983             }
984
985           flags1 = b1->flags;
986           good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
987
988           /* Don't verify UDP checksum for packets with explicit zero checksum. */
989           good_udp1 |= udp1->checksum == 0;
990
991           /* Verify UDP length */
992           if (is_ip4)
993             ip_len1 = clib_net_to_host_u16 (ip41->length);
994           else
995             ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
996           udp_len1 = clib_net_to_host_u16 (udp1->length);
997           len_diff1 = ip_len1 - udp_len1;
998
999           /* Verify UDP checksum */
1000           if (PREDICT_FALSE (!good_udp1))
1001             {
1002               if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1003                 {
1004                   if (is_ip4)
1005                     flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1006                   else
1007                     flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1008                   good_udp1 =
1009                     (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1010                 }
1011             }
1012
1013           if (is_ip4)
1014             {
1015               error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1016               error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1017             }
1018           else
1019             {
1020               error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1021               error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1022             }
1023
1024           next1 = error1 ?
1025             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1026           b1->error = error1 ? error_node->errors[error1] : 0;
1027
1028           /* vxlan_gpe-input node expect current at VXLAN header */
1029           if (is_ip4)
1030             vlib_buffer_advance (b1,
1031                                  sizeof (ip4_header_t) +
1032                                  sizeof (udp_header_t));
1033           else
1034             vlib_buffer_advance (b1,
1035                                  sizeof (ip6_header_t) +
1036                                  sizeof (udp_header_t));
1037
1038         exit1:
1039           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1040                                            to_next, n_left_to_next,
1041                                            bi0, bi1, next0, next1);
1042         }
1043
1044       while (n_left_from > 0 && n_left_to_next > 0)
1045         {
1046           vlib_buffer_t *b0;
1047           ip4_header_t *ip40;
1048           ip6_header_t *ip60;
1049           udp_header_t *udp0;
1050           u32 bi0, ip_len0, udp_len0, flags0, next0;
1051           i32 len_diff0;
1052           u8 error0, good_udp0, proto0;
1053
1054           bi0 = to_next[0] = from[0];
1055           from += 1;
1056           n_left_from -= 1;
1057           to_next += 1;
1058           n_left_to_next -= 1;
1059
1060           b0 = vlib_get_buffer (vm, bi0);
1061           if (is_ip4)
1062             ip40 = vlib_buffer_get_current (b0);
1063           else
1064             ip60 = vlib_buffer_get_current (b0);
1065
1066           /* Setup packet for next IP feature */
1067           vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
1068                              b0);
1069
1070           if (is_ip4)
1071             proto0 = ip40->protocol;
1072           else
1073             proto0 = ip60->protocol;
1074
1075           if (proto0 != IP_PROTOCOL_UDP)
1076             goto exit;          /* not UDP packet */
1077
1078           if (is_ip4)
1079             udp0 = ip4_next_header (ip40);
1080           else
1081             udp0 = ip6_next_header (ip60);
1082
1083           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
1084             goto exit;          /* not VXLAN packet */
1085
1086           /* Validate DIP against VTEPs */
1087           if (is_ip4)
1088             {
1089               if (addr4.as_u32 != ip40->dst_address.as_u32)
1090                 {
1091                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
1092                     goto exit;  /* no local VTEP for VXLAN packet */
1093                   addr4 = ip40->dst_address;
1094                 }
1095             }
1096           else
1097             {
1098               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
1099                 {
1100                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
1101                     goto exit;  /* no local VTEP for VXLAN packet */
1102                   addr6 = ip60->dst_address;
1103                 }
1104             }
1105
1106           flags0 = b0->flags;
1107           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1108
1109           /* Don't verify UDP checksum for packets with explicit zero checksum. */
1110           good_udp0 |= udp0->checksum == 0;
1111
1112           /* Verify UDP length */
1113           if (is_ip4)
1114             ip_len0 = clib_net_to_host_u16 (ip40->length);
1115           else
1116             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1117           udp_len0 = clib_net_to_host_u16 (udp0->length);
1118           len_diff0 = ip_len0 - udp_len0;
1119
1120           /* Verify UDP checksum */
1121           if (PREDICT_FALSE (!good_udp0))
1122             {
1123               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1124                 {
1125                   if (is_ip4)
1126                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1127                   else
1128                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1129                   good_udp0 =
1130                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1131                 }
1132             }
1133
1134           if (is_ip4)
1135             {
1136               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1137               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1138             }
1139           else
1140             {
1141               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1142               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1143             }
1144
1145           next0 = error0 ?
1146             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1147           b0->error = error0 ? error_node->errors[error0] : 0;
1148
1149           /* vxlan_gpe-input node expect current at VXLAN header */
1150           if (is_ip4)
1151             vlib_buffer_advance (b0,
1152                                  sizeof (ip4_header_t) +
1153                                  sizeof (udp_header_t));
1154           else
1155             vlib_buffer_advance (b0,
1156                                  sizeof (ip6_header_t) +
1157                                  sizeof (udp_header_t));
1158
1159         exit:
1160           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1161                                            to_next, n_left_to_next,
1162                                            bi0, next0);
1163         }
1164
1165       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1166     }
1167
1168   return frame->n_vectors;
1169 }
1170
1171 static uword
1172 ip4_vxlan_gpe_bypass (vlib_main_t * vm,
1173                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1174 {
1175   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1176 }
1177
/* *INDENT-OFF* */
/**
 * @brief Node registration for "ip4-vxlan-gpe-bypass".
 *
 * Packets either continue to drop on error or are handed directly to
 * vxlan4-gpe-input, skipping the normal ip4 lookup (hence "bypass").
 */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .function = ip4_vxlan_gpe_bypass,
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  /* Two next nodes: drop on validation error, else vxlan4-gpe-input. */
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  /* Buffers entering this node carry an ip4 header at current data. */
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1194
/* Generate CPU-architecture-specific variants of the node function
 * (selected at runtime); presumably expands without a trailing
 * semicolon, which explains the odd indentation of the line below. */
VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
/* Dummy init function to get us linked in. */
     clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  /* Nothing to initialize; the registration alone pulls this file in. */
  return 0;
}

/* Run ip4_vxlan_gpe_bypass_init during vlib initialization. */
VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
1203
1204 static uword
1205 ip6_vxlan_gpe_bypass (vlib_main_t * vm,
1206                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1207 {
1208   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1209 }
1210
/* *INDENT-OFF* */
/**
 * @brief Node registration for "ip6-vxlan-gpe-bypass".
 *
 * Packets either continue to drop on error or are handed directly to
 * vxlan6-gpe-input, skipping the normal ip6 lookup (hence "bypass").
 */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .function = ip6_vxlan_gpe_bypass,
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  /* Two next nodes: drop on validation error, else vxlan6-gpe-input. */
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  /* Buffers entering this node carry an ip6 header at current data. */
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1227
/* Generate CPU-architecture-specific variants of the node function
 * (selected at runtime); presumably expands without a trailing
 * semicolon, which explains the odd indentation of the line below. */
VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
/* Dummy init function to get us linked in. */
     clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  /* Nothing to initialize; the registration alone pulls this file in. */
  return 0;
}

/* Run ip6_vxlan_gpe_bypass_init during vlib initialization. */
VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
1236
1237 /*
1238  * fd.io coding-style-patch-verification: ON
1239  *
1240  * Local Variables:
1241  * eval: (c-set-style "gnu")
1242  * End:
1243  */