make "test-all" target pass again
[vpp.git] / src / vnet / vxlan-gpe / decap.c
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/pg/pg.h>
25 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26
27 vlib_node_registration_t vxlan_gpe_input_node;
28
29 /**
30  * @brief Struct for VXLAN GPE decap packet tracing
31  *
32  */
typedef struct
{
  u32 next_index;	/**< Graph next-node index the packet was dispatched to */
  u32 tunnel_index;	/**< Index into vxlan_gpe_main.tunnels; ~0 when no tunnel matched */
  u32 error;		/**< Node error code recorded for the packet; 0 means no error */
} vxlan_gpe_rx_trace_t;
39
40 /**
41  * @brief Tracing function for VXLAN GPE packet decapsulation
42  *
43  * @param *s
44  * @param *args
45  *
46  * @return *s
47  *
48  */
49 static u8 *
50 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
51 {
52   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
53   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
54   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
55
56   if (t->tunnel_index != ~0)
57     {
58       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
59                   t->next_index, t->error);
60     }
61   else
62     {
63       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
64                   t->error);
65     }
66   return s;
67 }
68
69 /**
70  * @brief Tracing function for VXLAN GPE packet decapsulation including length
71  *
72  * @param *s
73  * @param *args
74  *
75  * @return *s
76  *
77  */
/* NOTE(review): stub — consumes the standard (vm, node) preamble args and
 * returns the string unchanged; no packet/length content is formatted.
 * Registered as .format_buffer on both input nodes below; confirm whether a
 * real implementation was intended. */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);


  return s;
}
87
88 /**
89  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
90  *
91  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
92  * tunnels are "terminate local". This means that there is no "TX" interface for this
93  * decap case, so that field in the buffer_metadata can be "used for something else".
94  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
95  * FIB index used to look up the inner-packet's adjacency.
96  *
97  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
98  *
99  * @param *vm
100  * @param *node
101  * @param *from_frame
102  * @param is_ip4
103  *
104  * @return from_frame->n_vectors
105  *
106  */
107 always_inline uword
108 vxlan_gpe_input (vlib_main_t * vm,
109                  vlib_node_runtime_t * node,
110                  vlib_frame_t * from_frame, u8 is_ip4)
111 {
112   u32 n_left_from, next_index, *from, *to_next;
113   vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
114   vnet_main_t *vnm = nngm->vnet_main;
115   vnet_interface_main_t *im = &vnm->interface_main;
116   u32 last_tunnel_index = ~0;
117   vxlan4_gpe_tunnel_key_t last_key4;
118   vxlan6_gpe_tunnel_key_t last_key6;
119   u32 pkts_decapsulated = 0;
120   u32 thread_index = vlib_get_thread_index ();
121   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
122
123   if (is_ip4)
124     memset (&last_key4, 0xff, sizeof (last_key4));
125   else
126     memset (&last_key6, 0xff, sizeof (last_key6));
127
128   from = vlib_frame_vector_args (from_frame);
129   n_left_from = from_frame->n_vectors;
130
131   next_index = node->cached_next_index;
132   stats_sw_if_index = node->runtime_data[0];
133   stats_n_packets = stats_n_bytes = 0;
134
135   while (n_left_from > 0)
136     {
137       u32 n_left_to_next;
138
139       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
140
141       while (n_left_from >= 4 && n_left_to_next >= 2)
142         {
143           u32 bi0, bi1;
144           vlib_buffer_t *b0, *b1;
145           u32 next0, next1;
146           ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
147           ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
148           uword *p0, *p1;
149           u32 tunnel_index0, tunnel_index1;
150           vxlan_gpe_tunnel_t *t0, *t1;
151           vxlan4_gpe_tunnel_key_t key4_0, key4_1;
152           vxlan6_gpe_tunnel_key_t key6_0, key6_1;
153           u32 error0, error1;
154           u32 sw_if_index0, sw_if_index1, len0, len1;
155
156           /* Prefetch next iteration. */
157           {
158             vlib_buffer_t *p2, *p3;
159
160             p2 = vlib_get_buffer (vm, from[2]);
161             p3 = vlib_get_buffer (vm, from[3]);
162
163             vlib_prefetch_buffer_header (p2, LOAD);
164             vlib_prefetch_buffer_header (p3, LOAD);
165
166             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
167             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
168           }
169
170           bi0 = from[0];
171           bi1 = from[1];
172           to_next[0] = bi0;
173           to_next[1] = bi1;
174           from += 2;
175           to_next += 2;
176           n_left_to_next -= 2;
177           n_left_from -= 2;
178
179           b0 = vlib_get_buffer (vm, bi0);
180           b1 = vlib_get_buffer (vm, bi1);
181
182           if (is_ip4)
183             {
184               /* udp leaves current_data pointing at the vxlan-gpe header */
185               vlib_buffer_advance (b0,
186                                    -(word) (sizeof (udp_header_t) +
187                                             sizeof (ip4_header_t)));
188               vlib_buffer_advance (b1,
189                                    -(word) (sizeof (udp_header_t) +
190                                             sizeof (ip4_header_t)));
191
192               iuvn4_0 = vlib_buffer_get_current (b0);
193               iuvn4_1 = vlib_buffer_get_current (b1);
194
195               /* pop (ip, udp, vxlan) */
196               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
197               vlib_buffer_advance (b1, sizeof (*iuvn4_1));
198             }
199           else
200             {
201               /* udp leaves current_data pointing at the vxlan-gpe header */
202               vlib_buffer_advance (b0,
203                                    -(word) (sizeof (udp_header_t) +
204                                             sizeof (ip6_header_t)));
205               vlib_buffer_advance (b1,
206                                    -(word) (sizeof (udp_header_t) +
207                                             sizeof (ip6_header_t)));
208
209               iuvn6_0 = vlib_buffer_get_current (b0);
210               iuvn6_1 = vlib_buffer_get_current (b1);
211
212               /* pop (ip, udp, vxlan) */
213               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
214               vlib_buffer_advance (b1, sizeof (*iuvn6_1));
215             }
216
217           tunnel_index0 = ~0;
218           tunnel_index1 = ~0;
219           error0 = 0;
220           error1 = 0;
221
222           if (is_ip4)
223             {
224               next0 =
225                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
226                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
227                 VXLAN_GPE_INPUT_NEXT_DROP;
228               next1 =
229                 (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
230                 nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
231                 VXLAN_GPE_INPUT_NEXT_DROP;
232
233               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
234               key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
235
236               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
237               key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
238
239               key4_0.vni = iuvn4_0->vxlan.vni_res;
240               key4_1.vni = iuvn4_1->vxlan.vni_res;
241
242               key4_0.pad = 0;
243               key4_1.pad = 0;
244             }
245           else                  /* is_ip6 */
246             {
247               next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
248                 iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
249               next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
250                 iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
251
252               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
253               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
254               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
255               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
256
257               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
258               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
259               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
260               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
261
262               key6_0.vni = iuvn6_0->vxlan.vni_res;
263               key6_1.vni = iuvn6_1->vxlan.vni_res;
264             }
265
266           /* Processing packet 0 */
267           if (is_ip4)
268             {
269               /* Processing for key4_0 */
270               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
271                                  || (key4_0.as_u64[1] !=
272                                      last_key4.as_u64[1])))
273                 {
274                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
275
276                   if (p0 == 0)
277                     {
278                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
279                       goto trace0;
280                     }
281
282                   last_key4.as_u64[0] = key4_0.as_u64[0];
283                   last_key4.as_u64[1] = key4_0.as_u64[1];
284                   tunnel_index0 = last_tunnel_index = p0[0];
285                 }
286               else
287                 tunnel_index0 = last_tunnel_index;
288             }
289           else                  /* is_ip6 */
290             {
291               next0 =
292                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
293                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
294                 VXLAN_GPE_INPUT_NEXT_DROP;
295               next1 =
296                 (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
297                 nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
298                 VXLAN_GPE_INPUT_NEXT_DROP;
299
300               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
301               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
302               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
303               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
304
305               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
306               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
307               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
308               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
309
310               key6_0.vni = iuvn6_0->vxlan.vni_res;
311               key6_1.vni = iuvn6_1->vxlan.vni_res;
312
313               /* Processing for key6_0 */
314               if (PREDICT_FALSE
315                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
316                 {
317                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
318
319                   if (p0 == 0)
320                     {
321                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
322                       goto trace0;
323                     }
324
325                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
326                   tunnel_index0 = last_tunnel_index = p0[0];
327                 }
328               else
329                 tunnel_index0 = last_tunnel_index;
330             }
331
332           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
333
334
335           sw_if_index0 = t0->sw_if_index;
336           len0 = vlib_buffer_length_in_chain (vm, b0);
337
338           /* Required to make the l2 tag push / pop code work on l2 subifs */
339           vnet_update_l2_len (b0);
340
341           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
342           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
343
344       /**
345        * ip[46] lookup in the configured FIB
346        */
347           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
348
349           pkts_decapsulated++;
350           stats_n_packets += 1;
351           stats_n_bytes += len0;
352
353           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
354             {
355               stats_n_packets -= 1;
356               stats_n_bytes -= len0;
357               if (stats_n_packets)
358                 vlib_increment_combined_counter (im->combined_sw_if_counters +
359                                                  VNET_INTERFACE_COUNTER_RX,
360                                                  thread_index,
361                                                  stats_sw_if_index,
362                                                  stats_n_packets,
363                                                  stats_n_bytes);
364               stats_n_packets = 1;
365               stats_n_bytes = len0;
366               stats_sw_if_index = sw_if_index0;
367             }
368
369         trace0:b0->error = error0 ? node->errors[error0] : 0;
370
371           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
372             {
373               vxlan_gpe_rx_trace_t *tr =
374                 vlib_add_trace (vm, node, b0, sizeof (*tr));
375               tr->next_index = next0;
376               tr->error = error0;
377               tr->tunnel_index = tunnel_index0;
378             }
379
380           /* Process packet 1 */
381           if (is_ip4)
382             {
383               /* Processing for key4_1 */
384               if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
385                                  || (key4_1.as_u64[1] !=
386                                      last_key4.as_u64[1])))
387                 {
388                   p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
389
390                   if (p1 == 0)
391                     {
392                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
393                       goto trace1;
394                     }
395
396                   last_key4.as_u64[0] = key4_1.as_u64[0];
397                   last_key4.as_u64[1] = key4_1.as_u64[1];
398                   tunnel_index1 = last_tunnel_index = p1[0];
399                 }
400               else
401                 tunnel_index1 = last_tunnel_index;
402             }
403           else                  /* is_ip6 */
404             {
405               /* Processing for key6_1 */
406               if (PREDICT_FALSE
407                   (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
408                 {
409                   p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
410
411                   if (p1 == 0)
412                     {
413                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
414                       goto trace1;
415                     }
416
417                   memcpy (&last_key6, &key6_1, sizeof (key6_1));
418                   tunnel_index1 = last_tunnel_index = p1[0];
419                 }
420               else
421                 tunnel_index1 = last_tunnel_index;
422             }
423
424           t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
425
426           sw_if_index1 = t1->sw_if_index;
427           len1 = vlib_buffer_length_in_chain (vm, b1);
428
429           /* Required to make the l2 tag push / pop code work on l2 subifs */
430           vnet_update_l2_len (b1);
431
432           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
433           vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
434
435           /*
436            * ip[46] lookup in the configured FIB
437            */
438           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
439
440           pkts_decapsulated++;
441           stats_n_packets += 1;
442           stats_n_bytes += len1;
443
444           /* Batch stats increment on the same vxlan tunnel so counter
445              is not incremented per packet */
446           if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
447             {
448               stats_n_packets -= 1;
449               stats_n_bytes -= len1;
450               if (stats_n_packets)
451                 vlib_increment_combined_counter (im->combined_sw_if_counters +
452                                                  VNET_INTERFACE_COUNTER_RX,
453                                                  thread_index,
454                                                  stats_sw_if_index,
455                                                  stats_n_packets,
456                                                  stats_n_bytes);
457               stats_n_packets = 1;
458               stats_n_bytes = len1;
459               stats_sw_if_index = sw_if_index1;
460             }
461           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
462
463         trace1:b1->error = error1 ? node->errors[error1] : 0;
464
465           if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
466             {
467               vxlan_gpe_rx_trace_t *tr =
468                 vlib_add_trace (vm, node, b1, sizeof (*tr));
469               tr->next_index = next1;
470               tr->error = error1;
471               tr->tunnel_index = tunnel_index1;
472             }
473
474           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
475                                            n_left_to_next, bi0, bi1, next0,
476                                            next1);
477         }
478
479       while (n_left_from > 0 && n_left_to_next > 0)
480         {
481           u32 bi0;
482           vlib_buffer_t *b0;
483           u32 next0;
484           ip4_vxlan_gpe_header_t *iuvn4_0;
485           ip6_vxlan_gpe_header_t *iuvn6_0;
486           uword *p0;
487           u32 tunnel_index0;
488           vxlan_gpe_tunnel_t *t0;
489           vxlan4_gpe_tunnel_key_t key4_0;
490           vxlan6_gpe_tunnel_key_t key6_0;
491           u32 error0;
492           u32 sw_if_index0, len0;
493
494           bi0 = from[0];
495           to_next[0] = bi0;
496           from += 1;
497           to_next += 1;
498           n_left_from -= 1;
499           n_left_to_next -= 1;
500
501           b0 = vlib_get_buffer (vm, bi0);
502
503           if (is_ip4)
504             {
505               /* udp leaves current_data pointing at the vxlan-gpe header */
506               vlib_buffer_advance (b0,
507                                    -(word) (sizeof (udp_header_t) +
508                                             sizeof (ip4_header_t)));
509
510               iuvn4_0 = vlib_buffer_get_current (b0);
511
512               /* pop (ip, udp, vxlan) */
513               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
514             }
515           else
516             {
517               /* udp leaves current_data pointing at the vxlan-gpe header */
518               vlib_buffer_advance (b0,
519                                    -(word) (sizeof (udp_header_t) +
520                                             sizeof (ip6_header_t)));
521
522               iuvn6_0 = vlib_buffer_get_current (b0);
523
524               /* pop (ip, udp, vxlan) */
525               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
526             }
527
528           tunnel_index0 = ~0;
529           error0 = 0;
530
531           if (is_ip4)
532             {
533               next0 =
534                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
535                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
536                 VXLAN_GPE_INPUT_NEXT_DROP;
537
538               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
539               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
540               key4_0.vni = iuvn4_0->vxlan.vni_res;
541               key4_0.pad = 0;
542
543               /* Processing for key4_0 */
544               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
545                                  || (key4_0.as_u64[1] !=
546                                      last_key4.as_u64[1])))
547                 {
548                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
549
550                   if (p0 == 0)
551                     {
552                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
553                       goto trace00;
554                     }
555
556                   last_key4.as_u64[0] = key4_0.as_u64[0];
557                   last_key4.as_u64[1] = key4_0.as_u64[1];
558                   tunnel_index0 = last_tunnel_index = p0[0];
559                 }
560               else
561                 tunnel_index0 = last_tunnel_index;
562             }
563           else                  /* is_ip6 */
564             {
565               next0 =
566                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
567                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
568                 VXLAN_GPE_INPUT_NEXT_DROP;
569
570               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
571               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
572               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
573               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
574               key6_0.vni = iuvn6_0->vxlan.vni_res;
575
576               /* Processing for key6_0 */
577               if (PREDICT_FALSE
578                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
579                 {
580                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
581
582                   if (p0 == 0)
583                     {
584                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
585                       goto trace00;
586                     }
587
588                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
589                   tunnel_index0 = last_tunnel_index = p0[0];
590                 }
591               else
592                 tunnel_index0 = last_tunnel_index;
593             }
594
595           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
596
597
598           sw_if_index0 = t0->sw_if_index;
599           len0 = vlib_buffer_length_in_chain (vm, b0);
600
601           /* Required to make the l2 tag push / pop code work on l2 subifs */
602           vnet_update_l2_len (b0);
603
604           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
605           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
606
607           /*
608            * ip[46] lookup in the configured FIB
609            */
610           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
611
612           pkts_decapsulated++;
613           stats_n_packets += 1;
614           stats_n_bytes += len0;
615
616           /* Batch stats increment on the same vxlan-gpe tunnel so counter
617              is not incremented per packet */
618           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
619             {
620               stats_n_packets -= 1;
621               stats_n_bytes -= len0;
622               if (stats_n_packets)
623                 vlib_increment_combined_counter (im->combined_sw_if_counters +
624                                                  VNET_INTERFACE_COUNTER_RX,
625                                                  thread_index,
626                                                  stats_sw_if_index,
627                                                  stats_n_packets,
628                                                  stats_n_bytes);
629               stats_n_packets = 1;
630               stats_n_bytes = len0;
631               stats_sw_if_index = sw_if_index0;
632             }
633
634         trace00:b0->error = error0 ? node->errors[error0] : 0;
635
636           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
637             {
638               vxlan_gpe_rx_trace_t *tr =
639                 vlib_add_trace (vm, node, b0, sizeof (*tr));
640               tr->next_index = next0;
641               tr->error = error0;
642               tr->tunnel_index = tunnel_index0;
643             }
644           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
645                                            n_left_to_next, bi0, next0);
646         }
647
648       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
649     }
650   vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
651                                VXLAN_GPE_ERROR_DECAPSULATED,
652                                pkts_decapsulated);
653   /* Increment any remaining batch stats */
654   if (stats_n_packets)
655     {
656       vlib_increment_combined_counter (im->combined_sw_if_counters +
657                                        VNET_INTERFACE_COUNTER_RX,
658                                        thread_index, stats_sw_if_index,
659                                        stats_n_packets, stats_n_bytes);
660       node->runtime_data[0] = stats_sw_if_index;
661     }
662   return from_frame->n_vectors;
663 }
664
665 /**
666  * @brief Graph processing dispatch function for IPv4 VXLAN GPE
667  *
668  * @node vxlan4-gpe-input
669  * @param *vm
670  * @param *node
671  * @param *from_frame
672  *
673  * @return from_frame->n_vectors
674  *
675  */
static uword
vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  /* Thin wrapper: the compile-time is_ip4=1 flag selects the IPv4
     branches inside the shared always_inline worker. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
682
683
684 void
685 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
686 {
687   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
688   hm->decap_next_node_list[protocol_id] = next_node_index;
689   return;
690 }
691
692 void
693 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
694 {
695   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
696   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
697   return;
698 }
699
700
701 /**
702  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
703  *
704  * @node vxlan6-gpe-input
705  * @param *vm
706  * @param *node
707  * @param *from_frame
708  *
709  * @return from_frame->n_vectors - uword
710  *
711  */
static uword
vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  /* Thin wrapper: the compile-time is_ip4=0 flag selects the IPv6
     branches inside the shared always_inline worker. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
718
719 /**
720  * @brief VXLAN GPE error strings
721  */
722 static char *vxlan_gpe_error_strings[] = {
723 #define vxlan_gpe_error(n,s) s,
724 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
725 #undef vxlan_gpe_error
726 #undef _
727 };
728
/* Graph node registration for the IPv4 VXLAN-GPE decap path.
   Next nodes come from foreach_vxlan_gpe_input_next; errors from the
   shared vxlan_gpe_error_strings table. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .function = vxlan4_gpe_input,
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
753
/* Graph node registration for the IPv6 VXLAN-GPE decap path; mirrors the
   IPv4 registration above. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .function = vxlan6_gpe_input,
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
/* Next-node indices for the ip[46]-vxlan-gpe-bypass feature nodes.
   NOTE(review): the tag "ip_vxan_bypass_next_t" looks like a typo for
   "ip_vxlan_bypass_next_t" — confirm no external users before renaming. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,	/**< Drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,	/**< Hand off to the vxlan-gpe input node */
  IP_VXLAN_BYPASS_N_NEXT,	/**< Number of next nodes */
} ip_vxan_bypass_next_t;
784
785 always_inline uword
786 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
787                             vlib_node_runtime_t * node,
788                             vlib_frame_t * frame, u32 is_ip4)
789 {
790   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
791   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
792   vlib_node_runtime_t *error_node =
793     vlib_node_get_runtime (vm, ip4_input_node.index);
794   ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
795   ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */
796
797   from = vlib_frame_vector_args (frame);
798   n_left_from = frame->n_vectors;
799   next_index = node->cached_next_index;
800
801   if (node->flags & VLIB_NODE_FLAG_TRACE)
802     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
803
804   if (is_ip4)
805     addr4.data_u32 = ~0;
806   else
807     ip6_address_set_zero (&addr6);
808
809   while (n_left_from > 0)
810     {
811       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
812
813       while (n_left_from >= 4 && n_left_to_next >= 2)
814         {
815           vlib_buffer_t *b0, *b1;
816           ip4_header_t *ip40, *ip41;
817           ip6_header_t *ip60, *ip61;
818           udp_header_t *udp0, *udp1;
819           u32 bi0, ip_len0, udp_len0, flags0, next0;
820           u32 bi1, ip_len1, udp_len1, flags1, next1;
821           i32 len_diff0, len_diff1;
822           u8 error0, good_udp0, proto0;
823           u8 error1, good_udp1, proto1;
824
825           /* Prefetch next iteration. */
826           {
827             vlib_buffer_t *p2, *p3;
828
829             p2 = vlib_get_buffer (vm, from[2]);
830             p3 = vlib_get_buffer (vm, from[3]);
831
832             vlib_prefetch_buffer_header (p2, LOAD);
833             vlib_prefetch_buffer_header (p3, LOAD);
834
835             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
836             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
837           }
838
839           bi0 = to_next[0] = from[0];
840           bi1 = to_next[1] = from[1];
841           from += 2;
842           n_left_from -= 2;
843           to_next += 2;
844           n_left_to_next -= 2;
845
846           b0 = vlib_get_buffer (vm, bi0);
847           b1 = vlib_get_buffer (vm, bi1);
848           if (is_ip4)
849             {
850               ip40 = vlib_buffer_get_current (b0);
851               ip41 = vlib_buffer_get_current (b1);
852             }
853           else
854             {
855               ip60 = vlib_buffer_get_current (b0);
856               ip61 = vlib_buffer_get_current (b1);
857             }
858
859           /* Setup packet for next IP feature */
860           vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
861                              b0);
862           vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
863                              b1);
864
865           if (is_ip4)
866             {
867               proto0 = ip40->protocol;
868               proto1 = ip41->protocol;
869             }
870           else
871             {
872               proto0 = ip60->protocol;
873               proto1 = ip61->protocol;
874             }
875
876           /* Process packet 0 */
877           if (proto0 != IP_PROTOCOL_UDP)
878             goto exit0;         /* not UDP packet */
879
880           if (is_ip4)
881             udp0 = ip4_next_header (ip40);
882           else
883             udp0 = ip6_next_header (ip60);
884
885           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
886             goto exit0;         /* not VXLAN packet */
887
888           /* Validate DIP against VTEPs */
889           if (is_ip4)
890             {
891               if (addr4.as_u32 != ip40->dst_address.as_u32)
892                 {
893                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
894                     goto exit0; /* no local VTEP for VXLAN packet */
895                   addr4 = ip40->dst_address;
896                 }
897             }
898           else
899             {
900               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
901                 {
902                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
903                     goto exit0; /* no local VTEP for VXLAN packet */
904                   addr6 = ip60->dst_address;
905                 }
906             }
907
908           flags0 = b0->flags;
909           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
910
911           /* Don't verify UDP checksum for packets with explicit zero checksum. */
912           good_udp0 |= udp0->checksum == 0;
913
914           /* Verify UDP length */
915           if (is_ip4)
916             ip_len0 = clib_net_to_host_u16 (ip40->length);
917           else
918             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
919           udp_len0 = clib_net_to_host_u16 (udp0->length);
920           len_diff0 = ip_len0 - udp_len0;
921
922           /* Verify UDP checksum */
923           if (PREDICT_FALSE (!good_udp0))
924             {
925               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
926                 {
927                   if (is_ip4)
928                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
929                   else
930                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
931                   good_udp0 =
932                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
933                 }
934             }
935
936           if (is_ip4)
937             {
938               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
939               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
940             }
941           else
942             {
943               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
944               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
945             }
946
947           next0 = error0 ?
948             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
949           b0->error = error0 ? error_node->errors[error0] : 0;
950
951           /* vxlan_gpe-input node expect current at VXLAN header */
952           if (is_ip4)
953             vlib_buffer_advance (b0,
954                                  sizeof (ip4_header_t) +
955                                  sizeof (udp_header_t));
956           else
957             vlib_buffer_advance (b0,
958                                  sizeof (ip6_header_t) +
959                                  sizeof (udp_header_t));
960
961         exit0:
962           /* Process packet 1 */
963           if (proto1 != IP_PROTOCOL_UDP)
964             goto exit1;         /* not UDP packet */
965
966           if (is_ip4)
967             udp1 = ip4_next_header (ip41);
968           else
969             udp1 = ip6_next_header (ip61);
970
971           if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
972             goto exit1;         /* not VXLAN packet */
973
974           /* Validate DIP against VTEPs */
975           if (is_ip4)
976             {
977               if (addr4.as_u32 != ip41->dst_address.as_u32)
978                 {
979                   if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
980                     goto exit1; /* no local VTEP for VXLAN packet */
981                   addr4 = ip41->dst_address;
982                 }
983             }
984           else
985             {
986               if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
987                 {
988                   if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
989                     goto exit1; /* no local VTEP for VXLAN packet */
990                   addr6 = ip61->dst_address;
991                 }
992             }
993
994           flags1 = b1->flags;
995           good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
996
997           /* Don't verify UDP checksum for packets with explicit zero checksum. */
998           good_udp1 |= udp1->checksum == 0;
999
1000           /* Verify UDP length */
1001           if (is_ip4)
1002             ip_len1 = clib_net_to_host_u16 (ip41->length);
1003           else
1004             ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
1005           udp_len1 = clib_net_to_host_u16 (udp1->length);
1006           len_diff1 = ip_len1 - udp_len1;
1007
1008           /* Verify UDP checksum */
1009           if (PREDICT_FALSE (!good_udp1))
1010             {
1011               if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1012                 {
1013                   if (is_ip4)
1014                     flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1015                   else
1016                     flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1017                   good_udp1 =
1018                     (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1019                 }
1020             }
1021
1022           if (is_ip4)
1023             {
1024               error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1025               error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1026             }
1027           else
1028             {
1029               error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1030               error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1031             }
1032
1033           next1 = error1 ?
1034             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1035           b1->error = error1 ? error_node->errors[error1] : 0;
1036
1037           /* vxlan_gpe-input node expect current at VXLAN header */
1038           if (is_ip4)
1039             vlib_buffer_advance (b1,
1040                                  sizeof (ip4_header_t) +
1041                                  sizeof (udp_header_t));
1042           else
1043             vlib_buffer_advance (b1,
1044                                  sizeof (ip6_header_t) +
1045                                  sizeof (udp_header_t));
1046
1047         exit1:
1048           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1049                                            to_next, n_left_to_next,
1050                                            bi0, bi1, next0, next1);
1051         }
1052
1053       while (n_left_from > 0 && n_left_to_next > 0)
1054         {
1055           vlib_buffer_t *b0;
1056           ip4_header_t *ip40;
1057           ip6_header_t *ip60;
1058           udp_header_t *udp0;
1059           u32 bi0, ip_len0, udp_len0, flags0, next0;
1060           i32 len_diff0;
1061           u8 error0, good_udp0, proto0;
1062
1063           bi0 = to_next[0] = from[0];
1064           from += 1;
1065           n_left_from -= 1;
1066           to_next += 1;
1067           n_left_to_next -= 1;
1068
1069           b0 = vlib_get_buffer (vm, bi0);
1070           if (is_ip4)
1071             ip40 = vlib_buffer_get_current (b0);
1072           else
1073             ip60 = vlib_buffer_get_current (b0);
1074
1075           /* Setup packet for next IP feature */
1076           vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
1077                              b0);
1078
1079           if (is_ip4)
1080             proto0 = ip40->protocol;
1081           else
1082             proto0 = ip60->protocol;
1083
1084           if (proto0 != IP_PROTOCOL_UDP)
1085             goto exit;          /* not UDP packet */
1086
1087           if (is_ip4)
1088             udp0 = ip4_next_header (ip40);
1089           else
1090             udp0 = ip6_next_header (ip60);
1091
1092           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
1093             goto exit;          /* not VXLAN packet */
1094
1095           /* Validate DIP against VTEPs */
1096           if (is_ip4)
1097             {
1098               if (addr4.as_u32 != ip40->dst_address.as_u32)
1099                 {
1100                   if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
1101                     goto exit;  /* no local VTEP for VXLAN packet */
1102                   addr4 = ip40->dst_address;
1103                 }
1104             }
1105           else
1106             {
1107               if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
1108                 {
1109                   if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
1110                     goto exit;  /* no local VTEP for VXLAN packet */
1111                   addr6 = ip60->dst_address;
1112                 }
1113             }
1114
1115           flags0 = b0->flags;
1116           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1117
1118           /* Don't verify UDP checksum for packets with explicit zero checksum. */
1119           good_udp0 |= udp0->checksum == 0;
1120
1121           /* Verify UDP length */
1122           if (is_ip4)
1123             ip_len0 = clib_net_to_host_u16 (ip40->length);
1124           else
1125             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1126           udp_len0 = clib_net_to_host_u16 (udp0->length);
1127           len_diff0 = ip_len0 - udp_len0;
1128
1129           /* Verify UDP checksum */
1130           if (PREDICT_FALSE (!good_udp0))
1131             {
1132               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1133                 {
1134                   if (is_ip4)
1135                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1136                   else
1137                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1138                   good_udp0 =
1139                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1140                 }
1141             }
1142
1143           if (is_ip4)
1144             {
1145               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1146               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1147             }
1148           else
1149             {
1150               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1151               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1152             }
1153
1154           next0 = error0 ?
1155             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1156           b0->error = error0 ? error_node->errors[error0] : 0;
1157
1158           /* vxlan_gpe-input node expect current at VXLAN header */
1159           if (is_ip4)
1160             vlib_buffer_advance (b0,
1161                                  sizeof (ip4_header_t) +
1162                                  sizeof (udp_header_t));
1163           else
1164             vlib_buffer_advance (b0,
1165                                  sizeof (ip6_header_t) +
1166                                  sizeof (udp_header_t));
1167
1168         exit:
1169           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1170                                            to_next, n_left_to_next,
1171                                            bi0, next0);
1172         }
1173
1174       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1175     }
1176
1177   return frame->n_vectors;
1178 }
1179
1180 static uword
1181 ip4_vxlan_gpe_bypass (vlib_main_t * vm,
1182                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1183 {
1184   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1185 }
1186
/* *INDENT-OFF* */
/*
 * Registration for the "ip4-vxlan-gpe-bypass" graph node.  Matching
 * packets are forwarded to vxlan4-gpe-input; packets failing UDP
 * length/checksum validation go to error-drop (see the inline worker).
 */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .function = ip4_vxlan_gpe_bypass,
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  /* Buffers/traces are IPv4 headers; reuse the ip4-forward formatters. */
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1203
/* Generate per-architecture variants of the ip4 bypass node function. */
VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
/* Dummy init function to get us linked in. */
     clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
1212
1213 static uword
1214 ip6_vxlan_gpe_bypass (vlib_main_t * vm,
1215                       vlib_node_runtime_t * node, vlib_frame_t * frame)
1216 {
1217   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1218 }
1219
/* *INDENT-OFF* */
/*
 * Registration for the "ip6-vxlan-gpe-bypass" graph node.  Matching
 * packets are forwarded to vxlan6-gpe-input; packets failing UDP
 * length/checksum validation go to error-drop (see the inline worker).
 */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .function = ip6_vxlan_gpe_bypass,
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  /* Buffers/traces are IPv6 headers; reuse the ip6-forward formatters. */
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1236
/* Generate per-architecture variants of the ip6 bypass node function. */
VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
/* Dummy init function to get us linked in. */
     clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
1245
1246 /*
1247  * fd.io coding-style-patch-verification: ON
1248  *
1249  * Local Variables:
1250  * eval: (c-set-style "gnu")
1251  * End:
1252  */