misc: Purge unused pg includes
[vpp.git] / src / vnet / vxlan-gpe / decap.c
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/vxlan-gpe/vxlan_gpe.h>
25
/**
 * @brief Struct for VXLAN GPE decap packet tracing
 *
 * Filled in by the input node when VLIB_BUFFER_IS_TRACED is set;
 * rendered by format_vxlan_gpe_rx_trace().
 */
typedef struct
{
  u32 next_index;		/**< graph next-node index chosen for the packet */
  u32 tunnel_index;		/**< index into vxlan_gpe_main.tunnels, ~0 if no tunnel matched */
  u32 error;			/**< VXLAN_GPE_ERROR_* code, 0 on success */
} vxlan_gpe_rx_trace_t;
36
37 /**
38  * @brief Tracing function for VXLAN GPE packet decapsulation
39  *
40  * @param *s
41  * @param *args
42  *
43  * @return *s
44  *
45  */
46 static u8 *
47 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
48 {
49   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
50   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
51   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
52
53   if (t->tunnel_index != ~0)
54     {
55       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
56                   t->next_index, t->error);
57     }
58   else
59     {
60       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
61                   t->error);
62     }
63   return s;
64 }
65
/**
 * @brief Buffer-format function registered as .format_buffer for the
 * vxlan[46]-gpe-input nodes.
 *
 * NOTE(review): this is a stub — it consumes two va_args and returns the
 * format string unchanged; nothing (length or otherwise) is actually
 * printed despite the name. Confirm whether real output was ever intended.
 *
 * @param *s     format string being built
 * @param *args  va_list consumed as (vlib_main_t *, vlib_node_t *)
 *
 * @return *s unchanged
 *
 */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);


  return s;
}
84
85 /**
86  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
87  *
88  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
89  * tunnels are "terminate local". This means that there is no "TX" interface for this
90  * decap case, so that field in the buffer_metadata can be "used for something else".
91  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
92  * FIB index used to look up the inner-packet's adjacency.
93  *
94  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
95  *
96  * @param *vm
97  * @param *node
98  * @param *from_frame
99  * @param is_ip4
100  *
101  * @return from_frame->n_vectors
102  *
103  */
always_inline uword
vxlan_gpe_input (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
  vnet_main_t *vnm = nngm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  /* One-entry lookup cache: consecutive packets usually belong to the same
   * tunnel, so remember the last key -> tunnel-index mapping and skip the
   * hash lookup when the key repeats. */
  u32 last_tunnel_index = ~0;
  vxlan4_gpe_tunnel_key_t last_key4;
  vxlan6_gpe_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  /* 0xff-fill makes the cached key invalid so the first packet always
   * performs a real hash lookup. */
  if (is_ip4)
    clib_memset (&last_key4, 0xff, sizeof (last_key4));
  else
    clib_memset (&last_key6, 0xff, sizeof (last_key6));

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  /* runtime_data[0] persists the sw_if_index whose RX counters are being
   * batched, across dispatch invocations. */
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Two-packets-at-a-time loop with prefetch of the following pair. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
          ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
          uword *p0, *p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_gpe_tunnel_t *t0, *t1;
          vxlan4_gpe_tunnel_key_t key4_0, key4_1;
          vxlan6_gpe_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header;
               * rewind to expose the outer ip4+udp for key extraction. */
              vlib_buffer_advance (b0,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip4_header_t)));
              vlib_buffer_advance (b1,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);
              iuvn4_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn4_0));
              vlib_buffer_advance (b1, sizeof (*iuvn4_1));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (b0,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip6_header_t)));
              vlib_buffer_advance (b1,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);
              iuvn6_1 = vlib_buffer_get_current (b1);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn6_0));
              vlib_buffer_advance (b1, sizeof (*iuvn6_1));
            }

          tunnel_index0 = ~0;
          tunnel_index1 = ~0;
          error0 = 0;
          error1 = 0;

          if (is_ip4)
            {
              /* Dispatch on the GPE next-protocol field; out-of-range
               * protocols are dropped. */
              next0 =
                (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;
              next1 =
                (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              /* Tunnel key = outer (dst, src, vni); local == our address
               * because the packet was received on it. */
              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_1.local = iuvn4_1->ip4.dst_address.as_u32;

              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_1.remote = iuvn4_1->ip4.src_address.as_u32;

              key4_0.vni = iuvn4_0->vxlan.vni_res;
              key4_1.vni = iuvn4_1->vxlan.vni_res;

              key4_0.pad = 0;
              key4_1.pad = 0;
            }
          else                  /* is_ip6 */
            {
              /* NOTE(review): this whole ip6 branch appears redundant —
               * next0/next1 are overwritten and the key6 assignments are
               * repeated verbatim in the "Processing packet 0" is_ip6
               * branch below. Also note it bounds-checks the protocol
               * against node->n_next_nodes here, unlike everywhere else
               * which uses VXLAN_GPE_PROTOCOL_MAX. Candidate for removal —
               * confirm against upstream history. */
              next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
                iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
              next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
                iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
              key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];

              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
              key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];

              key6_0.vni = iuvn6_0->vxlan.vni_res;
              key6_1.vni = iuvn6_1->vxlan.vni_res;
            }

          /* Processing packet 0 */
          if (is_ip4)
            {
              /* Processing for key4_0: consult the one-entry cache first,
               * fall back to the hash table on mismatch. */
              if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
                                 || (key4_0.as_u64[1] !=
                                     last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else                  /* is_ip6 */
            {
              /* Recompute nexts from decap_next_node_list (overriding the
               * values set in the first is_ip6 branch above) and redo the
               * key assignments before the lookup. */
              next0 =
                (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;
              next1 =
                (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
              key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];

              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
              key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];

              key6_0.vni = iuvn6_0->vxlan.vni_res;
              key6_1.vni = iuvn6_1->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE
                  (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
                {
                  p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace0;
                    }

                  memcpy (&last_key6, &key6_0, sizeof (key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }

          t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);


          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;

          /* VLIB_TX is repurposed as the FIB index for the inner-packet
           * ip[46] lookup (decap is terminate-local; there is no real TX
           * interface here). */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Flush the batched RX counters when the tunnel interface
           * changes, then restart the batch with this packet. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index,
                                                 stats_sw_if_index,
                                                 stats_n_packets,
                                                 stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace0:b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }

          /* Process packet 1 */
          if (is_ip4)
            {
              /* Processing for key4_1 */
              if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
                                 || (key4_1.as_u64[1] !=
                                     last_key4.as_u64[1])))
                {
                  p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  last_key4.as_u64[0] = key4_1.as_u64[0];
                  last_key4.as_u64[1] = key4_1.as_u64[1];
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }
          else                  /* is_ip6 */
            {
              /* Processing for key6_1 */
              if (PREDICT_FALSE
                  (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
                {
                  p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);

                  if (p1 == 0)
                    {
                      error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace1;
                    }

                  memcpy (&last_key6, &key6_1, sizeof (key6_1));
                  tunnel_index1 = last_tunnel_index = p1[0];
                }
              else
                tunnel_index1 = last_tunnel_index;
            }

          t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);

          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b1);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index,
                                                 stats_sw_if_index,
                                                 stats_n_packets,
                                                 stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }
          /* NOTE(review): duplicate of the VLIB_TX assignment a few lines
           * above — idempotent/harmless but redundant; candidate for
           * removal. */
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;

        trace1:b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);
        }

      /* Single-packet cleanup loop: same logic as above for the leftovers. */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          ip4_vxlan_gpe_header_t *iuvn4_0;
          ip6_vxlan_gpe_header_t *iuvn6_0;
          uword *p0;
          u32 tunnel_index0;
          vxlan_gpe_tunnel_t *t0;
          vxlan4_gpe_tunnel_key_t key4_0;
          vxlan6_gpe_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          if (is_ip4)
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (b0,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip4_header_t)));

              iuvn4_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn4_0));
            }
          else
            {
              /* udp leaves current_data pointing at the vxlan-gpe header */
              vlib_buffer_advance (b0,
                                   -(word) (sizeof (udp_header_t) +
                                            sizeof (ip6_header_t)));

              iuvn6_0 = vlib_buffer_get_current (b0);

              /* pop (ip, udp, vxlan) */
              vlib_buffer_advance (b0, sizeof (*iuvn6_0));
            }

          tunnel_index0 = ~0;
          error0 = 0;

          if (is_ip4)
            {
              next0 =
                (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
              key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
              key4_0.vni = iuvn4_0->vxlan.vni_res;
              key4_0.pad = 0;

              /* Processing for key4_0 */
              if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
                                 || (key4_0.as_u64[1] !=
                                     last_key4.as_u64[1])))
                {
                  p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  last_key4.as_u64[0] = key4_0.as_u64[0];
                  last_key4.as_u64[1] = key4_0.as_u64[1];
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }
          else                  /* is_ip6 */
            {
              next0 =
                (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
                nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
                VXLAN_GPE_INPUT_NEXT_DROP;

              key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
              key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
              key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
              key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
              key6_0.vni = iuvn6_0->vxlan.vni_res;

              /* Processing for key6_0 */
              if (PREDICT_FALSE
                  (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
                {
                  p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);

                  if (p0 == 0)
                    {
                      error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
                      goto trace00;
                    }

                  memcpy (&last_key6, &key6_0, sizeof (key6_0));
                  tunnel_index0 = last_tunnel_index = p0[0];
                }
              else
                tunnel_index0 = last_tunnel_index;
            }

          t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);


          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          vnet_update_l2_len (b0);

          /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;

          /*
           * ip[46] lookup in the configured FIB
           */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;

          pkts_decapsulated++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan-gpe tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_RX,
                                                 thread_index,
                                                 stats_sw_if_index,
                                                 stats_n_packets,
                                                 stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_rx_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Node-level decap counter; selects the ip4 or ip6 node index. */
  vlib_node_increment_counter (vm,
                               is_ip4 ? vxlan4_gpe_input_node.index :
                               vxlan6_gpe_input_node.index,
                               VXLAN_GPE_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (im->combined_sw_if_counters +
                                       VNET_INTERFACE_COUNTER_RX,
                                       thread_index, stats_sw_if_index,
                                       stats_n_packets, stats_n_bytes);
      /* Remember the interface for the next dispatch's batching. */
      node->runtime_data[0] = stats_sw_if_index;
    }
  return from_frame->n_vectors;
}
665
/**
 * @brief Graph processing dispatch function for IPv4 VXLAN GPE
 *
 * Thin wrapper that runs vxlan_gpe_input() with is_ip4 = 1.
 *
 * @node vxlan4-gpe-input
 * @param *vm          vlib_main_t
 * @param *node        this node's runtime
 * @param *from_frame  frame of buffer indices to decapsulate
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
683
684 #ifndef CLIB_MARCH_VARIANT
685 void
686 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
687 {
688   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
689   hm->decap_next_node_list[protocol_id] = next_node_index;
690   return;
691 }
692
693 void
694 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
695 {
696   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
697   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
698   return;
699 }
700 #endif /* CLIB_MARCH_VARIANT */
701
/**
 * @brief Graph processing dispatch function for IPv6 VXLAN GPE
 *
 * Thin wrapper that runs vxlan_gpe_input() with is_ip4 = 0.
 *
 * @node vxlan6-gpe-input
 * @param *vm          vlib_main_t
 * @param *node        this node's runtime
 * @param *from_frame  frame of buffer indices to decapsulate
 *
 * @return from_frame->n_vectors - uword
 *
 */
VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
719
720 /**
721  * @brief VXLAN GPE error strings
722  */
723 static char *vxlan_gpe_error_strings[] = {
724 #define vxlan_gpe_error(n,s) s,
725 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
726 #undef vxlan_gpe_error
727 #undef _
728 };
729
/* *INDENT-OFF* */
/* IPv4 VXLAN-GPE decap node registration; next nodes are generated from
   foreach_vxlan_gpe_input_next. */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */
751
/* *INDENT-OFF* */
/* IPv6 VXLAN-GPE decap node registration; mirrors the IPv4 node above. */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */
773
/* Next nodes for the ip[46]-vxlan-gpe-bypass feature nodes. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,	/**< drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,	/**< presumably the vxlan[46]-gpe-input node — confirm in the node registration (below this chunk) */
  IP_VXLAN_BYPASS_N_NEXT,	/**< number of next nodes */
} ip_vxlan_bypass_next_t;
780
781 always_inline uword
782 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
783                             vlib_node_runtime_t * node,
784                             vlib_frame_t * frame, u32 is_ip4)
785 {
786   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
787   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
788   vlib_node_runtime_t *error_node =
789     vlib_node_get_runtime (vm, ip4_input_node.index);
790   vtep4_key_t last_vtep4;       /* last IPv4 address / fib index
791                                    matching a local VTEP address */
792   vtep6_key_t last_vtep6;       /* last IPv6 address / fib index
793                                    matching a local VTEP address */
794   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
795 #ifdef CLIB_HAVE_VEC512
796   vtep4_cache_t vtep4_u512;
797   clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
798 #endif
799
800   from = vlib_frame_vector_args (frame);
801   n_left_from = frame->n_vectors;
802   next_index = node->cached_next_index;
803
804   vlib_get_buffers (vm, from, bufs, n_left_from);
805
806   if (node->flags & VLIB_NODE_FLAG_TRACE)
807     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
808
809   if (is_ip4)
810     vtep4_key_init (&last_vtep4);
811   else
812     vtep6_key_init (&last_vtep6);
813
814   while (n_left_from > 0)
815     {
816       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
817
818       while (n_left_from >= 4 && n_left_to_next >= 2)
819         {
820           vlib_buffer_t *b0, *b1;
821           ip4_header_t *ip40, *ip41;
822           ip6_header_t *ip60, *ip61;
823           udp_header_t *udp0, *udp1;
824           u32 bi0, ip_len0, udp_len0, flags0, next0;
825           u32 bi1, ip_len1, udp_len1, flags1, next1;
826           i32 len_diff0, len_diff1;
827           u8 error0, good_udp0, proto0;
828           u8 error1, good_udp1, proto1;
829
830           /* Prefetch next iteration. */
831           {
832             vlib_prefetch_buffer_header (b[2], LOAD);
833             vlib_prefetch_buffer_header (b[3], LOAD);
834
835             CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
836             CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
837           }
838
839           bi0 = to_next[0] = from[0];
840           bi1 = to_next[1] = from[1];
841           from += 2;
842           n_left_from -= 2;
843           to_next += 2;
844           n_left_to_next -= 2;
845
846           b0 = b[0];
847           b1 = b[1];
848           b += 2;
849           if (is_ip4)
850             {
851               ip40 = vlib_buffer_get_current (b0);
852               ip41 = vlib_buffer_get_current (b1);
853             }
854           else
855             {
856               ip60 = vlib_buffer_get_current (b0);
857               ip61 = vlib_buffer_get_current (b1);
858             }
859
860           /* Setup packet for next IP feature */
861           vnet_feature_next (&next0, b0);
862           vnet_feature_next (&next1, b1);
863
864           if (is_ip4)
865             {
866               proto0 = ip40->protocol;
867               proto1 = ip41->protocol;
868             }
869           else
870             {
871               proto0 = ip60->protocol;
872               proto1 = ip61->protocol;
873             }
874
875           /* Process packet 0 */
876           if (proto0 != IP_PROTOCOL_UDP)
877             goto exit0;         /* not UDP packet */
878
879           if (is_ip4)
880             udp0 = ip4_next_header (ip40);
881           else
882             udp0 = ip6_next_header (ip60);
883
884           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
885             goto exit0;         /* not VXLAN packet */
886
887           /* Validate DIP against VTEPs */
888           if (is_ip4)
889             {
890 #ifdef CLIB_HAVE_VEC512
891               if (!vtep4_check_vector
892                   (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
893 #else
894               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
895 #endif
896                 goto exit0;     /* no local VTEP for VXLAN packet */
897             }
898           else
899             {
900               if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
901                 goto exit0;     /* no local VTEP for VXLAN packet */
902             }
903
904           flags0 = b0->flags;
905           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
906
907           /* Don't verify UDP checksum for packets with explicit zero checksum. */
908           good_udp0 |= udp0->checksum == 0;
909
910           /* Verify UDP length */
911           if (is_ip4)
912             ip_len0 = clib_net_to_host_u16 (ip40->length);
913           else
914             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
915           udp_len0 = clib_net_to_host_u16 (udp0->length);
916           len_diff0 = ip_len0 - udp_len0;
917
918           /* Verify UDP checksum */
919           if (PREDICT_FALSE (!good_udp0))
920             {
921               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
922                 {
923                   if (is_ip4)
924                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
925                   else
926                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
927                   good_udp0 =
928                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
929                 }
930             }
931
932           if (is_ip4)
933             {
934               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
935               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
936             }
937           else
938             {
939               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
940               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
941             }
942
943           next0 = error0 ?
944             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
945           b0->error = error0 ? error_node->errors[error0] : 0;
946
947           /* vxlan_gpe-input node expect current at VXLAN header */
948           if (is_ip4)
949             vlib_buffer_advance (b0,
950                                  sizeof (ip4_header_t) +
951                                  sizeof (udp_header_t));
952           else
953             vlib_buffer_advance (b0,
954                                  sizeof (ip6_header_t) +
955                                  sizeof (udp_header_t));
956
957         exit0:
958           /* Process packet 1 */
959           if (proto1 != IP_PROTOCOL_UDP)
960             goto exit1;         /* not UDP packet */
961
962           if (is_ip4)
963             udp1 = ip4_next_header (ip41);
964           else
965             udp1 = ip6_next_header (ip61);
966
967           if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
968             goto exit1;         /* not VXLAN packet */
969
970           /* Validate DIP against VTEPs */
971           if (is_ip4)
972             {
973 #ifdef CLIB_HAVE_VEC512
974               if (!vtep4_check_vector
975                   (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
976 #else
977               if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
978 #endif
979                 goto exit1;     /* no local VTEP for VXLAN packet */
980             }
981           else
982             {
983               if (!vtep6_check (&ngm->vtep_table, b1, ip61, &last_vtep6))
984                 goto exit1;     /* no local VTEP for VXLAN packet */
985             }
986
987           flags1 = b1->flags;
988           good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
989
990           /* Don't verify UDP checksum for packets with explicit zero checksum. */
991           good_udp1 |= udp1->checksum == 0;
992
993           /* Verify UDP length */
994           if (is_ip4)
995             ip_len1 = clib_net_to_host_u16 (ip41->length);
996           else
997             ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
998           udp_len1 = clib_net_to_host_u16 (udp1->length);
999           len_diff1 = ip_len1 - udp_len1;
1000
1001           /* Verify UDP checksum */
1002           if (PREDICT_FALSE (!good_udp1))
1003             {
1004               if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1005                 {
1006                   if (is_ip4)
1007                     flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1008                   else
1009                     flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1010                   good_udp1 =
1011                     (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1012                 }
1013             }
1014
1015           if (is_ip4)
1016             {
1017               error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1018               error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1019             }
1020           else
1021             {
1022               error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1023               error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1024             }
1025
1026           next1 = error1 ?
1027             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1028           b1->error = error1 ? error_node->errors[error1] : 0;
1029
1030           /* vxlan_gpe-input node expect current at VXLAN header */
1031           if (is_ip4)
1032             vlib_buffer_advance (b1,
1033                                  sizeof (ip4_header_t) +
1034                                  sizeof (udp_header_t));
1035           else
1036             vlib_buffer_advance (b1,
1037                                  sizeof (ip6_header_t) +
1038                                  sizeof (udp_header_t));
1039
1040         exit1:
1041           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1042                                            to_next, n_left_to_next,
1043                                            bi0, bi1, next0, next1);
1044         }
1045
1046       while (n_left_from > 0 && n_left_to_next > 0)
1047         {
1048           vlib_buffer_t *b0;
1049           ip4_header_t *ip40;
1050           ip6_header_t *ip60;
1051           udp_header_t *udp0;
1052           u32 bi0, ip_len0, udp_len0, flags0, next0;
1053           i32 len_diff0;
1054           u8 error0, good_udp0, proto0;
1055
1056           bi0 = to_next[0] = from[0];
1057           from += 1;
1058           n_left_from -= 1;
1059           to_next += 1;
1060           n_left_to_next -= 1;
1061
1062           b0 = b[0];
1063           b++;
1064           if (is_ip4)
1065             ip40 = vlib_buffer_get_current (b0);
1066           else
1067             ip60 = vlib_buffer_get_current (b0);
1068
1069           /* Setup packet for next IP feature */
1070           vnet_feature_next (&next0, b0);
1071
1072           if (is_ip4)
1073             proto0 = ip40->protocol;
1074           else
1075             proto0 = ip60->protocol;
1076
1077           if (proto0 != IP_PROTOCOL_UDP)
1078             goto exit;          /* not UDP packet */
1079
1080           if (is_ip4)
1081             udp0 = ip4_next_header (ip40);
1082           else
1083             udp0 = ip6_next_header (ip60);
1084
1085           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
1086             goto exit;          /* not VXLAN packet */
1087
1088           /* Validate DIP against VTEPs */
1089
1090           if (is_ip4)
1091             {
1092 #ifdef CLIB_HAVE_VEC512
1093               if (!vtep4_check_vector
1094                   (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
1095 #else
1096               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
1097 #endif
1098                 goto exit;      /* no local VTEP for VXLAN packet */
1099             }
1100           else
1101             {
1102               if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
1103                 goto exit;      /* no local VTEP for VXLAN packet */
1104             }
1105
1106           flags0 = b0->flags;
1107           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1108
1109           /* Don't verify UDP checksum for packets with explicit zero checksum. */
1110           good_udp0 |= udp0->checksum == 0;
1111
1112           /* Verify UDP length */
1113           if (is_ip4)
1114             ip_len0 = clib_net_to_host_u16 (ip40->length);
1115           else
1116             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1117           udp_len0 = clib_net_to_host_u16 (udp0->length);
1118           len_diff0 = ip_len0 - udp_len0;
1119
1120           /* Verify UDP checksum */
1121           if (PREDICT_FALSE (!good_udp0))
1122             {
1123               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1124                 {
1125                   if (is_ip4)
1126                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1127                   else
1128                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1129                   good_udp0 =
1130                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1131                 }
1132             }
1133
1134           if (is_ip4)
1135             {
1136               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1137               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1138             }
1139           else
1140             {
1141               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1142               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1143             }
1144
1145           next0 = error0 ?
1146             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1147           b0->error = error0 ? error_node->errors[error0] : 0;
1148
1149           /* vxlan_gpe-input node expect current at VXLAN header */
1150           if (is_ip4)
1151             vlib_buffer_advance (b0,
1152                                  sizeof (ip4_header_t) +
1153                                  sizeof (udp_header_t));
1154           else
1155             vlib_buffer_advance (b0,
1156                                  sizeof (ip6_header_t) +
1157                                  sizeof (udp_header_t));
1158
1159         exit:
1160           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1161                                            to_next, n_left_to_next,
1162                                            bi0, next0);
1163         }
1164
1165       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1166     }
1167
1168   return frame->n_vectors;
1169 }
1170
/**
 * @brief Graph node dispatch function for the IPv4 vxlan-gpe-bypass feature
 *
 * Thin wrapper invoking the shared ip_vxlan_gpe_bypass_inline() worker
 * with is_ip4 = 1.
 *
 * @param *vm
 * @param *node
 * @param *frame
 *
 * @return frame->n_vectors (number of packets processed)
 *
 */
VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
1177
/* *INDENT-OFF* */
/** Node registration for "ip4-vxlan-gpe-bypass": packets the bypass check
 *  accepts go to "vxlan4-gpe-input"; failures go to "error-drop". */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1193
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
/**
 * @brief No-op init function; registered only so this translation unit
 *        is pulled into the image at link time.
 *
 * @param *vm
 *
 * @return 0 (no error)
 *
 */
clib_error_t *
ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
1204
/**
 * @brief Graph node dispatch function for the IPv6 vxlan-gpe-bypass feature
 *
 * Thin wrapper invoking the shared ip_vxlan_gpe_bypass_inline() worker
 * with is_ip4 = 0.
 *
 * @param *vm
 * @param *node
 * @param *frame
 *
 * @return frame->n_vectors (number of packets processed)
 *
 */
VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
1211
/* *INDENT-OFF* */
/** Node registration for "ip6-vxlan-gpe-bypass": packets the bypass check
 *  accepts go to "vxlan6-gpe-input"; failures go to "error-drop". */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1227
#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
/**
 * @brief No-op init function; registered only so this translation unit
 *        is pulled into the image at link time.
 *
 * @param *vm
 *
 * @return 0 (no error)
 *
 */
clib_error_t *
ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
#endif /* CLIB_MARCH_VARIANT */
1238
1239 /*
1240  * fd.io coding-style-patch-verification: ON
1241  *
1242  * Local Variables:
1243  * eval: (c-set-style "gnu")
1244  * End:
1245  */