misc: minimize dependencies on udp.h
[vpp.git] / src / vnet / vxlan-gpe / decap.c
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/udp/udp_local.h>
25 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26
27 /**
28  * @brief Struct for VXLAN GPE decap packet tracing
29  *
30  */
typedef struct
{
  u32 next_index;		/**< graph next-node index chosen for the packet */
  u32 tunnel_index;		/**< decap tunnel pool index, or ~0 if no tunnel matched */
  u32 error;			/**< node error code (0 if none) */
} vxlan_gpe_rx_trace_t;
37
38 /**
39  * @brief Tracing function for VXLAN GPE packet decapsulation
40  *
41  * @param *s
42  * @param *args
43  *
44  * @return *s
45  *
46  */
47 static u8 *
48 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
49 {
50   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
53
54   if (t->tunnel_index != ~0)
55     {
56       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
57                   t->next_index, t->error);
58     }
59   else
60     {
61       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
62                   t->error);
63     }
64   return s;
65 }
66
67 /**
68  * @brief Tracing function for VXLAN GPE packet decapsulation including length
69  *
70  * @param *s
71  * @param *args
72  *
73  * @return *s
74  *
75  */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  /* NOTE(review): this is a stub. It pops the (vm, node) arguments off the
   * va_list but never reads the buffer, and returns s unmodified — so
   * nodes using it as .format_buffer print nothing for the packet body.
   * TODO: implement, or confirm the empty output is intentional. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);


  return s;
}
85
86 /**
87  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
88  *
89  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
90  * tunnels are "terminate local". This means that there is no "TX" interface for this
91  * decap case, so that field in the buffer_metadata can be "used for something else".
92  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
93  * FIB index used to look up the inner-packet's adjacency.
94  *
95  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
96  *
97  * @param *vm
98  * @param *node
99  * @param *from_frame
100  * @param is_ip4
101  *
102  * @return from_frame->n_vectors
103  *
104  */
105 always_inline uword
106 vxlan_gpe_input (vlib_main_t * vm,
107                  vlib_node_runtime_t * node,
108                  vlib_frame_t * from_frame, u8 is_ip4)
109 {
110   u32 n_left_from, next_index, *from, *to_next;
111   vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
112   vnet_main_t *vnm = nngm->vnet_main;
113   vnet_interface_main_t *im = &vnm->interface_main;
114   u32 last_tunnel_index = ~0;
115   vxlan4_gpe_tunnel_key_t last_key4;
116   vxlan6_gpe_tunnel_key_t last_key6;
117   u32 pkts_decapsulated = 0;
118   u32 thread_index = vm->thread_index;
119   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
120
121   if (is_ip4)
122     clib_memset (&last_key4, 0xff, sizeof (last_key4));
123   else
124     clib_memset (&last_key6, 0xff, sizeof (last_key6));
125
126   from = vlib_frame_vector_args (from_frame);
127   n_left_from = from_frame->n_vectors;
128
129   next_index = node->cached_next_index;
130   stats_sw_if_index = node->runtime_data[0];
131   stats_n_packets = stats_n_bytes = 0;
132
133   while (n_left_from > 0)
134     {
135       u32 n_left_to_next;
136
137       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
138
139       while (n_left_from >= 4 && n_left_to_next >= 2)
140         {
141           u32 bi0, bi1;
142           vlib_buffer_t *b0, *b1;
143           u32 next0, next1;
144           ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
145           ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
146           uword *p0, *p1;
147           u32 tunnel_index0, tunnel_index1;
148           vxlan_gpe_tunnel_t *t0, *t1;
149           vxlan4_gpe_tunnel_key_t key4_0, key4_1;
150           vxlan6_gpe_tunnel_key_t key6_0, key6_1;
151           u32 error0, error1;
152           u32 sw_if_index0, sw_if_index1, len0, len1;
153
154           /* Prefetch next iteration. */
155           {
156             vlib_buffer_t *p2, *p3;
157
158             p2 = vlib_get_buffer (vm, from[2]);
159             p3 = vlib_get_buffer (vm, from[3]);
160
161             vlib_prefetch_buffer_header (p2, LOAD);
162             vlib_prefetch_buffer_header (p3, LOAD);
163
164             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
165             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
166           }
167
168           bi0 = from[0];
169           bi1 = from[1];
170           to_next[0] = bi0;
171           to_next[1] = bi1;
172           from += 2;
173           to_next += 2;
174           n_left_to_next -= 2;
175           n_left_from -= 2;
176
177           b0 = vlib_get_buffer (vm, bi0);
178           b1 = vlib_get_buffer (vm, bi1);
179
180           if (is_ip4)
181             {
182               /* udp leaves current_data pointing at the vxlan-gpe header */
183               vlib_buffer_advance (b0,
184                                    -(word) (sizeof (udp_header_t) +
185                                             sizeof (ip4_header_t)));
186               vlib_buffer_advance (b1,
187                                    -(word) (sizeof (udp_header_t) +
188                                             sizeof (ip4_header_t)));
189
190               iuvn4_0 = vlib_buffer_get_current (b0);
191               iuvn4_1 = vlib_buffer_get_current (b1);
192
193               /* pop (ip, udp, vxlan) */
194               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
195               vlib_buffer_advance (b1, sizeof (*iuvn4_1));
196             }
197           else
198             {
199               /* udp leaves current_data pointing at the vxlan-gpe header */
200               vlib_buffer_advance (b0,
201                                    -(word) (sizeof (udp_header_t) +
202                                             sizeof (ip6_header_t)));
203               vlib_buffer_advance (b1,
204                                    -(word) (sizeof (udp_header_t) +
205                                             sizeof (ip6_header_t)));
206
207               iuvn6_0 = vlib_buffer_get_current (b0);
208               iuvn6_1 = vlib_buffer_get_current (b1);
209
210               /* pop (ip, udp, vxlan) */
211               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
212               vlib_buffer_advance (b1, sizeof (*iuvn6_1));
213             }
214
215           tunnel_index0 = ~0;
216           tunnel_index1 = ~0;
217           error0 = 0;
218           error1 = 0;
219
220           if (is_ip4)
221             {
222               next0 =
223                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
224                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
225                 VXLAN_GPE_INPUT_NEXT_DROP;
226               next1 =
227                 (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
228                 nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
229                 VXLAN_GPE_INPUT_NEXT_DROP;
230
231               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
232               key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
233
234               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
235               key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
236
237               key4_0.vni = iuvn4_0->vxlan.vni_res;
238               key4_1.vni = iuvn4_1->vxlan.vni_res;
239
240               key4_0.pad = 0;
241               key4_1.pad = 0;
242             }
243           else                  /* is_ip6 */
244             {
245               next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
246                 iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
247               next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
248                 iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
249
250               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
251               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
252               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
253               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
254
255               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
256               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
257               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
258               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
259
260               key6_0.vni = iuvn6_0->vxlan.vni_res;
261               key6_1.vni = iuvn6_1->vxlan.vni_res;
262             }
263
264           /* Processing packet 0 */
265           if (is_ip4)
266             {
267               /* Processing for key4_0 */
268               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
269                                  || (key4_0.as_u64[1] !=
270                                      last_key4.as_u64[1])))
271                 {
272                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
273
274                   if (p0 == 0)
275                     {
276                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
277                       goto trace0;
278                     }
279
280                   last_key4.as_u64[0] = key4_0.as_u64[0];
281                   last_key4.as_u64[1] = key4_0.as_u64[1];
282                   tunnel_index0 = last_tunnel_index = p0[0];
283                 }
284               else
285                 tunnel_index0 = last_tunnel_index;
286             }
287           else                  /* is_ip6 */
288             {
289               next0 =
290                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
291                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
292                 VXLAN_GPE_INPUT_NEXT_DROP;
293               next1 =
294                 (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
295                 nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
296                 VXLAN_GPE_INPUT_NEXT_DROP;
297
298               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
299               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
300               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
301               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
302
303               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
304               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
305               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
306               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
307
308               key6_0.vni = iuvn6_0->vxlan.vni_res;
309               key6_1.vni = iuvn6_1->vxlan.vni_res;
310
311               /* Processing for key6_0 */
312               if (PREDICT_FALSE
313                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
314                 {
315                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
316
317                   if (p0 == 0)
318                     {
319                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
320                       goto trace0;
321                     }
322
323                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
324                   tunnel_index0 = last_tunnel_index = p0[0];
325                 }
326               else
327                 tunnel_index0 = last_tunnel_index;
328             }
329
330           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
331
332
333           sw_if_index0 = t0->sw_if_index;
334           len0 = vlib_buffer_length_in_chain (vm, b0);
335
336           /* Required to make the l2 tag push / pop code work on l2 subifs */
337           vnet_update_l2_len (b0);
338
339           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
340           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
341
342       /**
343        * ip[46] lookup in the configured FIB
344        */
345           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
346
347           pkts_decapsulated++;
348           stats_n_packets += 1;
349           stats_n_bytes += len0;
350
351           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
352             {
353               stats_n_packets -= 1;
354               stats_n_bytes -= len0;
355               if (stats_n_packets)
356                 vlib_increment_combined_counter (im->combined_sw_if_counters +
357                                                  VNET_INTERFACE_COUNTER_RX,
358                                                  thread_index,
359                                                  stats_sw_if_index,
360                                                  stats_n_packets,
361                                                  stats_n_bytes);
362               stats_n_packets = 1;
363               stats_n_bytes = len0;
364               stats_sw_if_index = sw_if_index0;
365             }
366
367         trace0:b0->error = error0 ? node->errors[error0] : 0;
368
369           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
370             {
371               vxlan_gpe_rx_trace_t *tr =
372                 vlib_add_trace (vm, node, b0, sizeof (*tr));
373               tr->next_index = next0;
374               tr->error = error0;
375               tr->tunnel_index = tunnel_index0;
376             }
377
378           /* Process packet 1 */
379           if (is_ip4)
380             {
381               /* Processing for key4_1 */
382               if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
383                                  || (key4_1.as_u64[1] !=
384                                      last_key4.as_u64[1])))
385                 {
386                   p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
387
388                   if (p1 == 0)
389                     {
390                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
391                       goto trace1;
392                     }
393
394                   last_key4.as_u64[0] = key4_1.as_u64[0];
395                   last_key4.as_u64[1] = key4_1.as_u64[1];
396                   tunnel_index1 = last_tunnel_index = p1[0];
397                 }
398               else
399                 tunnel_index1 = last_tunnel_index;
400             }
401           else                  /* is_ip6 */
402             {
403               /* Processing for key6_1 */
404               if (PREDICT_FALSE
405                   (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
406                 {
407                   p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
408
409                   if (p1 == 0)
410                     {
411                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
412                       goto trace1;
413                     }
414
415                   memcpy (&last_key6, &key6_1, sizeof (key6_1));
416                   tunnel_index1 = last_tunnel_index = p1[0];
417                 }
418               else
419                 tunnel_index1 = last_tunnel_index;
420             }
421
422           t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
423
424           sw_if_index1 = t1->sw_if_index;
425           len1 = vlib_buffer_length_in_chain (vm, b1);
426
427           /* Required to make the l2 tag push / pop code work on l2 subifs */
428           vnet_update_l2_len (b1);
429
430           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
431           vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
432
433           /*
434            * ip[46] lookup in the configured FIB
435            */
436           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
437
438           pkts_decapsulated++;
439           stats_n_packets += 1;
440           stats_n_bytes += len1;
441
442           /* Batch stats increment on the same vxlan tunnel so counter
443              is not incremented per packet */
444           if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
445             {
446               stats_n_packets -= 1;
447               stats_n_bytes -= len1;
448               if (stats_n_packets)
449                 vlib_increment_combined_counter (im->combined_sw_if_counters +
450                                                  VNET_INTERFACE_COUNTER_RX,
451                                                  thread_index,
452                                                  stats_sw_if_index,
453                                                  stats_n_packets,
454                                                  stats_n_bytes);
455               stats_n_packets = 1;
456               stats_n_bytes = len1;
457               stats_sw_if_index = sw_if_index1;
458             }
459           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
460
461         trace1:b1->error = error1 ? node->errors[error1] : 0;
462
463           if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
464             {
465               vxlan_gpe_rx_trace_t *tr =
466                 vlib_add_trace (vm, node, b1, sizeof (*tr));
467               tr->next_index = next1;
468               tr->error = error1;
469               tr->tunnel_index = tunnel_index1;
470             }
471
472           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
473                                            n_left_to_next, bi0, bi1, next0,
474                                            next1);
475         }
476
477       while (n_left_from > 0 && n_left_to_next > 0)
478         {
479           u32 bi0;
480           vlib_buffer_t *b0;
481           u32 next0;
482           ip4_vxlan_gpe_header_t *iuvn4_0;
483           ip6_vxlan_gpe_header_t *iuvn6_0;
484           uword *p0;
485           u32 tunnel_index0;
486           vxlan_gpe_tunnel_t *t0;
487           vxlan4_gpe_tunnel_key_t key4_0;
488           vxlan6_gpe_tunnel_key_t key6_0;
489           u32 error0;
490           u32 sw_if_index0, len0;
491
492           bi0 = from[0];
493           to_next[0] = bi0;
494           from += 1;
495           to_next += 1;
496           n_left_from -= 1;
497           n_left_to_next -= 1;
498
499           b0 = vlib_get_buffer (vm, bi0);
500
501           if (is_ip4)
502             {
503               /* udp leaves current_data pointing at the vxlan-gpe header */
504               vlib_buffer_advance (b0,
505                                    -(word) (sizeof (udp_header_t) +
506                                             sizeof (ip4_header_t)));
507
508               iuvn4_0 = vlib_buffer_get_current (b0);
509
510               /* pop (ip, udp, vxlan) */
511               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
512             }
513           else
514             {
515               /* udp leaves current_data pointing at the vxlan-gpe header */
516               vlib_buffer_advance (b0,
517                                    -(word) (sizeof (udp_header_t) +
518                                             sizeof (ip6_header_t)));
519
520               iuvn6_0 = vlib_buffer_get_current (b0);
521
522               /* pop (ip, udp, vxlan) */
523               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
524             }
525
526           tunnel_index0 = ~0;
527           error0 = 0;
528
529           if (is_ip4)
530             {
531               next0 =
532                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
533                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
534                 VXLAN_GPE_INPUT_NEXT_DROP;
535
536               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
537               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
538               key4_0.vni = iuvn4_0->vxlan.vni_res;
539               key4_0.pad = 0;
540
541               /* Processing for key4_0 */
542               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
543                                  || (key4_0.as_u64[1] !=
544                                      last_key4.as_u64[1])))
545                 {
546                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
547
548                   if (p0 == 0)
549                     {
550                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
551                       goto trace00;
552                     }
553
554                   last_key4.as_u64[0] = key4_0.as_u64[0];
555                   last_key4.as_u64[1] = key4_0.as_u64[1];
556                   tunnel_index0 = last_tunnel_index = p0[0];
557                 }
558               else
559                 tunnel_index0 = last_tunnel_index;
560             }
561           else                  /* is_ip6 */
562             {
563               next0 =
564                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
565                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
566                 VXLAN_GPE_INPUT_NEXT_DROP;
567
568               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
569               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
570               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
571               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
572               key6_0.vni = iuvn6_0->vxlan.vni_res;
573
574               /* Processing for key6_0 */
575               if (PREDICT_FALSE
576                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
577                 {
578                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
579
580                   if (p0 == 0)
581                     {
582                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
583                       goto trace00;
584                     }
585
586                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
587                   tunnel_index0 = last_tunnel_index = p0[0];
588                 }
589               else
590                 tunnel_index0 = last_tunnel_index;
591             }
592
593           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
594
595
596           sw_if_index0 = t0->sw_if_index;
597           len0 = vlib_buffer_length_in_chain (vm, b0);
598
599           /* Required to make the l2 tag push / pop code work on l2 subifs */
600           vnet_update_l2_len (b0);
601
602           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
603           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
604
605           /*
606            * ip[46] lookup in the configured FIB
607            */
608           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
609
610           pkts_decapsulated++;
611           stats_n_packets += 1;
612           stats_n_bytes += len0;
613
614           /* Batch stats increment on the same vxlan-gpe tunnel so counter
615              is not incremented per packet */
616           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
617             {
618               stats_n_packets -= 1;
619               stats_n_bytes -= len0;
620               if (stats_n_packets)
621                 vlib_increment_combined_counter (im->combined_sw_if_counters +
622                                                  VNET_INTERFACE_COUNTER_RX,
623                                                  thread_index,
624                                                  stats_sw_if_index,
625                                                  stats_n_packets,
626                                                  stats_n_bytes);
627               stats_n_packets = 1;
628               stats_n_bytes = len0;
629               stats_sw_if_index = sw_if_index0;
630             }
631
632         trace00:b0->error = error0 ? node->errors[error0] : 0;
633
634           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
635             {
636               vxlan_gpe_rx_trace_t *tr =
637                 vlib_add_trace (vm, node, b0, sizeof (*tr));
638               tr->next_index = next0;
639               tr->error = error0;
640               tr->tunnel_index = tunnel_index0;
641             }
642           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
643                                            n_left_to_next, bi0, next0);
644         }
645
646       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
647     }
648
649   vlib_node_increment_counter (vm,
650                                is_ip4 ? vxlan4_gpe_input_node.index :
651                                vxlan6_gpe_input_node.index,
652                                VXLAN_GPE_ERROR_DECAPSULATED,
653                                pkts_decapsulated);
654
655   /* Increment any remaining batch stats */
656   if (stats_n_packets)
657     {
658       vlib_increment_combined_counter (im->combined_sw_if_counters +
659                                        VNET_INTERFACE_COUNTER_RX,
660                                        thread_index, stats_sw_if_index,
661                                        stats_n_packets, stats_n_bytes);
662       node->runtime_data[0] = stats_sw_if_index;
663     }
664   return from_frame->n_vectors;
665 }
666
667 /**
668  * @brief Graph processing dispatch function for IPv4 VXLAN GPE
669  *
670  * @node vxlan4-gpe-input
671  * @param *vm
672  * @param *node
673  * @param *from_frame
674  *
675  * @return from_frame->n_vectors
676  *
677  */
VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  /* Thin wrapper: all work happens in the shared inline, IPv4 flavor. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
684
685 #ifndef CLIB_MARCH_VARIANT
686 void
687 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
688 {
689   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
690   hm->decap_next_node_list[protocol_id] = next_node_index;
691   return;
692 }
693
694 void
695 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
696 {
697   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
698   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
699   return;
700 }
701 #endif /* CLIB_MARCH_VARIANT */
702
703 /**
704  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
705  *
706  * @node vxlan6-gpe-input
707  * @param *vm
708  * @param *node
709  * @param *from_frame
710  *
711  * @return from_frame->n_vectors - uword
712  *
713  */
VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  /* Thin wrapper: all work happens in the shared inline, IPv6 flavor. */
  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
720
721 /**
722  * @brief VXLAN GPE error strings
723  */
724 static char *vxlan_gpe_error_strings[] = {
725 #define vxlan_gpe_error(n,s) s,
726 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
727 #undef vxlan_gpe_error
728 #undef _
729 };
730
731 /* *INDENT-OFF* */
/* Node registration for the IPv4 VXLAN-GPE decap node. */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
751 /* *INDENT-ON* */
752
753 /* *INDENT-OFF* */
/* Node registration for the IPv6 VXLAN-GPE decap node. */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
773 /* *INDENT-ON* */
774
/* Next nodes for the ip[46]-vxlan-gpe-bypass feature nodes. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,	/**< drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,	/**< hand off to vxlan-gpe input */
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
781
782 always_inline uword
783 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
784                             vlib_node_runtime_t * node,
785                             vlib_frame_t * frame, u32 is_ip4)
786 {
787   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
788   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
789   vlib_node_runtime_t *error_node =
790     vlib_node_get_runtime (vm, ip4_input_node.index);
791   vtep4_key_t last_vtep4;       /* last IPv4 address / fib index
792                                    matching a local VTEP address */
793   vtep6_key_t last_vtep6;       /* last IPv6 address / fib index
794                                    matching a local VTEP address */
795   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
796 #ifdef CLIB_HAVE_VEC512
797   vtep4_cache_t vtep4_u512;
798   clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
799 #endif
800
801   from = vlib_frame_vector_args (frame);
802   n_left_from = frame->n_vectors;
803   next_index = node->cached_next_index;
804
805   vlib_get_buffers (vm, from, bufs, n_left_from);
806
807   if (node->flags & VLIB_NODE_FLAG_TRACE)
808     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
809
810   if (is_ip4)
811     vtep4_key_init (&last_vtep4);
812   else
813     vtep6_key_init (&last_vtep6);
814
815   while (n_left_from > 0)
816     {
817       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
818
819       while (n_left_from >= 4 && n_left_to_next >= 2)
820         {
821           vlib_buffer_t *b0, *b1;
822           ip4_header_t *ip40, *ip41;
823           ip6_header_t *ip60, *ip61;
824           udp_header_t *udp0, *udp1;
825           u32 bi0, ip_len0, udp_len0, flags0, next0;
826           u32 bi1, ip_len1, udp_len1, flags1, next1;
827           i32 len_diff0, len_diff1;
828           u8 error0, good_udp0, proto0;
829           u8 error1, good_udp1, proto1;
830
831           /* Prefetch next iteration. */
832           {
833             vlib_prefetch_buffer_header (b[2], LOAD);
834             vlib_prefetch_buffer_header (b[3], LOAD);
835
836             CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
837             CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
838           }
839
840           bi0 = to_next[0] = from[0];
841           bi1 = to_next[1] = from[1];
842           from += 2;
843           n_left_from -= 2;
844           to_next += 2;
845           n_left_to_next -= 2;
846
847           b0 = b[0];
848           b1 = b[1];
849           b += 2;
850           if (is_ip4)
851             {
852               ip40 = vlib_buffer_get_current (b0);
853               ip41 = vlib_buffer_get_current (b1);
854             }
855           else
856             {
857               ip60 = vlib_buffer_get_current (b0);
858               ip61 = vlib_buffer_get_current (b1);
859             }
860
861           /* Setup packet for next IP feature */
862           vnet_feature_next (&next0, b0);
863           vnet_feature_next (&next1, b1);
864
865           if (is_ip4)
866             {
867               proto0 = ip40->protocol;
868               proto1 = ip41->protocol;
869             }
870           else
871             {
872               proto0 = ip60->protocol;
873               proto1 = ip61->protocol;
874             }
875
876           /* Process packet 0 */
877           if (proto0 != IP_PROTOCOL_UDP)
878             goto exit0;         /* not UDP packet */
879
880           if (is_ip4)
881             udp0 = ip4_next_header (ip40);
882           else
883             udp0 = ip6_next_header (ip60);
884
885           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
886             goto exit0;         /* not VXLAN packet */
887
888           /* Validate DIP against VTEPs */
889           if (is_ip4)
890             {
891 #ifdef CLIB_HAVE_VEC512
892               if (!vtep4_check_vector
893                   (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
894 #else
895               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
896 #endif
897                 goto exit0;     /* no local VTEP for VXLAN packet */
898             }
899           else
900             {
901               if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
902                 goto exit0;     /* no local VTEP for VXLAN packet */
903             }
904
905           flags0 = b0->flags;
906           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
907
908           /* Don't verify UDP checksum for packets with explicit zero checksum. */
909           good_udp0 |= udp0->checksum == 0;
910
911           /* Verify UDP length */
912           if (is_ip4)
913             ip_len0 = clib_net_to_host_u16 (ip40->length);
914           else
915             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
916           udp_len0 = clib_net_to_host_u16 (udp0->length);
917           len_diff0 = ip_len0 - udp_len0;
918
919           /* Verify UDP checksum */
920           if (PREDICT_FALSE (!good_udp0))
921             {
922               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
923                 {
924                   if (is_ip4)
925                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
926                   else
927                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
928                   good_udp0 =
929                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
930                 }
931             }
932
933           if (is_ip4)
934             {
935               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
936               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
937             }
938           else
939             {
940               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
941               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
942             }
943
944           next0 = error0 ?
945             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
946           b0->error = error0 ? error_node->errors[error0] : 0;
947
948           /* vxlan_gpe-input node expect current at VXLAN header */
949           if (is_ip4)
950             vlib_buffer_advance (b0,
951                                  sizeof (ip4_header_t) +
952                                  sizeof (udp_header_t));
953           else
954             vlib_buffer_advance (b0,
955                                  sizeof (ip6_header_t) +
956                                  sizeof (udp_header_t));
957
958         exit0:
959           /* Process packet 1 */
960           if (proto1 != IP_PROTOCOL_UDP)
961             goto exit1;         /* not UDP packet */
962
963           if (is_ip4)
964             udp1 = ip4_next_header (ip41);
965           else
966             udp1 = ip6_next_header (ip61);
967
968           if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
969             goto exit1;         /* not VXLAN packet */
970
971           /* Validate DIP against VTEPs */
972           if (is_ip4)
973             {
974 #ifdef CLIB_HAVE_VEC512
975               if (!vtep4_check_vector
976                   (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
977 #else
978               if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
979 #endif
980                 goto exit1;     /* no local VTEP for VXLAN packet */
981             }
982           else
983             {
984               if (!vtep6_check (&ngm->vtep_table, b1, ip61, &last_vtep6))
985                 goto exit1;     /* no local VTEP for VXLAN packet */
986             }
987
988           flags1 = b1->flags;
989           good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
990
991           /* Don't verify UDP checksum for packets with explicit zero checksum. */
992           good_udp1 |= udp1->checksum == 0;
993
994           /* Verify UDP length */
995           if (is_ip4)
996             ip_len1 = clib_net_to_host_u16 (ip41->length);
997           else
998             ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
999           udp_len1 = clib_net_to_host_u16 (udp1->length);
1000           len_diff1 = ip_len1 - udp_len1;
1001
1002           /* Verify UDP checksum */
1003           if (PREDICT_FALSE (!good_udp1))
1004             {
1005               if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1006                 {
1007                   if (is_ip4)
1008                     flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1009                   else
1010                     flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1011                   good_udp1 =
1012                     (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1013                 }
1014             }
1015
1016           if (is_ip4)
1017             {
1018               error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1019               error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1020             }
1021           else
1022             {
1023               error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1024               error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1025             }
1026
1027           next1 = error1 ?
1028             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1029           b1->error = error1 ? error_node->errors[error1] : 0;
1030
1031           /* vxlan_gpe-input node expect current at VXLAN header */
1032           if (is_ip4)
1033             vlib_buffer_advance (b1,
1034                                  sizeof (ip4_header_t) +
1035                                  sizeof (udp_header_t));
1036           else
1037             vlib_buffer_advance (b1,
1038                                  sizeof (ip6_header_t) +
1039                                  sizeof (udp_header_t));
1040
1041         exit1:
1042           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1043                                            to_next, n_left_to_next,
1044                                            bi0, bi1, next0, next1);
1045         }
1046
1047       while (n_left_from > 0 && n_left_to_next > 0)
1048         {
1049           vlib_buffer_t *b0;
1050           ip4_header_t *ip40;
1051           ip6_header_t *ip60;
1052           udp_header_t *udp0;
1053           u32 bi0, ip_len0, udp_len0, flags0, next0;
1054           i32 len_diff0;
1055           u8 error0, good_udp0, proto0;
1056
1057           bi0 = to_next[0] = from[0];
1058           from += 1;
1059           n_left_from -= 1;
1060           to_next += 1;
1061           n_left_to_next -= 1;
1062
1063           b0 = b[0];
1064           b++;
1065           if (is_ip4)
1066             ip40 = vlib_buffer_get_current (b0);
1067           else
1068             ip60 = vlib_buffer_get_current (b0);
1069
1070           /* Setup packet for next IP feature */
1071           vnet_feature_next (&next0, b0);
1072
1073           if (is_ip4)
1074             proto0 = ip40->protocol;
1075           else
1076             proto0 = ip60->protocol;
1077
1078           if (proto0 != IP_PROTOCOL_UDP)
1079             goto exit;          /* not UDP packet */
1080
1081           if (is_ip4)
1082             udp0 = ip4_next_header (ip40);
1083           else
1084             udp0 = ip6_next_header (ip60);
1085
1086           if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
1087             goto exit;          /* not VXLAN packet */
1088
1089           /* Validate DIP against VTEPs */
1090
1091           if (is_ip4)
1092             {
1093 #ifdef CLIB_HAVE_VEC512
1094               if (!vtep4_check_vector
1095                   (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
1096 #else
1097               if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
1098 #endif
1099                 goto exit;      /* no local VTEP for VXLAN packet */
1100             }
1101           else
1102             {
1103               if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
1104                 goto exit;      /* no local VTEP for VXLAN packet */
1105             }
1106
1107           flags0 = b0->flags;
1108           good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1109
1110           /* Don't verify UDP checksum for packets with explicit zero checksum. */
1111           good_udp0 |= udp0->checksum == 0;
1112
1113           /* Verify UDP length */
1114           if (is_ip4)
1115             ip_len0 = clib_net_to_host_u16 (ip40->length);
1116           else
1117             ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1118           udp_len0 = clib_net_to_host_u16 (udp0->length);
1119           len_diff0 = ip_len0 - udp_len0;
1120
1121           /* Verify UDP checksum */
1122           if (PREDICT_FALSE (!good_udp0))
1123             {
1124               if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
1125                 {
1126                   if (is_ip4)
1127                     flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1128                   else
1129                     flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1130                   good_udp0 =
1131                     (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
1132                 }
1133             }
1134
1135           if (is_ip4)
1136             {
1137               error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1138               error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1139             }
1140           else
1141             {
1142               error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1143               error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1144             }
1145
1146           next0 = error0 ?
1147             IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1148           b0->error = error0 ? error_node->errors[error0] : 0;
1149
1150           /* vxlan_gpe-input node expect current at VXLAN header */
1151           if (is_ip4)
1152             vlib_buffer_advance (b0,
1153                                  sizeof (ip4_header_t) +
1154                                  sizeof (udp_header_t));
1155           else
1156             vlib_buffer_advance (b0,
1157                                  sizeof (ip6_header_t) +
1158                                  sizeof (udp_header_t));
1159
1160         exit:
1161           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1162                                            to_next, n_left_to_next,
1163                                            bi0, next0);
1164         }
1165
1166       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1167     }
1168
1169   return frame->n_vectors;
1170 }
1171
1172 VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
1173                                           vlib_node_runtime_t * node,
1174                                           vlib_frame_t * frame)
1175 {
1176   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1177 }
1178
/* *INDENT-OFF* */
/**
 * Registration of the "ip4-vxlan-gpe-bypass" graph node.
 *
 * Two next nodes: packets that fail validation in the bypass worker are
 * dropped; packets identified as locally-terminated VXLAN-GPE are handed
 * to "vxlan4-gpe-input" with the buffer advanced past the IP4/UDP headers.
 * Buffers and traces are formatted as IPv4 (see format_ip4_header /
 * format_ip4_forward_next_trace).
 */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1194
1195 #ifndef CLIB_MARCH_VARIANT
1196 /* Dummy init function to get us linked in. */
1197 clib_error_t *
1198 ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
1199 {
1200   return 0;
1201 }
1202
1203 VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
1204 #endif /* CLIB_MARCH_VARIANT */
1205
1206 VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
1207                                           vlib_node_runtime_t * node,
1208                                           vlib_frame_t * frame)
1209 {
1210   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1211 }
1212
/* *INDENT-OFF* */
/**
 * Registration of the "ip6-vxlan-gpe-bypass" graph node.
 *
 * Two next nodes: packets that fail validation in the bypass worker are
 * dropped; packets identified as locally-terminated VXLAN-GPE are handed
 * to "vxlan6-gpe-input" with the buffer advanced past the IP6/UDP headers.
 * Buffers and traces are formatted as IPv6 (see format_ip6_header /
 * format_ip6_forward_next_trace).
 */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1228
1229 #ifndef CLIB_MARCH_VARIANT
1230 /* Dummy init function to get us linked in. */
1231 clib_error_t *
1232 ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
1233 {
1234   return 0;
1235 }
1236
1237 VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
1238 #endif /* CLIB_MARCH_VARIANT */
1239
1240 /*
1241  * fd.io coding-style-patch-verification: ON
1242  *
1243  * Local Variables:
1244  * eval: (c-set-style "gnu")
1245  * End:
1246  */