Typos. A bunch of typos I've been collecting.
[vpp.git] / src / vnet / vxlan-gpe / decap.c
1 /*
2  * decap.c - decapsulate VXLAN GPE
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 /**
18  *  @file
19  *  @brief Functions for decapsulating VXLAN GPE tunnels
20  *
21 */
22
23 #include <vlib/vlib.h>
24 #include <vnet/pg/pg.h>
25 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26
27 /**
28  * @brief Struct for VXLAN GPE decap packet tracing
29  *
30  */
typedef struct
{
  u32 next_index;    /**< graph next-node index chosen for this packet */
  u32 tunnel_index;  /**< index of the matched tunnel in the tunnel pool, ~0 if none */
  u32 error;         /**< node error code recorded for this packet (0 = none) */
} vxlan_gpe_rx_trace_t;
37
38 /**
39  * @brief Tracing function for VXLAN GPE packet decapsulation
40  *
41  * @param *s
42  * @param *args
43  *
44  * @return *s
45  *
46  */
47 static u8 *
48 format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
49 {
50   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52   vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
53
54   if (t->tunnel_index != ~0)
55     {
56       s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
57                   t->next_index, t->error);
58     }
59   else
60     {
61       s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
62                   t->error);
63     }
64   return s;
65 }
66
/**
 * @brief Tracing function for VXLAN GPE packet decapsulation including length
 *
 * NOTE(review): this formatter is effectively a stub — it pops the
 * vlib_main_t/vlib_node_t arguments and returns the format string
 * unchanged, so it prints nothing. Presumably it was intended to format
 * the buffer contents with their length (it is wired up as
 * .format_buffer on both input nodes); confirm whether it should be
 * implemented or removed.
 *
 * @param *s format string under construction
 * @param *args (vlib_main_t *, vlib_node_t *, ...)
 *
 * @return *s unchanged
 */
static u8 *
format_vxlan_gpe_with_length (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);

  return s;
}
85
86 /**
87  * @brief Common processing for IPv4 and IPv6 VXLAN GPE decap dispatch functions
88  *
89  * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
90  * tunnels are "terminate local". This means that there is no "TX" interface for this
91  * decap case, so that field in the buffer_metadata can be "used for something else".
92  * The something else in this case is, for the IPv4/IPv6 inner-packet type case, the
93  * FIB index used to look up the inner-packet's adjacency.
94  *
95  *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
96  *
97  * @param *vm
98  * @param *node
99  * @param *from_frame
100  * @param is_ip4
101  *
102  * @return from_frame->n_vectors
103  *
104  */
105 always_inline uword
106 vxlan_gpe_input (vlib_main_t * vm,
107                  vlib_node_runtime_t * node,
108                  vlib_frame_t * from_frame, u8 is_ip4)
109 {
110   u32 n_left_from, next_index, *from, *to_next;
111   vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
112   vnet_main_t *vnm = nngm->vnet_main;
113   vnet_interface_main_t *im = &vnm->interface_main;
114   u32 last_tunnel_index = ~0;
115   vxlan4_gpe_tunnel_key_t last_key4;
116   vxlan6_gpe_tunnel_key_t last_key6;
117   u32 ip4_pkts_decapsulated = 0;
118   u32 ip6_pkts_decapsulated = 0;
119   u32 thread_index = vm->thread_index;
120   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
121
122   if (is_ip4)
123     clib_memset (&last_key4, 0xff, sizeof (last_key4));
124   else
125     clib_memset (&last_key6, 0xff, sizeof (last_key6));
126
127   from = vlib_frame_vector_args (from_frame);
128   n_left_from = from_frame->n_vectors;
129
130   next_index = node->cached_next_index;
131   stats_sw_if_index = node->runtime_data[0];
132   stats_n_packets = stats_n_bytes = 0;
133
134   while (n_left_from > 0)
135     {
136       u32 n_left_to_next;
137
138       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
139
140       while (n_left_from >= 4 && n_left_to_next >= 2)
141         {
142           u32 bi0, bi1;
143           vlib_buffer_t *b0, *b1;
144           u32 next0, next1;
145           ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
146           ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
147           uword *p0, *p1;
148           u32 tunnel_index0, tunnel_index1;
149           vxlan_gpe_tunnel_t *t0, *t1;
150           vxlan4_gpe_tunnel_key_t key4_0, key4_1;
151           vxlan6_gpe_tunnel_key_t key6_0, key6_1;
152           u32 error0, error1;
153           u32 sw_if_index0, sw_if_index1, len0, len1;
154
155           /* Prefetch next iteration. */
156           {
157             vlib_buffer_t *p2, *p3;
158
159             p2 = vlib_get_buffer (vm, from[2]);
160             p3 = vlib_get_buffer (vm, from[3]);
161
162             vlib_prefetch_buffer_header (p2, LOAD);
163             vlib_prefetch_buffer_header (p3, LOAD);
164
165             CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
166             CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
167           }
168
169           bi0 = from[0];
170           bi1 = from[1];
171           to_next[0] = bi0;
172           to_next[1] = bi1;
173           from += 2;
174           to_next += 2;
175           n_left_to_next -= 2;
176           n_left_from -= 2;
177
178           b0 = vlib_get_buffer (vm, bi0);
179           b1 = vlib_get_buffer (vm, bi1);
180
181           if (is_ip4)
182             {
183               /* udp leaves current_data pointing at the vxlan-gpe header */
184               vlib_buffer_advance (b0,
185                                    -(word) (sizeof (udp_header_t) +
186                                             sizeof (ip4_header_t)));
187               vlib_buffer_advance (b1,
188                                    -(word) (sizeof (udp_header_t) +
189                                             sizeof (ip4_header_t)));
190
191               iuvn4_0 = vlib_buffer_get_current (b0);
192               iuvn4_1 = vlib_buffer_get_current (b1);
193
194               /* pop (ip, udp, vxlan) */
195               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
196               vlib_buffer_advance (b1, sizeof (*iuvn4_1));
197             }
198           else
199             {
200               /* udp leaves current_data pointing at the vxlan-gpe header */
201               vlib_buffer_advance (b0,
202                                    -(word) (sizeof (udp_header_t) +
203                                             sizeof (ip6_header_t)));
204               vlib_buffer_advance (b1,
205                                    -(word) (sizeof (udp_header_t) +
206                                             sizeof (ip6_header_t)));
207
208               iuvn6_0 = vlib_buffer_get_current (b0);
209               iuvn6_1 = vlib_buffer_get_current (b1);
210
211               /* pop (ip, udp, vxlan) */
212               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
213               vlib_buffer_advance (b1, sizeof (*iuvn6_1));
214             }
215
216           tunnel_index0 = ~0;
217           tunnel_index1 = ~0;
218           error0 = 0;
219           error1 = 0;
220
221           if (is_ip4)
222             {
223               next0 =
224                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
225                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
226                 VXLAN_GPE_INPUT_NEXT_DROP;
227               next1 =
228                 (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
229                 nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
230                 VXLAN_GPE_INPUT_NEXT_DROP;
231
232               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
233               key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
234
235               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
236               key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
237
238               key4_0.vni = iuvn4_0->vxlan.vni_res;
239               key4_1.vni = iuvn4_1->vxlan.vni_res;
240
241               key4_0.pad = 0;
242               key4_1.pad = 0;
243             }
244           else                  /* is_ip6 */
245             {
246               next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
247                 iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
248               next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
249                 iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
250
251               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
252               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
253               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
254               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
255
256               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
257               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
258               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
259               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
260
261               key6_0.vni = iuvn6_0->vxlan.vni_res;
262               key6_1.vni = iuvn6_1->vxlan.vni_res;
263             }
264
265           /* Processing packet 0 */
266           if (is_ip4)
267             {
268               /* Processing for key4_0 */
269               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
270                                  || (key4_0.as_u64[1] !=
271                                      last_key4.as_u64[1])))
272                 {
273                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
274
275                   if (p0 == 0)
276                     {
277                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
278                       goto trace0;
279                     }
280
281                   last_key4.as_u64[0] = key4_0.as_u64[0];
282                   last_key4.as_u64[1] = key4_0.as_u64[1];
283                   tunnel_index0 = last_tunnel_index = p0[0];
284                 }
285               else
286                 tunnel_index0 = last_tunnel_index;
287             }
288           else                  /* is_ip6 */
289             {
290               next0 =
291                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
292                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
293                 VXLAN_GPE_INPUT_NEXT_DROP;
294               next1 =
295                 (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
296                 nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
297                 VXLAN_GPE_INPUT_NEXT_DROP;
298
299               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
300               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
301               key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
302               key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
303
304               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
305               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
306               key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
307               key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
308
309               key6_0.vni = iuvn6_0->vxlan.vni_res;
310               key6_1.vni = iuvn6_1->vxlan.vni_res;
311
312               /* Processing for key6_0 */
313               if (PREDICT_FALSE
314                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
315                 {
316                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
317
318                   if (p0 == 0)
319                     {
320                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
321                       goto trace0;
322                     }
323
324                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
325                   tunnel_index0 = last_tunnel_index = p0[0];
326                 }
327               else
328                 tunnel_index0 = last_tunnel_index;
329             }
330
331           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
332
333
334           sw_if_index0 = t0->sw_if_index;
335           len0 = vlib_buffer_length_in_chain (vm, b0);
336
337           /* Required to make the l2 tag push / pop code work on l2 subifs */
338           vnet_update_l2_len (b0);
339
340           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
341           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
342
343       /**
344        * ip[46] lookup in the configured FIB
345        */
346           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
347
348           if (is_ip4)
349             ip4_pkts_decapsulated++;
350           else
351             ip6_pkts_decapsulated++;
352
353           stats_n_packets += 1;
354           stats_n_bytes += len0;
355
356           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
357             {
358               stats_n_packets -= 1;
359               stats_n_bytes -= len0;
360               if (stats_n_packets)
361                 vlib_increment_combined_counter (im->combined_sw_if_counters +
362                                                  VNET_INTERFACE_COUNTER_RX,
363                                                  thread_index,
364                                                  stats_sw_if_index,
365                                                  stats_n_packets,
366                                                  stats_n_bytes);
367               stats_n_packets = 1;
368               stats_n_bytes = len0;
369               stats_sw_if_index = sw_if_index0;
370             }
371
372         trace0:b0->error = error0 ? node->errors[error0] : 0;
373
374           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
375             {
376               vxlan_gpe_rx_trace_t *tr =
377                 vlib_add_trace (vm, node, b0, sizeof (*tr));
378               tr->next_index = next0;
379               tr->error = error0;
380               tr->tunnel_index = tunnel_index0;
381             }
382
383           /* Process packet 1 */
384           if (is_ip4)
385             {
386               /* Processing for key4_1 */
387               if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
388                                  || (key4_1.as_u64[1] !=
389                                      last_key4.as_u64[1])))
390                 {
391                   p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
392
393                   if (p1 == 0)
394                     {
395                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
396                       goto trace1;
397                     }
398
399                   last_key4.as_u64[0] = key4_1.as_u64[0];
400                   last_key4.as_u64[1] = key4_1.as_u64[1];
401                   tunnel_index1 = last_tunnel_index = p1[0];
402                 }
403               else
404                 tunnel_index1 = last_tunnel_index;
405             }
406           else                  /* is_ip6 */
407             {
408               /* Processing for key6_1 */
409               if (PREDICT_FALSE
410                   (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
411                 {
412                   p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
413
414                   if (p1 == 0)
415                     {
416                       error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
417                       goto trace1;
418                     }
419
420                   memcpy (&last_key6, &key6_1, sizeof (key6_1));
421                   tunnel_index1 = last_tunnel_index = p1[0];
422                 }
423               else
424                 tunnel_index1 = last_tunnel_index;
425             }
426
427           t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
428
429           sw_if_index1 = t1->sw_if_index;
430           len1 = vlib_buffer_length_in_chain (vm, b1);
431
432           /* Required to make the l2 tag push / pop code work on l2 subifs */
433           vnet_update_l2_len (b1);
434
435           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
436           vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index;
437
438           /*
439            * ip[46] lookup in the configured FIB
440            */
441           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
442
443           if (is_ip4)
444             ip4_pkts_decapsulated++;
445           else
446             ip6_pkts_decapsulated++;
447
448           stats_n_packets += 1;
449           stats_n_bytes += len1;
450
451           /* Batch stats increment on the same vxlan tunnel so counter
452              is not incremented per packet */
453           if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
454             {
455               stats_n_packets -= 1;
456               stats_n_bytes -= len1;
457               if (stats_n_packets)
458                 vlib_increment_combined_counter (im->combined_sw_if_counters +
459                                                  VNET_INTERFACE_COUNTER_RX,
460                                                  thread_index,
461                                                  stats_sw_if_index,
462                                                  stats_n_packets,
463                                                  stats_n_bytes);
464               stats_n_packets = 1;
465               stats_n_bytes = len1;
466               stats_sw_if_index = sw_if_index1;
467             }
468           vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
469
470         trace1:b1->error = error1 ? node->errors[error1] : 0;
471
472           if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
473             {
474               vxlan_gpe_rx_trace_t *tr =
475                 vlib_add_trace (vm, node, b1, sizeof (*tr));
476               tr->next_index = next1;
477               tr->error = error1;
478               tr->tunnel_index = tunnel_index1;
479             }
480
481           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
482                                            n_left_to_next, bi0, bi1, next0,
483                                            next1);
484         }
485
486       while (n_left_from > 0 && n_left_to_next > 0)
487         {
488           u32 bi0;
489           vlib_buffer_t *b0;
490           u32 next0;
491           ip4_vxlan_gpe_header_t *iuvn4_0;
492           ip6_vxlan_gpe_header_t *iuvn6_0;
493           uword *p0;
494           u32 tunnel_index0;
495           vxlan_gpe_tunnel_t *t0;
496           vxlan4_gpe_tunnel_key_t key4_0;
497           vxlan6_gpe_tunnel_key_t key6_0;
498           u32 error0;
499           u32 sw_if_index0, len0;
500
501           bi0 = from[0];
502           to_next[0] = bi0;
503           from += 1;
504           to_next += 1;
505           n_left_from -= 1;
506           n_left_to_next -= 1;
507
508           b0 = vlib_get_buffer (vm, bi0);
509
510           if (is_ip4)
511             {
512               /* udp leaves current_data pointing at the vxlan-gpe header */
513               vlib_buffer_advance (b0,
514                                    -(word) (sizeof (udp_header_t) +
515                                             sizeof (ip4_header_t)));
516
517               iuvn4_0 = vlib_buffer_get_current (b0);
518
519               /* pop (ip, udp, vxlan) */
520               vlib_buffer_advance (b0, sizeof (*iuvn4_0));
521             }
522           else
523             {
524               /* udp leaves current_data pointing at the vxlan-gpe header */
525               vlib_buffer_advance (b0,
526                                    -(word) (sizeof (udp_header_t) +
527                                             sizeof (ip6_header_t)));
528
529               iuvn6_0 = vlib_buffer_get_current (b0);
530
531               /* pop (ip, udp, vxlan) */
532               vlib_buffer_advance (b0, sizeof (*iuvn6_0));
533             }
534
535           tunnel_index0 = ~0;
536           error0 = 0;
537
538           if (is_ip4)
539             {
540               next0 =
541                 (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
542                 nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
543                 VXLAN_GPE_INPUT_NEXT_DROP;
544
545               key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
546               key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
547               key4_0.vni = iuvn4_0->vxlan.vni_res;
548               key4_0.pad = 0;
549
550               /* Processing for key4_0 */
551               if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
552                                  || (key4_0.as_u64[1] !=
553                                      last_key4.as_u64[1])))
554                 {
555                   p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
556
557                   if (p0 == 0)
558                     {
559                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
560                       goto trace00;
561                     }
562
563                   last_key4.as_u64[0] = key4_0.as_u64[0];
564                   last_key4.as_u64[1] = key4_0.as_u64[1];
565                   tunnel_index0 = last_tunnel_index = p0[0];
566                 }
567               else
568                 tunnel_index0 = last_tunnel_index;
569             }
570           else                  /* is_ip6 */
571             {
572               next0 =
573                 (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
574                 nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
575                 VXLAN_GPE_INPUT_NEXT_DROP;
576
577               key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
578               key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
579               key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
580               key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
581               key6_0.vni = iuvn6_0->vxlan.vni_res;
582
583               /* Processing for key6_0 */
584               if (PREDICT_FALSE
585                   (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
586                 {
587                   p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
588
589                   if (p0 == 0)
590                     {
591                       error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
592                       goto trace00;
593                     }
594
595                   memcpy (&last_key6, &key6_0, sizeof (key6_0));
596                   tunnel_index0 = last_tunnel_index = p0[0];
597                 }
598               else
599                 tunnel_index0 = last_tunnel_index;
600             }
601
602           t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
603
604
605           sw_if_index0 = t0->sw_if_index;
606           len0 = vlib_buffer_length_in_chain (vm, b0);
607
608           /* Required to make the l2 tag push / pop code work on l2 subifs */
609           vnet_update_l2_len (b0);
610
611           /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
612           vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index;
613
614           /*
615            * ip[46] lookup in the configured FIB
616            */
617           vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
618
619           if (is_ip4)
620             ip4_pkts_decapsulated++;
621           else
622             ip6_pkts_decapsulated++;
623
624           stats_n_packets += 1;
625           stats_n_bytes += len0;
626
627           /* Batch stats increment on the same vxlan-gpe tunnel so counter
628              is not incremented per packet */
629           if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
630             {
631               stats_n_packets -= 1;
632               stats_n_bytes -= len0;
633               if (stats_n_packets)
634                 vlib_increment_combined_counter (im->combined_sw_if_counters +
635                                                  VNET_INTERFACE_COUNTER_RX,
636                                                  thread_index,
637                                                  stats_sw_if_index,
638                                                  stats_n_packets,
639                                                  stats_n_bytes);
640               stats_n_packets = 1;
641               stats_n_bytes = len0;
642               stats_sw_if_index = sw_if_index0;
643             }
644
645         trace00:b0->error = error0 ? node->errors[error0] : 0;
646
647           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
648             {
649               vxlan_gpe_rx_trace_t *tr =
650                 vlib_add_trace (vm, node, b0, sizeof (*tr));
651               tr->next_index = next0;
652               tr->error = error0;
653               tr->tunnel_index = tunnel_index0;
654             }
655           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
656                                            n_left_to_next, bi0, next0);
657         }
658
659       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
660     }
661   vlib_node_increment_counter (vm, vxlan4_gpe_input_node.index,
662                                VXLAN_GPE_ERROR_DECAPSULATED,
663                                ip4_pkts_decapsulated);
664   vlib_node_increment_counter (vm, vxlan6_gpe_input_node.index,
665                                VXLAN_GPE_ERROR_DECAPSULATED,
666                                ip6_pkts_decapsulated);
667   /* Increment any remaining batch stats */
668   if (stats_n_packets)
669     {
670       vlib_increment_combined_counter (im->combined_sw_if_counters +
671                                        VNET_INTERFACE_COUNTER_RX,
672                                        thread_index, stats_sw_if_index,
673                                        stats_n_packets, stats_n_bytes);
674       node->runtime_data[0] = stats_sw_if_index;
675     }
676   return from_frame->n_vectors;
677 }
678
679 /**
680  * @brief Graph processing dispatch function for IPv4 VXLAN GPE
681  *
682  * @node vxlan4-gpe-input
683  * @param *vm
684  * @param *node
685  * @param *from_frame
686  *
687  * @return from_frame->n_vectors
688  *
689  */
690 VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
691                                       vlib_node_runtime_t * node,
692                                       vlib_frame_t * from_frame)
693 {
694   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
695 }
696
697 #ifndef CLIB_MARCH_VARIANT
698 void
699 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
700 {
701   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
702   hm->decap_next_node_list[protocol_id] = next_node_index;
703   return;
704 }
705
706 void
707 vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
708 {
709   vxlan_gpe_main_t *hm = &vxlan_gpe_main;
710   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
711   return;
712 }
713 #endif /* CLIB_MARCH_VARIANT */
714
715 /**
716  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
717  *
718  * @node vxlan6-gpe-input
719  * @param *vm
720  * @param *node
721  * @param *from_frame
722  *
723  * @return from_frame->n_vectors - uword
724  *
725  */
726 VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
727                                       vlib_node_runtime_t * node,
728                                       vlib_frame_t * from_frame)
729 {
730   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
731 }
732
733 /**
734  * @brief VXLAN GPE error strings
735  */
736 static char *vxlan_gpe_error_strings[] = {
737 #define vxlan_gpe_error(n,s) s,
738 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
739 #undef vxlan_gpe_error
740 #undef _
741 };
742
/* Node registration for the IPv4 VXLAN-GPE decap node. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
  .name = "vxlan4-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  /* Next nodes generated from foreach_vxlan_gpe_input_next. */
  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */
764
/* Node registration for the IPv6 VXLAN-GPE decap node. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
  .name = "vxlan6-gpe-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
  .error_strings = vxlan_gpe_error_strings,

  /* Next nodes generated from foreach_vxlan_gpe_input_next. */
  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
    foreach_vxlan_gpe_input_next
#undef _
  },

  .format_buffer = format_vxlan_gpe_with_length,
  .format_trace = format_vxlan_gpe_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};
/* *INDENT-ON* */
786
/* Next-node indices for the ip[46]-vxlan-gpe-bypass feature nodes. */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,    /* drop the packet */
  IP_VXLAN_BYPASS_NEXT_VXLAN,   /* presumably hands off to the vxlan[46]-gpe input node — confirm against registration */
  IP_VXLAN_BYPASS_N_NEXT,       /* count of next nodes */
} ip_vxlan_bypass_next_t;
793
/**
 * @brief Shared worker for the ip4/ip6 "vxlan-gpe-bypass" graph nodes.
 *
 * Scans each IP packet for a UDP datagram destined to the VXLAN-GPE port
 * whose destination address matches a locally configured VTEP.  Matching
 * packets have their UDP length and checksum validated and are steered
 * straight to the vxlan[46]-gpe-input node (with the buffer advanced past
 * the IP + UDP headers); everything else continues to the next IP feature.
 *
 * @param *vm       vlib_main_t
 * @param *node     this node's runtime
 * @param *frame    frame of buffers to process
 * @param is_ip4    1 for the IPv4 data path, 0 for IPv6
 *
 * @return frame->n_vectors (number of packets processed)
 *
 */
always_inline uword
ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
                            vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  /* NOTE(review): error strings are taken from ip4_input_node for BOTH the
     IPv4 and IPv6 paths; confirm the IP6_ERROR_* indices used below map to
     sensible counters when is_ip4 == 0. */
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4;          /* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;          /* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  /* Seed the one-entry VTEP cache with a value that cannot match a real
   * destination, so the first packet always does the hash lookup. */
  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual-buffer loop: process packets 0 and 1 while prefetching 2 and 3. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          vlib_buffer_t *b0, *b1;
          ip4_header_t *ip40, *ip41;
          ip6_header_t *ip60, *ip61;
          udp_header_t *udp0, *udp1;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          u32 bi1, ip_len1, udp_len1, flags1, next1;
          i32 len_diff0, len_diff1;
          u8 error0, good_udp0, proto0;
          u8 error1, good_udp1, proto1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              ip41 = vlib_buffer_get_current (b1);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              ip61 = vlib_buffer_get_current (b1);
            }

          /* Setup packet for next IP feature (default disposition; only
           * overridden below when the packet is identified as VXLAN-GPE). */
          vnet_feature_next (&next0, b0);
          vnet_feature_next (&next1, b1);

          if (is_ip4)
            {
              proto0 = ip40->protocol;
              proto1 = ip41->protocol;
            }
          else
            {
              proto0 = ip60->protocol;
              proto1 = ip61->protocol;
            }

          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
            goto exit0;         /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit0;         /* not VXLAN packet */

          /* Validate DIP against VTEPs (one-entry cache in front of the
           * vtep4/vtep6 hash tables). */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
                    goto exit0; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum (in software only if hardware has not
           * already computed/validated it). */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan_gpe-input node expects current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit0:
          /* Process packet 1 (identical logic to packet 0) */
          if (proto1 != IP_PROTOCOL_UDP)
            goto exit1;         /* not UDP packet */

          if (is_ip4)
            udp1 = ip4_next_header (ip41);
          else
            udp1 = ip6_next_header (ip61);

          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit1;         /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
                    goto exit1; /* no local VTEP for VXLAN packet */
                  addr6 = ip61->dst_address;
                }
            }

          flags1 = b1->flags;
          good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp1 |= udp1->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len1 = clib_net_to_host_u16 (ip41->length);
          else
            ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
          udp_len1 = clib_net_to_host_u16 (udp1->length);
          len_diff1 = ip_len1 - udp_len1;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
            }

          next1 = error1 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b1->error = error1 ? error_node->errors[error1] : 0;

          /* vxlan_gpe-input node expects current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b1,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b1,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      /* Single-buffer loop: drain the remainder of the frame. */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          udp_header_t *udp0;
          u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          if (is_ip4)
            ip40 = vlib_buffer_get_current (b0);
          else
            ip60 = vlib_buffer_get_current (b0);

          /* Setup packet for next IP feature */
          vnet_feature_next (&next0, b0);

          if (is_ip4)
            proto0 = ip40->protocol;
          else
            proto0 = ip60->protocol;

          if (proto0 != IP_PROTOCOL_UDP)
            goto exit;          /* not UDP packet */

          if (is_ip4)
            udp0 = ip4_next_header (ip40);
          else
            udp0 = ip6_next_header (ip60);

          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
            goto exit;          /* not VXLAN packet */

          /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
                {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
                    goto exit;  /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
                {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
                    goto exit;  /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }

          flags0 = b0->flags;
          good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

          /* Don't verify UDP checksum for packets with explicit zero checksum. */
          good_udp0 |= udp0->checksum == 0;

          /* Verify UDP length */
          if (is_ip4)
            ip_len0 = clib_net_to_host_u16 (ip40->length);
          else
            ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
          udp_len0 = clib_net_to_host_u16 (udp0->length);
          len_diff0 = ip_len0 - udp_len0;

          /* Verify UDP checksum */
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
                {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
                }
            }

          if (is_ip4)
            {
              error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
            }
          else
            {
              error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
              error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
            }

          next0 = error0 ?
            IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
          b0->error = error0 ? error_node->errors[error0] : 0;

          /* vxlan_gpe-input node expects current at VXLAN header */
          if (is_ip4)
            vlib_buffer_advance (b0,
                                 sizeof (ip4_header_t) +
                                 sizeof (udp_header_t));
          else
            vlib_buffer_advance (b0,
                                 sizeof (ip6_header_t) +
                                 sizeof (udp_header_t));

        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
1185
/**
 * @brief Graph node dispatch function for "ip4-vxlan-gpe-bypass".
 *
 * Thin wrapper selecting the IPv4 variant of the shared bypass worker.
 */
VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
1192
/* *INDENT-OFF* */
/* Node registration: arcs either to error-drop or straight into the
 * IPv4 VXLAN-GPE decap node, per IP_VXLAN_BYPASS_NEXT_*. */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
  .name = "ip4-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-gpe-input",
  },

  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
1208
1209 #ifndef CLIB_MARCH_VARIANT
1210 /* Dummy init function to get us linked in. */
1211 clib_error_t *
1212 ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
1213 {
1214   return 0;
1215 }
1216
1217 VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
1218 #endif /* CLIB_MARCH_VARIANT */
1219
/**
 * @brief Graph node dispatch function for "ip6-vxlan-gpe-bypass".
 *
 * Thin wrapper selecting the IPv6 variant of the shared bypass worker.
 */
VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * frame)
{
  return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
1226
/* *INDENT-OFF* */
/* Node registration: arcs either to error-drop or straight into the
 * IPv6 VXLAN-GPE decap node, per IP_VXLAN_BYPASS_NEXT_*. */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
  .name = "ip6-vxlan-gpe-bypass",
  .vector_size = sizeof (u32),

  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-gpe-input",
  },

  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};
/* *INDENT-ON* */
1242
1243 #ifndef CLIB_MARCH_VARIANT
1244 /* Dummy init function to get us linked in. */
1245 clib_error_t *
1246 ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
1247 {
1248   return 0;
1249 }
1250
1251 VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
1252 #endif /* CLIB_MARCH_VARIANT */
1253
1254 /*
1255  * fd.io coding-style-patch-verification: ON
1256  *
1257  * Local Variables:
1258  * eval: (c-set-style "gnu")
1259  * End:
1260  */