VPP-275 Coding standards cleanup - vnet/vnet/vxlan-gpe
Author:     sharath reddy <sharathkumarboyanapally@gmail.com>
            Mon, 11 Dec 2017 06:01:31 +0000 (11:31 +0530)
Committer:  John Lo <loj@cisco.com>
            Wed, 13 Dec 2017 05:19:13 +0000 (05:19 +0000)
Change-Id: Ifabb8d22d20bc1031664d5f004e74cd363759ab6
Signed-off-by: sharath reddy <sharathkumarboyanapally@gmail.com>
src/vnet/vxlan-gpe/decap.c
src/vnet/vxlan-gpe/encap.c
src/vnet/vxlan-gpe/vxlan_gpe.c
src/vnet/vxlan-gpe/vxlan_gpe.h
src/vnet/vxlan-gpe/vxlan_gpe_packet.h
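
For readers skimming the hunks: this cleanup converges the files on the
GNU indent style that fd.io's coding-style patch verification enforces
(each file gains a trailer setting c-set-style "gnu" for Emacs, visible at
the end of the decap.c diff below). A minimal standalone sketch of the
conventions involved; the function here is illustrative, not taken from
the patch:

#include <stdio.h>

/* GNU style as applied in this commit: the return type of a function
   definition sits on its own line, opening braces go on their own line
   and are indented under control statements, a space separates a
   function name or sizeof from its '(', and '*' binds to the
   declarator rather than the type in pointer declarations. */
static int
count_positive (const int *v, int n)
{
  int i, count = 0;

  for (i = 0; i < n; i++)
    {
      if (v[i] > 0)
        count++;
    }
  return count;
}

int
main (void)
{
  int v[] = { 1, -2, 3 };
  printf ("%d\n", count_positive (v, 3));
  return 0;
}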

diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c
index 1b3a8b0..151ffa3 100644
@@ -30,7 +30,8 @@ vlib_node_registration_t vxlan_gpe_input_node;
  * @brief Struct for VXLAN GPE decap packet tracing
  *
  */
-typedef struct {
+typedef struct
+{
   u32 next_index;
   u32 tunnel_index;
   u32 error;
@@ -45,22 +46,23 @@ typedef struct {
  * @return *s
  *
  */
-static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
+static u8 *
+format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
+  vxlan_gpe_rx_trace_t *t = va_arg (*args, vxlan_gpe_rx_trace_t *);
 
   if (t->tunnel_index != ~0)
-  {
-    s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
-        t->next_index, t->error);
-  }
+    {
+      s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
+                 t->next_index, t->error);
+    }
   else
-  {
-    s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
-        t->error);
-  }
+    {
+      s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
+                 t->error);
+    }
   return s;
 }
 
@@ -73,7 +75,8 @@ static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
  * @return *s
  *
  */
-static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
+static u8 *
+format_vxlan_gpe_with_length (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -103,14 +106,13 @@ static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
  */
 always_inline uword
 vxlan_gpe_input (vlib_main_t * vm,
-                     vlib_node_runtime_t * node,
-                     vlib_frame_t * from_frame,
-                                        u8 is_ip4)
+                vlib_node_runtime_t * node,
+                vlib_frame_t * from_frame, u8 is_ip4)
 {
   u32 n_left_from, next_index, *from, *to_next;
-  vxlan_gpe_main_t * nngm = &vxlan_gpe_main;
-  vnet_main_t * vnm = nngm->vnet_main;
-  vnet_interface_main_t * im = &vnm->interface_main;
+  vxlan_gpe_main_t *nngm = &vxlan_gpe_main;
+  vnet_main_t *vnm = nngm->vnet_main;
+  vnet_interface_main_t *im = &vnm->interface_main;
   u32 last_tunnel_index = ~0;
   vxlan4_gpe_tunnel_key_t last_key4;
   vxlan6_gpe_tunnel_key_t last_key6;
@@ -119,9 +121,9 @@ vxlan_gpe_input (vlib_main_t * vm,
   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
 
   if (is_ip4)
-      memset (&last_key4, 0xff, sizeof(last_key4));
+    memset (&last_key4, 0xff, sizeof (last_key4));
   else
-      memset (&last_key6, 0xff, sizeof(last_key6));
+    memset (&last_key6, 0xff, sizeof (last_key6));
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -131,494 +133,523 @@ vxlan_gpe_input (vlib_main_t * vm,
   stats_n_packets = stats_n_bytes = 0;
 
   while (n_left_from > 0)
-  {
-    u32 n_left_to_next;
+    {
+      u32 n_left_to_next;
 
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
-    while (n_left_from >= 4 && n_left_to_next >= 2)
-    {
-      u32 bi0, bi1;
-      vlib_buffer_t * b0, *b1;
-      u32 next0, next1;
-      ip4_vxlan_gpe_header_t * iuvn4_0, *iuvn4_1;
-      ip6_vxlan_gpe_header_t * iuvn6_0, *iuvn6_1;
-      uword * p0, *p1;
-      u32 tunnel_index0, tunnel_index1;
-      vxlan_gpe_tunnel_t * t0, *t1;
-      vxlan4_gpe_tunnel_key_t key4_0, key4_1;
-      vxlan6_gpe_tunnel_key_t key6_0, key6_1;
-      u32 error0, error1;
-      u32 sw_if_index0, sw_if_index1, len0, len1;
-
-      /* Prefetch next iteration. */
-      {
-        vlib_buffer_t * p2, *p3;
-
-        p2 = vlib_get_buffer (vm, from[2]);
-        p3 = vlib_get_buffer (vm, from[3]);
-
-        vlib_prefetch_buffer_header(p2, LOAD);
-        vlib_prefetch_buffer_header(p3, LOAD);
-
-        CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
-        CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
-      }
-
-      bi0 = from[0];
-      bi1 = from[1];
-      to_next[0] = bi0;
-      to_next[1] = bi1;
-      from += 2;
-      to_next += 2;
-      n_left_to_next -= 2;
-      n_left_from -= 2;
-
-      b0 = vlib_get_buffer (vm, bi0);
-      b1 = vlib_get_buffer (vm, bi1);
-
-      if (is_ip4)
-      {
-        /* udp leaves current_data pointing at the vxlan-gpe header */
-        vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
-        vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
-
-        iuvn4_0 = vlib_buffer_get_current (b0);
-        iuvn4_1 = vlib_buffer_get_current (b1);
-
-        /* pop (ip, udp, vxlan) */
-        vlib_buffer_advance (b0, sizeof(*iuvn4_0));
-        vlib_buffer_advance (b1, sizeof(*iuvn4_1));
-      }
-      else
-      {
-        /* udp leaves current_data pointing at the vxlan-gpe header */
-        vlib_buffer_advance (b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
-        vlib_buffer_advance (b1, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
-
-        iuvn6_0 = vlib_buffer_get_current (b0);
-        iuvn6_1 = vlib_buffer_get_current (b1);
-
-        /* pop (ip, udp, vxlan) */
-        vlib_buffer_advance (b0, sizeof(*iuvn6_0));
-        vlib_buffer_advance (b1, sizeof(*iuvn6_1));
-      }
-
-      tunnel_index0 = ~0;
-      tunnel_index1 = ~0;
-      error0 = 0;
-      error1 = 0;
-
-      if (is_ip4)
-      {
-        next0 =
-            (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn4_0->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-        next1 =
-            (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn4_1->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-
-        key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
-        key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
-
-        key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
-        key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
-
-        key4_0.vni = iuvn4_0->vxlan.vni_res;
-        key4_1.vni = iuvn4_1->vxlan.vni_res;
-
-        key4_0.pad = 0;
-        key4_1.pad = 0;
-      }
-      else /* is_ip6 */
-      {
-        next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
-                iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
-        next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
-                iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
-
-        key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
-        key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
-        key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
-        key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
-
-        key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
-        key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
-        key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
-        key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
-
-        key6_0.vni = iuvn6_0->vxlan.vni_res;
-        key6_1.vni = iuvn6_1->vxlan.vni_res;
-      }
-
-      /* Processing packet 0*/
-      if (is_ip4)
-      {
-        /* Processing for key4_0 */
-        if (PREDICT_FALSE((key4_0.as_u64[0] != last_key4.as_u64[0])
-                || (key4_0.as_u64[1] != last_key4.as_u64[1])))
-        {
-          p0 = hash_get_mem(nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
-
-          if (p0 == 0)
-          {
-            error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace0;
-          }
-
-          last_key4.as_u64[0] = key4_0.as_u64[0];
-          last_key4.as_u64[1] = key4_0.as_u64[1];
-          tunnel_index0 = last_tunnel_index = p0[0];
-        }
-        else
-          tunnel_index0 = last_tunnel_index;
-      }
-      else /* is_ip6 */
-      {
-        next0 =
-            (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn6_0->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-        next1 =
-            (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn6_1->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-
-        key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
-        key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
-        key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
-        key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
-
-        key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
-        key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
-        key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
-        key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
-
-        key6_0.vni = iuvn6_0->vxlan.vni_res;
-        key6_1.vni = iuvn6_1->vxlan.vni_res;
-
-        /* Processing for key6_0 */
-        if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
-        {
-          p0 = hash_get_mem(nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
-
-          if (p0 == 0)
-          {
-            error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace0;
-          }
-
-          memcpy (&last_key6, &key6_0, sizeof(key6_0));
-          tunnel_index0 = last_tunnel_index = p0[0];
-        }
-        else
-          tunnel_index0 = last_tunnel_index;
-      }
-
-      t0 = pool_elt_at_index(nngm->tunnels, tunnel_index0);
-
-
-      sw_if_index0 = t0->sw_if_index;
-      len0 = vlib_buffer_length_in_chain (vm, b0);
-
-      /* Required to make the l2 tag push / pop code work on l2 subifs */
-      vnet_update_l2_len (b0);
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 bi0, bi1;
+         vlib_buffer_t *b0, *b1;
+         u32 next0, next1;
+         ip4_vxlan_gpe_header_t *iuvn4_0, *iuvn4_1;
+         ip6_vxlan_gpe_header_t *iuvn6_0, *iuvn6_1;
+         uword *p0, *p1;
+         u32 tunnel_index0, tunnel_index1;
+         vxlan_gpe_tunnel_t *t0, *t1;
+         vxlan4_gpe_tunnel_key_t key4_0, key4_1;
+         vxlan6_gpe_tunnel_key_t key6_0, key6_1;
+         u32 error0, error1;
+         u32 sw_if_index0, sw_if_index1, len0, len1;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+         }
+
+         bi0 = from[0];
+         bi1 = from[1];
+         to_next[0] = bi0;
+         to_next[1] = bi1;
+         from += 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         n_left_from -= 2;
+
+         b0 = vlib_get_buffer (vm, bi0);
+         b1 = vlib_get_buffer (vm, bi1);
+
+         if (is_ip4)
+           {
+             /* udp leaves current_data pointing at the vxlan-gpe header */
+             vlib_buffer_advance (b0,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip4_header_t)));
+             vlib_buffer_advance (b1,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip4_header_t)));
+
+             iuvn4_0 = vlib_buffer_get_current (b0);
+             iuvn4_1 = vlib_buffer_get_current (b1);
+
+             /* pop (ip, udp, vxlan) */
+             vlib_buffer_advance (b0, sizeof (*iuvn4_0));
+             vlib_buffer_advance (b1, sizeof (*iuvn4_1));
+           }
+         else
+           {
+             /* udp leaves current_data pointing at the vxlan-gpe header */
+             vlib_buffer_advance (b0,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip6_header_t)));
+             vlib_buffer_advance (b1,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip6_header_t)));
+
+             iuvn6_0 = vlib_buffer_get_current (b0);
+             iuvn6_1 = vlib_buffer_get_current (b1);
+
+             /* pop (ip, udp, vxlan) */
+             vlib_buffer_advance (b0, sizeof (*iuvn6_0));
+             vlib_buffer_advance (b1, sizeof (*iuvn6_1));
+           }
+
+         tunnel_index0 = ~0;
+         tunnel_index1 = ~0;
+         error0 = 0;
+         error1 = 0;
+
+         if (is_ip4)
+           {
+             next0 =
+               (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+             next1 =
+               (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn4_1->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+
+             key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+             key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
+
+             key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+             key4_1.remote = iuvn4_1->ip4.src_address.as_u32;
+
+             key4_0.vni = iuvn4_0->vxlan.vni_res;
+             key4_1.vni = iuvn4_1->vxlan.vni_res;
+
+             key4_0.pad = 0;
+             key4_1.pad = 0;
+           }
+         else                  /* is_ip6 */
+           {
+             next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
+               iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+             next1 = (iuvn6_1->vxlan.protocol < node->n_next_nodes) ?
+               iuvn6_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
+             key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+             key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+             key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
+             key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
+
+             key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+             key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+             key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
+             key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
+
+             key6_0.vni = iuvn6_0->vxlan.vni_res;
+             key6_1.vni = iuvn6_1->vxlan.vni_res;
+           }
+
+         /* Processing packet 0 */
+         if (is_ip4)
+           {
+             /* Processing for key4_0 */
+             if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
+                                || (key4_0.as_u64[1] !=
+                                    last_key4.as_u64[1])))
+               {
+                 p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+                 if (p0 == 0)
+                   {
+                     error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace0;
+                   }
+
+                 last_key4.as_u64[0] = key4_0.as_u64[0];
+                 last_key4.as_u64[1] = key4_0.as_u64[1];
+                 tunnel_index0 = last_tunnel_index = p0[0];
+               }
+             else
+               tunnel_index0 = last_tunnel_index;
+           }
+         else                  /* is_ip6 */
+           {
+             next0 =
+               (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+             next1 =
+               (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn6_1->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+
+             key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+             key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+             key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
+             key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
+
+             key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+             key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+             key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
+             key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
+
+             key6_0.vni = iuvn6_0->vxlan.vni_res;
+             key6_1.vni = iuvn6_1->vxlan.vni_res;
+
+             /* Processing for key6_0 */
+             if (PREDICT_FALSE
+                 (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
+               {
+                 p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+                 if (p0 == 0)
+                   {
+                     error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace0;
+                   }
+
+                 memcpy (&last_key6, &key6_0, sizeof (key6_0));
+                 tunnel_index0 = last_tunnel_index = p0[0];
+               }
+             else
+               tunnel_index0 = last_tunnel_index;
+           }
+
+         t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
+
+
+         sw_if_index0 = t0->sw_if_index;
+         len0 = vlib_buffer_length_in_chain (vm, b0);
+
+         /* Required to make the l2 tag push / pop code work on l2 subifs */
+         vnet_update_l2_len (b0);
 
       /**
        * ip[46] lookup in the configured FIB
        */
-      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
-
-      pkts_decapsulated++;
-      stats_n_packets += 1;
-      stats_n_bytes += len0;
-
-      if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
-      {
-        stats_n_packets -= 1;
-        stats_n_bytes -= len0;
-        if (stats_n_packets)
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
-              thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
-        stats_n_packets = 1;
-        stats_n_bytes = len0;
-        stats_sw_if_index = sw_if_index0;
-      }
-
-      trace0: b0->error = error0 ? node->errors[error0] : 0;
-
-      if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
-        tr->next_index = next0;
-        tr->error = error0;
-        tr->tunnel_index = tunnel_index0;
-      }
-
-      /* Process packet 1 */
-      if (is_ip4)
-      {
-        /* Processing for key4_1 */
-        if (PREDICT_FALSE(
-            (key4_1.as_u64[0] != last_key4.as_u64[0])
-                || (key4_1.as_u64[1] != last_key4.as_u64[1])))
-        {
-          p1 = hash_get_mem(nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
-
-          if (p1 == 0)
-          {
-            error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace1;
-          }
-
-          last_key4.as_u64[0] = key4_1.as_u64[0];
-          last_key4.as_u64[1] = key4_1.as_u64[1];
-          tunnel_index1 = last_tunnel_index = p1[0];
-        }
-        else
-          tunnel_index1 = last_tunnel_index;
-      }
-      else /* is_ip6 */
-      {
-        /* Processing for key6_1 */
-        if (PREDICT_FALSE(memcmp (&key6_1, &last_key6, sizeof(last_key6)) != 0))
-        {
-          p1 = hash_get_mem(nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
-
-          if (p1 == 0)
-          {
-            error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace1;
-          }
-
-          memcpy (&last_key6, &key6_1, sizeof(key6_1));
-          tunnel_index1 = last_tunnel_index = p1[0];
-        }
-        else
-          tunnel_index1 = last_tunnel_index;
-      }
-
-      t1 = pool_elt_at_index(nngm->tunnels, tunnel_index1);
-
-      sw_if_index1 = t1->sw_if_index;
-      len1 = vlib_buffer_length_in_chain (vm, b1);
-
-      /* Required to make the l2 tag push / pop code work on l2 subifs */
-      vnet_update_l2_len (b1);
-
-      /*
-       * ip[46] lookup in the configured FIB
-       */
-      vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-
-      pkts_decapsulated++;
-      stats_n_packets += 1;
-      stats_n_bytes += len1;
-
-      /* Batch stats increment on the same vxlan tunnel so counter
-       is not incremented per packet */
-      if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
-      {
-        stats_n_packets -= 1;
-        stats_n_bytes -= len1;
-        if (stats_n_packets)
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
-              thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
-        stats_n_packets = 1;
-        stats_n_bytes = len1;
-        stats_sw_if_index = sw_if_index1;
-      }
-      vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-
-      trace1: b1->error = error1 ? node->errors[error1] : 0;
-
-      if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof(*tr));
-        tr->next_index = next1;
-        tr->error = error1;
-        tr->tunnel_index = tunnel_index1;
-      }
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
-                                      n_left_to_next, bi0, bi1, next0, next1);
-    }
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
 
-    while (n_left_from > 0 && n_left_to_next > 0)
-    {
-      u32 bi0;
-      vlib_buffer_t * b0;
-      u32 next0;
-      ip4_vxlan_gpe_header_t * iuvn4_0;
-      ip6_vxlan_gpe_header_t * iuvn6_0;
-      uword * p0;
-      u32 tunnel_index0;
-      vxlan_gpe_tunnel_t * t0;
-      vxlan4_gpe_tunnel_key_t key4_0;
-      vxlan6_gpe_tunnel_key_t key6_0;
-      u32 error0;
-      u32 sw_if_index0, len0;
-
-      bi0 = from[0];
-      to_next[0] = bi0;
-      from += 1;
-      to_next += 1;
-      n_left_from -= 1;
-      n_left_to_next -= 1;
-
-      b0 = vlib_get_buffer (vm, bi0);
-
-      if (is_ip4)
-      {
-        /* udp leaves current_data pointing at the vxlan-gpe header */
-        vlib_buffer_advance (
-            b0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
-
-        iuvn4_0 = vlib_buffer_get_current (b0);
-
-        /* pop (ip, udp, vxlan) */
-        vlib_buffer_advance (b0, sizeof(*iuvn4_0));
-      }
-      else
-      {
-        /* udp leaves current_data pointing at the vxlan-gpe header */
-        vlib_buffer_advance (
-            b0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
-
-        iuvn6_0 = vlib_buffer_get_current (b0);
-
-        /* pop (ip, udp, vxlan) */
-        vlib_buffer_advance (b0, sizeof(*iuvn6_0));
-      }
-
-      tunnel_index0 = ~0;
-      error0 = 0;
-
-      if (is_ip4)
-      {
-        next0 =
-            (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn4_0->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-
-        key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
-        key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
-        key4_0.vni = iuvn4_0->vxlan.vni_res;
-        key4_0.pad = 0;
-
-        /* Processing for key4_0 */
-        if (PREDICT_FALSE(
-            (key4_0.as_u64[0] != last_key4.as_u64[0])
-                || (key4_0.as_u64[1] != last_key4.as_u64[1])))
-        {
-          p0 = hash_get_mem(nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
-
-          if (p0 == 0)
-          {
-            error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace00;
-          }
-
-          last_key4.as_u64[0] = key4_0.as_u64[0];
-          last_key4.as_u64[1] = key4_0.as_u64[1];
-          tunnel_index0 = last_tunnel_index = p0[0];
-        }
-        else
-          tunnel_index0 = last_tunnel_index;
-      }
-      else /* is_ip6 */
-      {
-        next0 =
-            (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
-            nngm->decap_next_node_list[iuvn6_0->vxlan.protocol]: \
-            VXLAN_GPE_INPUT_NEXT_DROP;
-
-        key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
-        key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
-        key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
-        key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
-        key6_0.vni = iuvn6_0->vxlan.vni_res;
-
-        /* Processing for key6_0 */
-        if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
-        {
-          p0 = hash_get_mem(nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
-
-          if (p0 == 0)
-          {
-            error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
-            goto trace00;
-          }
-
-          memcpy (&last_key6, &key6_0, sizeof(key6_0));
-          tunnel_index0 = last_tunnel_index = p0[0];
-        }
-        else
-          tunnel_index0 = last_tunnel_index;
-      }
-
-      t0 = pool_elt_at_index(nngm->tunnels, tunnel_index0);
-
-
-      sw_if_index0 = t0->sw_if_index;
-      len0 = vlib_buffer_length_in_chain (vm, b0);
-
-      /* Required to make the l2 tag push / pop code work on l2 subifs */
-      vnet_update_l2_len (b0);
-
-      /*
-       * ip[46] lookup in the configured FIB
-       */
-      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
-
-      pkts_decapsulated++;
-      stats_n_packets += 1;
-      stats_n_bytes += len0;
-
-      /* Batch stats increment on the same vxlan-gpe tunnel so counter
-       is not incremented per packet */
-      if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
-      {
-        stats_n_packets -= 1;
-        stats_n_bytes -= len0;
-        if (stats_n_packets)
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
-              thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
-        stats_n_packets = 1;
-        stats_n_bytes = len0;
-        stats_sw_if_index = sw_if_index0;
-      }
-
-      trace00: b0->error = error0 ? node->errors[error0] : 0;
-
-      if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_rx_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
-        tr->next_index = next0;
-        tr->error = error0;
-        tr->tunnel_index = tunnel_index0;
-      }
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
-                                      n_left_to_next, bi0, next0);
-    }
+         pkts_decapsulated++;
+         stats_n_packets += 1;
+         stats_n_bytes += len0;
+
+         if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+           {
+             stats_n_packets -= 1;
+             stats_n_bytes -= len0;
+             if (stats_n_packets)
+               vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                                VNET_INTERFACE_COUNTER_RX,
+                                                thread_index,
+                                                stats_sw_if_index,
+                                                stats_n_packets,
+                                                stats_n_bytes);
+             stats_n_packets = 1;
+             stats_n_bytes = len0;
+             stats_sw_if_index = sw_if_index0;
+           }
+
+       trace0:b0->error = error0 ? node->errors[error0] : 0;
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_rx_trace_t *tr =
+               vlib_add_trace (vm, node, b0, sizeof (*tr));
+             tr->next_index = next0;
+             tr->error = error0;
+             tr->tunnel_index = tunnel_index0;
+           }
+
+         /* Process packet 1 */
+         if (is_ip4)
+           {
+             /* Processing for key4_1 */
+             if (PREDICT_FALSE ((key4_1.as_u64[0] != last_key4.as_u64[0])
+                                || (key4_1.as_u64[1] !=
+                                    last_key4.as_u64[1])))
+               {
+                 p1 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_1);
+
+                 if (p1 == 0)
+                   {
+                     error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace1;
+                   }
+
+                 last_key4.as_u64[0] = key4_1.as_u64[0];
+                 last_key4.as_u64[1] = key4_1.as_u64[1];
+                 tunnel_index1 = last_tunnel_index = p1[0];
+               }
+             else
+               tunnel_index1 = last_tunnel_index;
+           }
+         else                  /* is_ip6 */
+           {
+             /* Processing for key6_1 */
+             if (PREDICT_FALSE
+                 (memcmp (&key6_1, &last_key6, sizeof (last_key6)) != 0))
+               {
+                 p1 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_1);
+
+                 if (p1 == 0)
+                   {
+                     error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace1;
+                   }
+
+                 memcpy (&last_key6, &key6_1, sizeof (key6_1));
+                 tunnel_index1 = last_tunnel_index = p1[0];
+               }
+             else
+               tunnel_index1 = last_tunnel_index;
+           }
+
+         t1 = pool_elt_at_index (nngm->tunnels, tunnel_index1);
+
+         sw_if_index1 = t1->sw_if_index;
+         len1 = vlib_buffer_length_in_chain (vm, b1);
+
+         /* Required to make the l2 tag push / pop code work on l2 subifs */
+         vnet_update_l2_len (b1);
+
+         /*
+          * ip[46] lookup in the configured FIB
+          */
+         vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
 
-    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-  }
+         pkts_decapsulated++;
+         stats_n_packets += 1;
+         stats_n_bytes += len1;
+
+         /* Batch stats increment on the same vxlan tunnel so counter
+            is not incremented per packet */
+         if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
+           {
+             stats_n_packets -= 1;
+             stats_n_bytes -= len1;
+             if (stats_n_packets)
+               vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                                VNET_INTERFACE_COUNTER_RX,
+                                                thread_index,
+                                                stats_sw_if_index,
+                                                stats_n_packets,
+                                                stats_n_bytes);
+             stats_n_packets = 1;
+             stats_n_bytes = len1;
+             stats_sw_if_index = sw_if_index1;
+           }
+         vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
+
+       trace1:b1->error = error1 ? node->errors[error1] : 0;
+
+         if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_rx_trace_t *tr =
+               vlib_add_trace (vm, node, b1, sizeof (*tr));
+             tr->next_index = next1;
+             tr->error = error1;
+             tr->tunnel_index = tunnel_index1;
+           }
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, bi1, next0,
+                                          next1);
+       }
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 bi0;
+         vlib_buffer_t *b0;
+         u32 next0;
+         ip4_vxlan_gpe_header_t *iuvn4_0;
+         ip6_vxlan_gpe_header_t *iuvn6_0;
+         uword *p0;
+         u32 tunnel_index0;
+         vxlan_gpe_tunnel_t *t0;
+         vxlan4_gpe_tunnel_key_t key4_0;
+         vxlan6_gpe_tunnel_key_t key6_0;
+         u32 error0;
+         u32 sw_if_index0, len0;
+
+         bi0 = from[0];
+         to_next[0] = bi0;
+         from += 1;
+         to_next += 1;
+         n_left_from -= 1;
+         n_left_to_next -= 1;
+
+         b0 = vlib_get_buffer (vm, bi0);
+
+         if (is_ip4)
+           {
+             /* udp leaves current_data pointing at the vxlan-gpe header */
+             vlib_buffer_advance (b0,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip4_header_t)));
+
+             iuvn4_0 = vlib_buffer_get_current (b0);
+
+             /* pop (ip, udp, vxlan) */
+             vlib_buffer_advance (b0, sizeof (*iuvn4_0));
+           }
+         else
+           {
+             /* udp leaves current_data pointing at the vxlan-gpe header */
+             vlib_buffer_advance (b0,
+                                  -(word) (sizeof (udp_header_t) +
+                                           sizeof (ip6_header_t)));
+
+             iuvn6_0 = vlib_buffer_get_current (b0);
+
+             /* pop (ip, udp, vxlan) */
+             vlib_buffer_advance (b0, sizeof (*iuvn6_0));
+           }
+
+         tunnel_index0 = ~0;
+         error0 = 0;
+
+         if (is_ip4)
+           {
+             next0 =
+               (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn4_0->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+
+             key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
+             key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
+             key4_0.vni = iuvn4_0->vxlan.vni_res;
+             key4_0.pad = 0;
+
+             /* Processing for key4_0 */
+             if (PREDICT_FALSE ((key4_0.as_u64[0] != last_key4.as_u64[0])
+                                || (key4_0.as_u64[1] !=
+                                    last_key4.as_u64[1])))
+               {
+                 p0 = hash_get_mem (nngm->vxlan4_gpe_tunnel_by_key, &key4_0);
+
+                 if (p0 == 0)
+                   {
+                     error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace00;
+                   }
+
+                 last_key4.as_u64[0] = key4_0.as_u64[0];
+                 last_key4.as_u64[1] = key4_0.as_u64[1];
+                 tunnel_index0 = last_tunnel_index = p0[0];
+               }
+             else
+               tunnel_index0 = last_tunnel_index;
+           }
+         else                  /* is_ip6 */
+           {
+             next0 =
+               (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+               nngm->decap_next_node_list[iuvn6_0->vxlan.protocol] :
+               VXLAN_GPE_INPUT_NEXT_DROP;
+
+             key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+             key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+             key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+             key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+             key6_0.vni = iuvn6_0->vxlan.vni_res;
+
+             /* Processing for key6_0 */
+             if (PREDICT_FALSE
+                 (memcmp (&key6_0, &last_key6, sizeof (last_key6)) != 0))
+               {
+                 p0 = hash_get_mem (nngm->vxlan6_gpe_tunnel_by_key, &key6_0);
+
+                 if (p0 == 0)
+                   {
+                     error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
+                     goto trace00;
+                   }
+
+                 memcpy (&last_key6, &key6_0, sizeof (key6_0));
+                 tunnel_index0 = last_tunnel_index = p0[0];
+               }
+             else
+               tunnel_index0 = last_tunnel_index;
+           }
+
+         t0 = pool_elt_at_index (nngm->tunnels, tunnel_index0);
+
+
+         sw_if_index0 = t0->sw_if_index;
+         len0 = vlib_buffer_length_in_chain (vm, b0);
+
+         /* Required to make the l2 tag push / pop code work on l2 subifs */
+         vnet_update_l2_len (b0);
+
+         /*
+          * ip[46] lookup in the configured FIB
+          */
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+         pkts_decapsulated++;
+         stats_n_packets += 1;
+         stats_n_bytes += len0;
+
+         /* Batch stats increment on the same vxlan-gpe tunnel so counter
+            is not incremented per packet */
+         if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+           {
+             stats_n_packets -= 1;
+             stats_n_bytes -= len0;
+             if (stats_n_packets)
+               vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                                VNET_INTERFACE_COUNTER_RX,
+                                                thread_index,
+                                                stats_sw_if_index,
+                                                stats_n_packets,
+                                                stats_n_bytes);
+             stats_n_packets = 1;
+             stats_n_bytes = len0;
+             stats_sw_if_index = sw_if_index0;
+           }
+
+       trace00:b0->error = error0 ? node->errors[error0] : 0;
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_rx_trace_t *tr =
+               vlib_add_trace (vm, node, b0, sizeof (*tr));
+             tr->next_index = next0;
+             tr->error = error0;
+             tr->tunnel_index = tunnel_index0;
+           }
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, next0);
+       }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
   vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
-                               VXLAN_GPE_ERROR_DECAPSULATED, pkts_decapsulated);
+                              VXLAN_GPE_ERROR_DECAPSULATED,
+                              pkts_decapsulated);
   /* Increment any remaining batch stats */
   if (stats_n_packets)
-  {
-    vlib_increment_combined_counter (
-        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, thread_index,
-        stats_sw_if_index, stats_n_packets, stats_n_bytes);
-    node->runtime_data[0] = stats_sw_if_index;
-  }
+    {
+      vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                      VNET_INTERFACE_COUNTER_RX,
+                                      thread_index, stats_sw_if_index,
+                                      stats_n_packets, stats_n_bytes);
+      node->runtime_data[0] = stats_sw_if_index;
+    }
   return from_frame->n_vectors;
 }
 
@@ -635,9 +666,9 @@ vxlan_gpe_input (vlib_main_t * vm,
  */
 static uword
 vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
-                  vlib_frame_t * from_frame)
+                 vlib_frame_t * from_frame)
 {
-  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */1);
+  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
 }
 
 
@@ -671,21 +702,22 @@ vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
  */
 static uword
 vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
-                  vlib_frame_t * from_frame)
+                 vlib_frame_t * from_frame)
 {
-  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */0);
+  return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
 }
 
 /**
  * @brief VXLAN GPE error strings
  */
-static char * vxlan_gpe_error_strings[] = {
+static char *vxlan_gpe_error_strings[] = {
 #define vxlan_gpe_error(n,s) s,
 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
 #undef vxlan_gpe_error
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
   .function = vxlan4_gpe_input,
   .name = "vxlan4-gpe-input",
@@ -706,9 +738,11 @@ VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
   .format_trace = format_vxlan_gpe_rx_trace,
   // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
 };
+/* *INDENT-ON* */
 
 VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
   .function = vxlan6_gpe_input,
   .name = "vxlan6-gpe-input",
@@ -729,9 +763,11 @@ VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
   .format_trace = format_vxlan_gpe_rx_trace,
   // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
 };
+/* *INDENT-ON* */
 
 VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
-typedef enum {
+typedef enum
+{
   IP_VXLAN_BYPASS_NEXT_DROP,
   IP_VXLAN_BYPASS_NEXT_VXLAN,
   IP_VXLAN_BYPASS_N_NEXT,
@@ -739,15 +775,15 @@ typedef enum {
 
 always_inline uword
 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
-                       vlib_node_runtime_t * node,
-                       vlib_frame_t * frame,
-                       u32 is_ip4)
+                           vlib_node_runtime_t * node,
+                           vlib_frame_t * frame, u32 is_ip4)
 {
-  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
-  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
-  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
-  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
-  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip4_input_node.index);
+  ip4_address_t addr4;         /* last IPv4 address matching a local VTEP address */
+  ip6_address_t addr6;         /* last IPv6 address matching a local VTEP address */
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -756,28 +792,30 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
 
-  if (is_ip4) addr4.data_u32 = ~0;
-  else ip6_address_set_zero (&addr6);
+  if (is_ip4)
+    addr4.data_u32 = ~0;
+  else
+    ip6_address_set_zero (&addr6);
 
   while (n_left_from > 0)
     {
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
       while (n_left_from >= 4 && n_left_to_next >= 2)
-       {
-         vlib_buffer_t * b0, * b1;
-         ip4_header_t * ip40, * ip41;
-         ip6_header_t * ip60, * ip61;
-         udp_header_t * udp0, * udp1;
-         u32 bi0, ip_len0, udp_len0, flags0, next0;
-         u32 bi1, ip_len1, udp_len1, flags1, next1;
-         i32 len_diff0, len_diff1;
-         u8 error0, good_udp0, proto0;
-         u8 error1, good_udp1, proto1;
+       {
+         vlib_buffer_t *b0, *b1;
+         ip4_header_t *ip40, *ip41;
+         ip6_header_t *ip60, *ip61;
+         udp_header_t *udp0, *udp1;
+         u32 bi0, ip_len0, udp_len0, flags0, next0;
+         u32 bi1, ip_len1, udp_len1, flags1, next1;
+         i32 len_diff0, len_diff1;
+         u8 error0, good_udp0, proto0;
+         u8 error1, good_udp1, proto1;
 
          /* Prefetch next iteration. */
          {
-           vlib_buffer_t * p2, * p3;
+           vlib_buffer_t *p2, *p3;
 
            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
@@ -785,16 +823,16 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
 
-           CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
-           CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
 
-         bi0 = to_next[0] = from[0];
-         bi1 = to_next[1] = from[1];
-         from += 2;
-         n_left_from -= 2;
-         to_next += 2;
-         n_left_to_next -= 2;
+         bi0 = to_next[0] = from[0];
+         bi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
 
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
@@ -810,8 +848,10 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            }
 
          /* Setup packet for next IP feature */
-         vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
-         vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);
+         vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
+                            b0);
+         vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
+                            b1);
 
          if (is_ip4)
            {
@@ -826,7 +866,7 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
          /* Process packet 0 */
          if (proto0 != IP_PROTOCOL_UDP)
-           goto exit0; /* not UDP packet */
+           goto exit0;         /* not UDP packet */
 
          if (is_ip4)
            udp0 = ip4_next_header (ip40);
@@ -834,26 +874,26 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            udp0 = ip6_next_header (ip60);
 
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
-           goto exit0; /* not VXLAN packet */
+           goto exit0;         /* not VXLAN packet */
 
-         /* Validate DIP against VTEPs*/
+         /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
-               {
+               {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
-                     goto exit0; /* no local VTEP for VXLAN packet */
+                   goto exit0; /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
-               }
+               }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
-               {
+               {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
-                     goto exit0; /* no local VTEP for VXLAN packet */
+                   goto exit0; /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
-               }
+               }
            }
 
          flags0 = b0->flags;
@@ -874,14 +914,14 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
-               {
+               {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
-               }
+               }
            }
 
          if (is_ip4)
@@ -901,14 +941,18 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
-           vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b0,
+                                sizeof (ip4_header_t) +
+                                sizeof (udp_header_t));
          else
-           vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b0,
+                                sizeof (ip6_header_t) +
+                                sizeof (udp_header_t));
 
        exit0:
          /* Process packet 1 */
          if (proto1 != IP_PROTOCOL_UDP)
-           goto exit1; /* not UDP packet */
+           goto exit1;         /* not UDP packet */
 
          if (is_ip4)
            udp1 = ip4_next_header (ip41);
@@ -916,24 +960,24 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            udp1 = ip6_next_header (ip61);
 
          if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
-           goto exit1; /* not VXLAN packet */
+           goto exit1;         /* not VXLAN packet */
 
-         /* Validate DIP against VTEPs*/
+         /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip41->dst_address.as_u32)
-               {
+               {
                  if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
-                     goto exit1; /* no local VTEP for VXLAN packet */
+                   goto exit1; /* no local VTEP for VXLAN packet */
                  addr4 = ip41->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
-               {
+               {
                  if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
-                     goto exit1; /* no local VTEP for VXLAN packet */
+                   goto exit1; /* no local VTEP for VXLAN packet */
                  addr6 = ip61->dst_address;
                }
            }
@@ -956,14 +1000,14 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
          if (PREDICT_FALSE (!good_udp1))
            {
              if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
-               {
+               {
                  if (is_ip4)
                    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
                  else
                    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
                  good_udp1 =
                    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
-               }
+               }
            }
 
          if (is_ip4)
@@ -983,9 +1027,13 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
-           vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b1,
+                                sizeof (ip4_header_t) +
+                                sizeof (udp_header_t));
          else
-           vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b1,
+                                sizeof (ip6_header_t) +
+                                sizeof (udp_header_t));
 
        exit1:
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
@@ -995,11 +1043,11 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
       while (n_left_from > 0 && n_left_to_next > 0)
        {
-         vlib_buffer_t * b0;
-         ip4_header_t * ip40;
-         ip6_header_t * ip60;
-         udp_header_t * udp0;
-         u32 bi0, ip_len0, udp_len0, flags0, next0;
+         vlib_buffer_t *b0;
+         ip4_header_t *ip40;
+         ip6_header_t *ip60;
+         udp_header_t *udp0;
+         u32 bi0, ip_len0, udp_len0, flags0, next0;
          i32 len_diff0;
          u8 error0, good_udp0, proto0;
 
@@ -1016,7 +1064,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            ip60 = vlib_buffer_get_current (b0);
 
          /* Setup packet for next IP feature */
-         vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
+         vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
+                            b0);
 
          if (is_ip4)
            proto0 = ip40->protocol;
@@ -1024,7 +1073,7 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            proto0 = ip60->protocol;
 
          if (proto0 != IP_PROTOCOL_UDP)
-           goto exit; /* not UDP packet */
+           goto exit;          /* not UDP packet */
 
          if (is_ip4)
            udp0 = ip4_next_header (ip40);
@@ -1032,24 +1081,24 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
            udp0 = ip6_next_header (ip60);
 
          if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_VXLAN_GPE))
-           goto exit; /* not VXLAN packet */
+           goto exit;          /* not VXLAN packet */
 
-         /* Validate DIP against VTEPs*/
+         /* Validate DIP against VTEPs */
          if (is_ip4)
            {
              if (addr4.as_u32 != ip40->dst_address.as_u32)
-               {
+               {
                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
-                     goto exit; /* no local VTEP for VXLAN packet */
+                   goto exit;  /* no local VTEP for VXLAN packet */
                  addr4 = ip40->dst_address;
                }
            }
          else
            {
              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
-               {
+               {
                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
-                     goto exit; /* no local VTEP for VXLAN packet */
+                   goto exit;  /* no local VTEP for VXLAN packet */
                  addr6 = ip60->dst_address;
                }
            }
@@ -1072,14 +1121,14 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
          if (PREDICT_FALSE (!good_udp0))
            {
              if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
-               {
+               {
                  if (is_ip4)
                    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
                  else
                    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
                  good_udp0 =
                    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
-               }
+               }
            }
 
          if (is_ip4)
@@ -1099,9 +1148,13 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
          /* vxlan_gpe-input node expect current at VXLAN header */
          if (is_ip4)
-           vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b0,
+                                sizeof (ip4_header_t) +
+                                sizeof (udp_header_t));
          else
-           vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
+           vlib_buffer_advance (b0,
+                                sizeof (ip6_header_t) +
+                                sizeof (udp_header_t));
 
        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -1117,12 +1170,12 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
 
 static uword
 ip4_vxlan_gpe_bypass (vlib_main_t * vm,
-                 vlib_node_runtime_t * node,
-                 vlib_frame_t * frame)
+                     vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
   .function = ip4_vxlan_gpe_bypass,
   .name = "ip4-vxlan-gpe-bypass",
@@ -1137,23 +1190,25 @@ VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
   .format_buffer = format_ip4_header,
   .format_trace = format_ip4_forward_next_trace,
 };
+/* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node,ip4_vxlan_gpe_bypass)
-
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
 /* Dummy init function to get us linked in. */
-clib_error_t * ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
-{ return 0; }
+     clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
+{
+  return 0;
+}
 
 VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
 
 static uword
 ip6_vxlan_gpe_bypass (vlib_main_t * vm,
-                 vlib_node_runtime_t * node,
-                 vlib_frame_t * frame)
+                     vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
   .function = ip6_vxlan_gpe_bypass,
   .name = "ip6-vxlan-gpe-bypass",
@@ -1168,11 +1223,21 @@ VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
   .format_buffer = format_ip6_header,
   .format_trace = format_ip6_forward_next_trace,
 };
+/* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node,ip6_vxlan_gpe_bypass)
-
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
 /* Dummy init function to get us linked in. */
-clib_error_t * ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
-{ return 0; }
+     clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
+{
+  return 0;
+}
 
 VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
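
The trailer above switches on fd.io's coding-style patch verification and
pins the file to GNU C style, which is the convention this cleanup applies
across all five files. As a minimal illustrative sketch (not part of the
patch), using a statement of the kind these files contain:

    /* before: K&R-style braces, flagged by checkstyle */
    if (ip46_address_is_multicast (&t->remote))
    {
      s = format (s, "mcast_sw_if_index %d ", t->mcast_sw_if_index);
    }

    /* after: GNU style -- braces on their own lines indented two spaces,
     * body indented two more, and a space before every '(' */
    if (ip46_address_is_multicast (&t->remote))
      {
        s = format (s, "mcast_sw_if_index %d ", t->mcast_sw_if_index);
      }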
index 67ed94b..71faac5 100644 (file)
@@ -31,7 +31,7 @@ _(ENCAPSULATED, "good packets encapsulated")
 /**
  * @brief VXLAN GPE encap error strings
  */
-static char * vxlan_gpe_encap_error_strings[] = {
+static char *vxlan_gpe_encap_error_strings[] = {
 #define _(sym,string) string,
   foreach_vxlan_gpe_encap_error
 #undef _
@@ -40,9 +40,10 @@ static char * vxlan_gpe_encap_error_strings[] = {
 /**
  * @brief Enum for VXLAN GPE errors/counters
  */
-typedef enum {
+typedef enum
+{
 #define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
-    foreach_vxlan_gpe_encap_error
+  foreach_vxlan_gpe_encap_error
 #undef _
     VXLAN_GPE_ENCAP_N_ERROR,
 } vxlan_gpe_encap_error_t;
@@ -50,7 +51,8 @@ typedef enum {
 /**
  * @brief Struct for tracing VXLAN GPE encapsulated packets
  */
-typedef struct {
+typedef struct
+{
   u32 tunnel_index;
 } vxlan_gpe_encap_trace_t;
 
@@ -63,12 +65,12 @@ typedef struct {
  * @return *s
  *
  */
-u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
+u8 *
+format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
 {
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  vxlan_gpe_encap_trace_t * t
-      = va_arg (*args, vxlan_gpe_encap_trace_t *);
+  vxlan_gpe_encap_trace_t *t = va_arg (*args, vxlan_gpe_encap_trace_t *);
 
   s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
   return s;
@@ -86,11 +88,10 @@ u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
  */
 always_inline void
 vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
-                            vxlan_gpe_tunnel_t * t0, u32 * next0,
-                            u8 is_v4)
+                           vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
 {
-  ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
-  ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
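+  /* rewrite layout sanity check: the ip4 variant is 20 (ip4 header) +
+   * 8 (udp) + 8 (vxlan-gpe) = 36 bytes; the ip6 variant is
+   * 40 (ip6 header) + 8 (udp) + 8 (vxlan-gpe) = 56 bytes */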
+  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
+  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
 
   ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
   next0[0] = t0->encap_next_node;
@@ -111,12 +112,12 @@ vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
  */
 always_inline void
 vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
-                            vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
-                            vxlan_gpe_tunnel_t * t1, u32 * next0,
-                            u32 * next1, u8 is_v4)
+                           vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
+                           vxlan_gpe_tunnel_t * t1, u32 * next0,
+                           u32 * next1, u8 is_v4)
 {
-  ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
-  ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
+  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
+  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
 
   ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
   ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
@@ -143,13 +144,12 @@ vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
  */
 static uword
 vxlan_gpe_encap (vlib_main_t * vm,
-               vlib_node_runtime_t * node,
-               vlib_frame_t * from_frame)
+                vlib_node_runtime_t * node, vlib_frame_t * from_frame)
 {
   u32 n_left_from, next_index, *from, *to_next;
-  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
-  vnet_main_t * vnm = ngm->vnet_main;
-  vnet_interface_main_t * im = &vnm->interface_main;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  vnet_main_t *vnm = ngm->vnet_main;
+  vnet_interface_main_t *im = &vnm->interface_main;
   u32 pkts_encapsulated = 0;
   u32 thread_index = vlib_get_thread_index ();
   u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
@@ -162,211 +162,233 @@ vxlan_gpe_encap (vlib_main_t * vm,
   stats_n_packets = stats_n_bytes = 0;
 
   while (n_left_from > 0)
-  {
-    u32 n_left_to_next;
-
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    while (n_left_from >= 4 && n_left_to_next >= 2)
-    {
-      u32 bi0, bi1;
-      vlib_buffer_t * b0, *b1;
-      u32 next0, next1;
-      u32 sw_if_index0, sw_if_index1, len0, len1;
-      vnet_hw_interface_t * hi0, *hi1;
-      vxlan_gpe_tunnel_t * t0, *t1;
-      u8 is_ip4_0, is_ip4_1;
-
-      next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
-
-      /* Prefetch next iteration. */
-      {
-        vlib_buffer_t * p2, *p3;
-
-        p2 = vlib_get_buffer (vm, from[2]);
-        p3 = vlib_get_buffer (vm, from[3]);
-
-        vlib_prefetch_buffer_header(p2, LOAD);
-        vlib_prefetch_buffer_header(p3, LOAD);
-
-        CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
-        CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
-      }
-
-      bi0 = from[0];
-      bi1 = from[1];
-      to_next[0] = bi0;
-      to_next[1] = bi1;
-      from += 2;
-      to_next += 2;
-      n_left_to_next -= 2;
-      n_left_from -= 2;
-
-      b0 = vlib_get_buffer (vm, bi0);
-      b1 = vlib_get_buffer (vm, bi1);
-
-      /* 1-wide cache? */
-      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
-      sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
-      hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-      hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
-
-      t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
-      t1 = pool_elt_at_index(ngm->tunnels, hi1->dev_instance);
-
-      is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
-      is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
-
-      if (PREDICT_TRUE(is_ip4_0 == is_ip4_1))
-      {
-        vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1,is_ip4_0);
-      }
-      else
-      {
-        vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
-        vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
-      }
-
-      /* Reset to look up tunnel partner in the configured FIB */
-      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
-      vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
-      vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
-      vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
-      pkts_encapsulated += 2;
-
-      len0 = vlib_buffer_length_in_chain (vm, b0);
-      len1 = vlib_buffer_length_in_chain (vm, b0);
-      stats_n_packets += 2;
-      stats_n_bytes += len0 + len1;
-
-      /* Batch stats increment on the same vxlan tunnel so counter is not
-       incremented per packet. Note stats are still incremented for deleted
-       and admin-down tunnel where packets are dropped. It is not worthwhile
-       to check for this rare case and affect normal path performance. */
-      if (PREDICT_FALSE((sw_if_index0 != stats_sw_if_index)
-              || (sw_if_index1 != stats_sw_if_index)))
-      {
-        stats_n_packets -= 2;
-        stats_n_bytes -= len0 + len1;
-        if (sw_if_index0 == sw_if_index1)
-        {
-          if (stats_n_packets)
-            vlib_increment_combined_counter (
-                im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
-                thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
-          stats_sw_if_index = sw_if_index0;
-          stats_n_packets = 2;
-          stats_n_bytes = len0 + len1;
-        }
-        else
-        {
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
-              thread_index, sw_if_index0, 1, len0);
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
-              thread_index, sw_if_index1, 1, len1);
-        }
-      }
-
-      if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof(*tr));
-        tr->tunnel_index = t0 - ngm->tunnels;
-      }
-
-      if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1,
-                                                      sizeof(*tr));
-        tr->tunnel_index = t1 - ngm->tunnels;
-      }
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
-                                      n_left_to_next, bi0, bi1, next0, next1);
-    }
-
-    while (n_left_from > 0 && n_left_to_next > 0)
     {
-      u32 bi0;
-      vlib_buffer_t * b0;
-      u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
-      u32 sw_if_index0, len0;
-      vnet_hw_interface_t * hi0;
-      vxlan_gpe_tunnel_t * t0;
-      u8 is_ip4_0;
-
-      bi0 = from[0];
-      to_next[0] = bi0;
-      from += 1;
-      to_next += 1;
-      n_left_from -= 1;
-      n_left_to_next -= 1;
-
-      b0 = vlib_get_buffer (vm, bi0);
-
-      /* 1-wide cache? */
-      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
-      hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-
-      t0 = pool_elt_at_index(ngm->tunnels, hi0->dev_instance);
-
-      is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
-
-      vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
-
-      /* Reset to look up tunnel partner in the configured FIB */
-      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
-      vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
-      pkts_encapsulated++;
-
-      len0 = vlib_buffer_length_in_chain (vm, b0);
-      stats_n_packets += 1;
-      stats_n_bytes += len0;
-
-      /* Batch stats increment on the same vxlan tunnel so counter is not
-       *  incremented per packet. Note stats are still incremented for deleted
-       *  and admin-down tunnel where packets are dropped. It is not worthwhile
-       *  to check for this rare case and affect normal path performance. */
-      if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
-      {
-        stats_n_packets -= 1;
-        stats_n_bytes -= len0;
-        if (stats_n_packets)
-          vlib_increment_combined_counter (
-              im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
-              thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
-        stats_n_packets = 1;
-        stats_n_bytes = len0;
-        stats_sw_if_index = sw_if_index0;
-      }
-      if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
-      {
-        vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0,
-                                                      sizeof(*tr));
-        tr->tunnel_index = t0 - ngm->tunnels;
-      }
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
-                                      n_left_to_next, bi0, next0);
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
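+         /* dual-packet loop: encapsulate two buffers per iteration;
+          * the single-packet loop below drains the remainder */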
+         u32 bi0, bi1;
+         vlib_buffer_t *b0, *b1;
+         u32 next0, next1;
+         u32 sw_if_index0, sw_if_index1, len0, len1;
+         vnet_hw_interface_t *hi0, *hi1;
+         vxlan_gpe_tunnel_t *t0, *t1;
+         u8 is_ip4_0, is_ip4_1;
+
+         next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+
+         /* Prefetch next iteration. */
+         {
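+           /* from[2]/from[3] are two iterations ahead of b0/b1; pull
+            * their headers and first two cache lines of data in now so
+            * the next iteration does not stall on memory */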
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+         }
+
+         bi0 = from[0];
+         bi1 = from[1];
+         to_next[0] = bi0;
+         to_next[1] = bi1;
+         from += 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         n_left_from -= 2;
+
+         b0 = vlib_get_buffer (vm, bi0);
+         b1 = vlib_get_buffer (vm, bi1);
+
+         /* 1-wide cache? */
+         sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+         sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+         hi0 =
+           vnet_get_sup_hw_interface (vnm,
+                                      vnet_buffer (b0)->sw_if_index
+                                      [VLIB_TX]);
+         hi1 =
+           vnet_get_sup_hw_interface (vnm,
+                                      vnet_buffer (b1)->sw_if_index
+                                      [VLIB_TX]);
+
+         t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
+         t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
+
+         is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+         is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+         if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
+           {
+             vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1,
+                                         is_ip4_0);
+           }
+         else
+           {
+             vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+             vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
+           }
+
+         /* Reset to look up tunnel partner in the configured FIB */
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+         vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+         vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+         vnet_buffer (b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+         pkts_encapsulated += 2;
+
+         len0 = vlib_buffer_length_in_chain (vm, b0);
+         len1 = vlib_buffer_length_in_chain (vm, b1);
+         stats_n_packets += 2;
+         stats_n_bytes += len0 + len1;
+
+         /* Batch stats increment on the same vxlan tunnel so counter is not
+            incremented per packet. Note stats are still incremented for deleted
+            and admin-down tunnel where packets are dropped. It is not worthwhile
+            to check for this rare case and affect normal path performance. */
+         if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
+                            || (sw_if_index1 != stats_sw_if_index)))
+           {
+             stats_n_packets -= 2;
+             stats_n_bytes -= len0 + len1;
+             if (sw_if_index0 == sw_if_index1)
+               {
+                 if (stats_n_packets)
+                   vlib_increment_combined_counter
+                     (im->combined_sw_if_counters +
+                      VNET_INTERFACE_COUNTER_TX, thread_index,
+                      stats_sw_if_index, stats_n_packets, stats_n_bytes);
+                 stats_sw_if_index = sw_if_index0;
+                 stats_n_packets = 2;
+                 stats_n_bytes = len0 + len1;
+               }
+             else
+               {
+                 vlib_increment_combined_counter (im->combined_sw_if_counters
+                                                  +
+                                                  VNET_INTERFACE_COUNTER_TX,
+                                                  thread_index, sw_if_index0,
+                                                  1, len0);
+                 vlib_increment_combined_counter (im->combined_sw_if_counters
+                                                  +
+                                                  VNET_INTERFACE_COUNTER_TX,
+                                                  thread_index, sw_if_index1,
+                                                  1, len1);
+               }
+           }
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_encap_trace_t *tr =
+               vlib_add_trace (vm, node, b0, sizeof (*tr));
+             tr->tunnel_index = t0 - ngm->tunnels;
+           }
+
+         if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b1,
+                                                           sizeof (*tr));
+             tr->tunnel_index = t1 - ngm->tunnels;
+           }
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, bi1, next0,
+                                          next1);
+       }
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 bi0;
+         vlib_buffer_t *b0;
+         u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+         u32 sw_if_index0, len0;
+         vnet_hw_interface_t *hi0;
+         vxlan_gpe_tunnel_t *t0;
+         u8 is_ip4_0;
+
+         bi0 = from[0];
+         to_next[0] = bi0;
+         from += 1;
+         to_next += 1;
+         n_left_from -= 1;
+         n_left_to_next -= 1;
+
+         b0 = vlib_get_buffer (vm, bi0);
+
+         /* 1-wide cache? */
+         sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+         hi0 =
+           vnet_get_sup_hw_interface (vnm,
+                                      vnet_buffer (b0)->sw_if_index
+                                      [VLIB_TX]);
+
+         t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
+
+         is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+
+         vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
+
+         /* Reset to look up tunnel partner in the configured FIB */
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+         vnet_buffer (b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+         pkts_encapsulated++;
+
+         len0 = vlib_buffer_length_in_chain (vm, b0);
+         stats_n_packets += 1;
+         stats_n_bytes += len0;
+
+         /* Batch stats increment on the same vxlan tunnel so counter is not
+          *  incremented per packet. Note stats are still incremented for deleted
+          *  and admin-down tunnel where packets are dropped. It is not worthwhile
+          *  to check for this rare case and affect normal path performance. */
+         if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
+           {
+             stats_n_packets -= 1;
+             stats_n_bytes -= len0;
+             if (stats_n_packets)
+               vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                                VNET_INTERFACE_COUNTER_TX,
+                                                thread_index,
+                                                stats_sw_if_index,
+                                                stats_n_packets,
+                                                stats_n_bytes);
+             stats_n_packets = 1;
+             stats_n_bytes = len0;
+             stats_sw_if_index = sw_if_index0;
+           }
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b0,
+                                                           sizeof (*tr));
+             tr->tunnel_index = t0 - ngm->tunnels;
+           }
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, next0);
+       }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-
-    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-  }
   vlib_node_increment_counter (vm, node->node_index,
-                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
-                               pkts_encapsulated);
+                              VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
+                              pkts_encapsulated);
   /* Increment any remaining batch stats */
   if (stats_n_packets)
-  {
-    vlib_increment_combined_counter (
-        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, thread_index,
-        stats_sw_if_index, stats_n_packets, stats_n_bytes);
-    node->runtime_data[0] = stats_sw_if_index;
-  }
+    {
+      vlib_increment_combined_counter (im->combined_sw_if_counters +
+                                      VNET_INTERFACE_COUNTER_TX,
+                                      thread_index, stats_sw_if_index,
+                                      stats_n_packets, stats_n_bytes);
+      node->runtime_data[0] = stats_sw_if_index;
+    }
 
   return from_frame->n_vectors;
 }
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
   .function = vxlan_gpe_encap,
   .name = "vxlan-gpe-encap",
@@ -385,4 +407,13 @@ VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
     [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
+
 
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
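
The remaining hunks are in vxlan_gpe.c, which carries tunnel management and
the CLI parser reindented below. For orientation, a hedged usage sketch
assembled from the keywords that parser accepts (the "create vxlan-gpe
tunnel" command path is assumed here, as its VLIB_CLI_COMMAND registration
falls outside this excerpt):

    create vxlan-gpe tunnel local 192.168.1.1 remote 192.168.1.2 vni 13 next-ip4
    create vxlan-gpe tunnel local 192.168.1.1 remote 192.168.1.2 vni 13 del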
index b13a734..939b5fa 100644 (file)
@@ -53,15 +53,16 @@ vxlan_gpe_main_t vxlan_gpe_main;
  * @return *s formatted string
  *
  */
-u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
+u8 *
+format_vxlan_gpe_tunnel (u8 * s, va_list * args)
 {
-  vxlan_gpe_tunnel_t * t = va_arg (*args, vxlan_gpe_tunnel_t *);
-  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t = va_arg (*args, vxlan_gpe_tunnel_t *);
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
 
   s = format (s, "[%d] local: %U remote: %U ",
-              t - ngm->tunnels,
-              format_ip46_address, &t->local, IP46_TYPE_ANY,
-              format_ip46_address, &t->remote, IP46_TYPE_ANY);
+             t - ngm->tunnels,
+             format_ip46_address, &t->local, IP46_TYPE_ANY,
+             format_ip46_address, &t->remote, IP46_TYPE_ANY);
 
   s = format (s, "  vxlan VNI %d ", t->vni);
 
@@ -85,10 +86,9 @@ u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
 
   if (ip46_address_is_multicast (&t->remote))
     s = format (s, "mcast_sw_if_index %d ", t->mcast_sw_if_index);
-    
+
   s = format (s, " fibs: (encap %d, decap %d)",
-              t->encap_fib_index,
-              t->decap_fib_index);
+             t->encap_fib_index, t->decap_fib_index);
 
   return s;
 }
@@ -102,15 +102,16 @@ u8 * format_vxlan_gpe_tunnel (u8 * s, va_list * args)
  * @return *s formatted string
  *
  */
-static u8 * format_vxlan_gpe_name (u8 * s, va_list * args)
+static u8 *
+format_vxlan_gpe_name (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
   return format (s, "vxlan_gpe_tunnel%d", dev_instance);
 }
 
-static uword dummy_interface_tx (vlib_main_t * vm,
-                                 vlib_node_runtime_t * node,
-                                 vlib_frame_t * frame)
+static uword
+dummy_interface_tx (vlib_main_t * vm,
+                   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   clib_warning ("you shouldn't be here, leaking buffers...");
   return frame->n_vectors;
@@ -127,7 +128,8 @@ static uword dummy_interface_tx (vlib_main_t * vm,
  *
  */
 static clib_error_t *
-vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+                                  u32 flags)
 {
   u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
     VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
@@ -136,6 +138,7 @@ vxlan_gpe_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags
   return 0;
 }
 
+/* *INDENT-OFF* */
 VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
   .name = "VXLAN_GPE",
   .format_device_name = format_vxlan_gpe_name,
@@ -143,6 +146,7 @@ VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
   .tx_function = dummy_interface_tx,
   .admin_up_down_function = vxlan_gpe_interface_admin_up_down,
 };
+/* *INDENT-ON* */
 
 
 /**
@@ -154,38 +158,42 @@ VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
  * @return *s
  *
  */
-static u8 * format_vxlan_gpe_header_with_length (u8 * s, va_list * args)
+static u8 *
+format_vxlan_gpe_header_with_length (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
   s = format (s, "unimplemented dev %u", dev_instance);
   return s;
 }
 
+/* *INDENT-OFF* */
 VNET_HW_INTERFACE_CLASS (vxlan_gpe_hw_class) = {
   .name = "VXLAN_GPE",
   .format_header = format_vxlan_gpe_header_with_length,
   .build_rewrite = default_build_rewrite,
 };
+/* *INDENT-ON* */
 
 static void
-vxlan_gpe_tunnel_restack_dpo(vxlan_gpe_tunnel_t * t)
+vxlan_gpe_tunnel_restack_dpo (vxlan_gpe_tunnel_t * t)
 {
-    dpo_id_t dpo = DPO_INVALID;
-    u32 encap_index = vxlan_gpe_encap_node.index;
-    fib_forward_chain_type_t forw_type = ip46_address_is_ip4(&t->remote) ?
-        FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
-
-    fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
-    dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
-    dpo_reset(&dpo);
+  dpo_id_t dpo = DPO_INVALID;
+  u32 encap_index = vxlan_gpe_encap_node.index;
+  fib_forward_chain_type_t forw_type = ip46_address_is_ip4 (&t->remote) ?
+    FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;
+
+  fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);
+  dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+  dpo_reset (&dpo);
 }
 
 static vxlan_gpe_tunnel_t *
-vxlan_gpe_tunnel_from_fib_node (fib_node_t *node)
+vxlan_gpe_tunnel_from_fib_node (fib_node_t * node)
 {
-    ASSERT(FIB_NODE_TYPE_VXLAN_GPE_TUNNEL == node->fn_type);
-    return ((vxlan_gpe_tunnel_t*) (((char*)node) -
-                              STRUCT_OFFSET_OF(vxlan_gpe_tunnel_t, node)));
+  ASSERT (FIB_NODE_TYPE_VXLAN_GPE_TUNNEL == node->fn_type);
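+  /* the fib_node_t is embedded in vxlan_gpe_tunnel_t, so subtracting
+   * its offset recovers the enclosing tunnel (container-of idiom) */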
+  return ((vxlan_gpe_tunnel_t *) (((char *) node) -
+                                 STRUCT_OFFSET_OF (vxlan_gpe_tunnel_t,
+                                                   node)));
 }
 
 /**
@@ -193,38 +201,37 @@ vxlan_gpe_tunnel_from_fib_node (fib_node_t *node)
  * Here we will restack the new dpo of VXLAN_GPE DIP to encap node.
  */
 static fib_node_back_walk_rc_t
-vxlan_gpe_tunnel_back_walk (fib_node_t *node,
-                       fib_node_back_walk_ctx_t *ctx)
+vxlan_gpe_tunnel_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
 {
-    vxlan_gpe_tunnel_restack_dpo(vxlan_gpe_tunnel_from_fib_node(node));
-    return (FIB_NODE_BACK_WALK_CONTINUE);
+  vxlan_gpe_tunnel_restack_dpo (vxlan_gpe_tunnel_from_fib_node (node));
+  return (FIB_NODE_BACK_WALK_CONTINUE);
 }
 
 /**
  * Function definition to get a FIB node from its index
  */
-static fib_node_t*
+static fib_node_t *
 vxlan_gpe_tunnel_fib_node_get (fib_node_index_t index)
 {
-    vxlan_gpe_tunnel_t * t;
-    vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
 
-    t = pool_elt_at_index(ngm->tunnels, index);
+  t = pool_elt_at_index (ngm->tunnels, index);
 
-    return (&t->node);
+  return (&t->node);
 }
 
 /**
  * Function definition to inform the FIB node that its last lock has gone.
  */
 static void
-vxlan_gpe_tunnel_last_lock_gone (fib_node_t *node)
+vxlan_gpe_tunnel_last_lock_gone (fib_node_t * node)
 {
-    /*
-     * The VXLAN_GPE tunnel is a root of the graph. As such
-     * it never has children and thus is never locked.
-     */
-    ASSERT(0);
+  /*
+   * The VXLAN_GPE tunnel is a root of the graph. As such
+   * it never has children and thus is never locked.
+   */
+  ASSERT (0);
 }
 
 /*
@@ -232,9 +239,9 @@ vxlan_gpe_tunnel_last_lock_gone (fib_node_t *node)
  * for participation in the FIB object graph.
  */
 const static fib_node_vft_t vxlan_gpe_vft = {
-    .fnv_get = vxlan_gpe_tunnel_fib_node_get,
-    .fnv_last_lock = vxlan_gpe_tunnel_last_lock_gone,
-    .fnv_back_walk = vxlan_gpe_tunnel_back_walk,
+  .fnv_get = vxlan_gpe_tunnel_fib_node_get,
+  .fnv_last_lock = vxlan_gpe_tunnel_last_lock_gone,
+  .fnv_back_walk = vxlan_gpe_tunnel_back_walk,
 };
 
 #define foreach_gpe_copy_field                  \
@@ -265,18 +272,19 @@ _(decap_fib_index)
  * @return rc
  *
  */
-int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
-                        u8 protocol_override, uword encap_next_node)
+int
+vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                   u8 protocol_override, uword encap_next_node)
 {
   u8 *rw = 0;
-  ip4_header_t * ip0;
-  ip4_vxlan_gpe_header_t * h0;
+  ip4_header_t *ip0;
+  ip4_vxlan_gpe_header_t *h0;
   int len;
 
   len = sizeof (*h0) + extension_size;
 
-  vec_free(t->rewrite);
-  vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+  vec_free (t->rewrite);
+  vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
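+  /* rw becomes the per-tunnel prefab ip4+udp+vxlan-gpe header that
+   * ip_udp_encap_one () prepends to each packet on the encap path */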
 
   h0 = (ip4_vxlan_gpe_header_t *) rw;
 
@@ -299,15 +307,15 @@ int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
   h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
   h0->vxlan.ver_res = VXLAN_GPE_VERSION;
   if (protocol_override)
-  {
+    {
       h0->vxlan.protocol = protocol_override;
-  }
+    }
   else
-  {
+    {
       h0->vxlan.protocol = t->protocol;
-  }
-  t->rewrite_size = sizeof(ip4_vxlan_gpe_header_t) +  extension_size;
-  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+    }
+  t->rewrite_size = sizeof (ip4_vxlan_gpe_header_t) + extension_size;
+  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni << 8);
 
   t->rewrite = rw;
   t->encap_next_node = encap_next_node;
@@ -322,24 +330,26 @@ int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
  * @return rc
  *
  */
-int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
-                        u8 protocol_override, uword encap_next_node)
+int
+vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                   u8 protocol_override, uword encap_next_node)
 {
   u8 *rw = 0;
-  ip6_header_t * ip0;
-  ip6_vxlan_gpe_header_t * h0;
+  ip6_header_t *ip0;
+  ip6_vxlan_gpe_header_t *h0;
   int len;
 
   len = sizeof (*h0) + extension_size;
 
-  vec_free(t->rewrite);
-  vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
+  vec_free (t->rewrite);
+  vec_validate_aligned (rw, len - 1, CLIB_CACHE_LINE_BYTES);
 
   h0 = (ip6_vxlan_gpe_header_t *) rw;
 
-  /* Fixed portion of the (outer) ip4 header */
+  /* Fixed portion of the (outer) ip6 header */
   ip0 = &h0->ip6;
-  ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(6 << 28);
+  ip0->ip_version_traffic_class_and_flow_label =
+    clib_host_to_net_u32 (6 << 28);
   ip0->hop_limit = 255;
   ip0->protocol = IP_PROTOCOL_UDP;
 
@@ -356,15 +366,15 @@ int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
   h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
   h0->vxlan.ver_res = VXLAN_GPE_VERSION;
   if (protocol_override)
-  {
-      h0->vxlan.protocol = t->protocol;
-  }
+    {
+      h0->vxlan.protocol = protocol_override;
+    }
   else
-  {
-      h0->vxlan.protocol = protocol_override;
-  }
-  t->rewrite_size = sizeof(ip4_vxlan_gpe_header_t) +  extension_size;
-  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
+    {
+      h0->vxlan.protocol = t->protocol;
+    }
+  t->rewrite_size = sizeof (ip6_vxlan_gpe_header_t) + extension_size;
+  h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni << 8);
 
   t->rewrite = rw;
   t->encap_next_node = encap_next_node;
@@ -372,51 +382,54 @@ int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
 }
 
 static void
-hash_set_key_copy (uword ** h, void * key, uword v) {
-       size_t ksz = hash_header(*h)->user;
-        void * copy = clib_mem_alloc (ksz);
-       clib_memcpy (copy, key, ksz);
-       hash_set_mem (*h, copy, v);
+hash_set_key_copy (uword ** h, void *key, uword v)
+{
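+  /* hash_set_mem () stores keys by reference, so install a private
+   * heap copy of the key; hash_unset_key_free () below frees it */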
+  size_t ksz = hash_header (*h)->user;
+  void *copy = clib_mem_alloc (ksz);
+  clib_memcpy (copy, key, ksz);
+  hash_set_mem (*h, copy, v);
 }
 
 static void
-hash_unset_key_free (uword ** h, void * key) {
-       hash_pair_t * hp = hash_get_pair_mem (*h, key);
-       ASSERT (hp);
-       key = uword_to_pointer (hp->key, void *);
-       hash_unset_mem (*h, key);
-       clib_mem_free (key);
+hash_unset_key_free (uword ** h, void *key)
+{
+  hash_pair_t *hp = hash_get_pair_mem (*h, key);
+  ASSERT (hp);
+  key = uword_to_pointer (hp->key, void *);
+  hash_unset_mem (*h, key);
+  clib_mem_free (key);
 }
 
 static uword
-vtep_addr_ref(ip46_address_t *ip)
+vtep_addr_ref (ip46_address_t * ip)
 {
-       uword *vtep = ip46_address_is_ip4(ip) ?
-                        hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
-                        hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
-       if (vtep)
-               return ++(*vtep);
-       ip46_address_is_ip4(ip) ?
-                        hash_set (vxlan_gpe_main.vtep4, ip->ip4.as_u32, 1) :
-                        hash_set_key_copy (&vxlan_gpe_main.vtep6, &ip->ip6, 1);
-       return 1;
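+  /* reference-count a VTEP address: the first reference installs it in
+   * the vtep4/vtep6 hash consulted by the ip-vxlan-gpe-bypass nodes;
+   * later references only bump the count */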
+  uword *vtep = ip46_address_is_ip4 (ip) ?
+    hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+    hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
+  if (vtep)
+    return ++(*vtep);
+  ip46_address_is_ip4 (ip) ?
+    hash_set (vxlan_gpe_main.vtep4, ip->ip4.as_u32, 1) :
+    hash_set_key_copy (&vxlan_gpe_main.vtep6, &ip->ip6, 1);
+  return 1;
 }
 
 static uword
-vtep_addr_unref(ip46_address_t *ip)
+vtep_addr_unref (ip46_address_t * ip)
 {
-       uword *vtep = ip46_address_is_ip4(ip) ?
-                        hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
-                        hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
-        ASSERT(vtep);
-       if (--(*vtep) != 0)
-               return *vtep;
-       ip46_address_is_ip4(ip) ?
-               hash_unset (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
-               hash_unset_key_free (&vxlan_gpe_main.vtep6, &ip->ip6);
-       return 0;
+  uword *vtep = ip46_address_is_ip4 (ip) ?
+    hash_get (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+    hash_get_mem (vxlan_gpe_main.vtep6, &ip->ip6);
+  ASSERT (vtep);
+  if (--(*vtep) != 0)
+    return *vtep;
+  ip46_address_is_ip4 (ip) ?
+    hash_unset (vxlan_gpe_main.vtep4, ip->ip4.as_u32) :
+    hash_unset_key_free (&vxlan_gpe_main.vtep6, &ip->ip6);
+  return 0;
 }
 
+/* *INDENT-OFF* */
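+/* (mfib entry index, mcast adjacency index) packed into one u64 so the
+ * pair can be stored directly as a hash table value; see
+ * mcast_shared_get () and mcast_shared_add () below */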
 typedef CLIB_PACKED(union {
   struct {
     fib_node_index_t mfib_entry_index;
@@ -424,43 +437,44 @@ typedef CLIB_PACKED(union {
   };
   u64 as_u64;
 }) mcast_shared_t;
+/* *INDENT-ON* */
 
 static inline mcast_shared_t
-mcast_shared_get(ip46_address_t * ip)
+mcast_shared_get (ip46_address_t * ip)
 {
-        ASSERT(ip46_address_is_multicast(ip));
-       uword * p = hash_get_mem (vxlan_gpe_main.mcast_shared, ip);
-        ASSERT(p);
-       return (mcast_shared_t) { .as_u64 = *p };
+  ASSERT (ip46_address_is_multicast (ip));
+  uword *p = hash_get_mem (vxlan_gpe_main.mcast_shared, ip);
+  ASSERT (p);
+  return (mcast_shared_t) { .as_u64 = *p };
 }
 
 static inline void
-mcast_shared_add(ip46_address_t *remote,
-                 fib_node_index_t mfei,
-                 adj_index_t ai)
+mcast_shared_add (ip46_address_t * remote,
+                 fib_node_index_t mfei, adj_index_t ai)
 {
-    mcast_shared_t new_ep = {
-        .mcast_adj_index = ai,
-        .mfib_entry_index = mfei,
-    };
+  mcast_shared_t new_ep = {
+    .mcast_adj_index = ai,
+    .mfib_entry_index = mfei,
+  };
 
-    hash_set_key_copy (&vxlan_gpe_main.mcast_shared, remote, new_ep.as_u64);
+  hash_set_key_copy (&vxlan_gpe_main.mcast_shared, remote, new_ep.as_u64);
 }
 
 static inline void
-mcast_shared_remove(ip46_address_t *remote)
+mcast_shared_remove (ip46_address_t * remote)
 {
-    mcast_shared_t ep = mcast_shared_get(remote);
+  mcast_shared_t ep = mcast_shared_get (remote);
 
-    adj_unlock(ep.mcast_adj_index);
-    mfib_table_entry_delete_index(ep.mfib_entry_index,
-                                  MFIB_SOURCE_VXLAN_GPE);
+  adj_unlock (ep.mcast_adj_index);
+  mfib_table_entry_delete_index (ep.mfib_entry_index, MFIB_SOURCE_VXLAN_GPE);
 
-    hash_unset_key_free (&vxlan_gpe_main.mcast_shared, remote);
+  hash_unset_key_free (&vxlan_gpe_main.mcast_shared, remote);
 }
 
 static inline fib_protocol_t
-fib_ip_proto(bool is_ip6)
+fib_ip_proto (bool is_ip6)
 {
   return (is_ip6) ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4;
 }
@@ -475,13 +489,13 @@ fib_ip_proto(bool is_ip6)
  *
  */
 int vnet_vxlan_gpe_add_del_tunnel
-(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp)
+  (vnet_vxlan_gpe_add_del_tunnel_args_t * a, u32 * sw_if_indexp)
 {
-  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
   vxlan_gpe_tunnel_t *t = 0;
-  vnet_main_t * vnm = ngm->vnet_main;
-  vnet_hw_interface_t * hi;
-  uword * p;
+  vnet_main_t *vnm = ngm->vnet_main;
+  vnet_hw_interface_t *hi;
+  uword *p;
   u32 hw_if_index = ~0;
   u32 sw_if_index = ~0;
   int rv;
@@ -490,32 +504,32 @@ int vnet_vxlan_gpe_add_del_tunnel
   u32 is_ip6 = a->is_ip6;
 
   if (!is_ip6)
-  {
-    key4.local = a->local.ip4.as_u32;
-    key4.remote = a->remote.ip4.as_u32;
-    key4.vni = clib_host_to_net_u32 (a->vni << 8);
-    key4.pad = 0;
+    {
+      key4.local = a->local.ip4.as_u32;
+      key4.remote = a->remote.ip4.as_u32;
+      key4.vni = clib_host_to_net_u32 (a->vni << 8);
+      key4.pad = 0;
 
-    p = hash_get_mem(ngm->vxlan4_gpe_tunnel_by_key, &key4);
-  }
+      p = hash_get_mem (ngm->vxlan4_gpe_tunnel_by_key, &key4);
+    }
   else
-  {
-    key6.local.as_u64[0] = a->local.ip6.as_u64[0];
-    key6.local.as_u64[1] = a->local.ip6.as_u64[1];
-    key6.remote.as_u64[0] = a->remote.ip6.as_u64[0];
-    key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
-    key6.vni = clib_host_to_net_u32 (a->vni << 8);
+    {
+      key6.local.as_u64[0] = a->local.ip6.as_u64[0];
+      key6.local.as_u64[1] = a->local.ip6.as_u64[1];
+      key6.remote.as_u64[0] = a->remote.ip6.as_u64[0];
+      key6.remote.as_u64[1] = a->remote.ip6.as_u64[1];
+      key6.vni = clib_host_to_net_u32 (a->vni << 8);
 
-    p = hash_get_mem(ngm->vxlan6_gpe_tunnel_by_key, &key6);
-  }
+      p = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6);
+    }
 
   if (a->is_add)
     {
-      l2input_main_t * l2im = &l2input_main;
+      l2input_main_t *l2im = &l2input_main;
 
       /* adding a tunnel: tunnel must not already exist */
       if (p)
-        return VNET_API_ERROR_TUNNEL_EXIST;
+       return VNET_API_ERROR_TUNNEL_EXIST;
 
       pool_get_aligned (ngm->tunnels, t, CLIB_CACHE_LINE_BYTES);
       memset (t, 0, sizeof (*t));
@@ -523,72 +537,81 @@ int vnet_vxlan_gpe_add_del_tunnel
       /* copy from arg structure */
 #define _(x) t->x = a->x;
       foreach_gpe_copy_field;
-      if (!a->is_ip6) foreach_copy_ipv4
-      else            foreach_copy_ipv6
+      if (!a->is_ip6)
+       foreach_copy_ipv4
+       else
+       foreach_copy_ipv6
 #undef _
+         if (!a->is_ip6)
+         t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
 
-      if (!a->is_ip6) t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
-
-      if (!a->is_ip6) {
-        rv = vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
-      } else {
-        rv = vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
-      }
+      if (!a->is_ip6)
+       {
+         rv = vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
+       }
+      else
+       {
+         rv = vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+       }
 
       if (rv)
-      {
-          pool_put (ngm->tunnels, t);
-          return rv;
-      }
+       {
+         pool_put (ngm->tunnels, t);
+         return rv;
+       }
 
       if (!is_ip6)
-      {
-        key4_copy = clib_mem_alloc (sizeof (*key4_copy));
-        clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
-        hash_set_mem (ngm->vxlan4_gpe_tunnel_by_key, key4_copy,
-                      t - ngm->tunnels);
-      }
+       {
+         key4_copy = clib_mem_alloc (sizeof (*key4_copy));
+         clib_memcpy (key4_copy, &key4, sizeof (*key4_copy));
+         hash_set_mem (ngm->vxlan4_gpe_tunnel_by_key, key4_copy,
+                       t - ngm->tunnels);
+       }
       else
-      {
-          key6_copy = clib_mem_alloc (sizeof (*key6_copy));
-          clib_memcpy (key6_copy, &key6, sizeof (*key6_copy));
-          hash_set_mem (ngm->vxlan6_gpe_tunnel_by_key, key6_copy,
-                        t - ngm->tunnels);
-      }
+       {
+         key6_copy = clib_mem_alloc (sizeof (*key6_copy));
+         clib_memcpy (key6_copy, &key6, sizeof (*key6_copy));
+         hash_set_mem (ngm->vxlan6_gpe_tunnel_by_key, key6_copy,
+                       t - ngm->tunnels);
+       }
 
       if (vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) > 0)
-        {
-             vnet_interface_main_t * im = &vnm->interface_main;
-          hw_if_index = ngm->free_vxlan_gpe_tunnel_hw_if_indices
-            [vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices)-1];
-          _vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
-
-          hi = vnet_get_hw_interface (vnm, hw_if_index);
-          hi->dev_instance = t - ngm->tunnels;
-          hi->hw_instance = hi->dev_instance;
+       {
+         vnet_interface_main_t *im = &vnm->interface_main;
+         hw_if_index = ngm->free_vxlan_gpe_tunnel_hw_if_indices
+           [vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) - 1];
+         _vec_len (ngm->free_vxlan_gpe_tunnel_hw_if_indices) -= 1;
+
+         hi = vnet_get_hw_interface (vnm, hw_if_index);
+         hi->dev_instance = t - ngm->tunnels;
+         hi->hw_instance = hi->dev_instance;
          /* clear old stats of freed tunnel before reuse */
          sw_if_index = hi->sw_if_index;
-         vnet_interface_counter_lock(im);
+         vnet_interface_counter_lock (im);
          vlib_zero_combined_counter
-           (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX], sw_if_index);
-         vlib_zero_combined_counter
-           (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX], sw_if_index);
-         vlib_zero_simple_counter
-           (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
-         vnet_interface_counter_unlock(im);
-        }
+           (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX],
+            sw_if_index);
+         vlib_zero_combined_counter (&im->combined_sw_if_counters
+                                     [VNET_INTERFACE_COUNTER_RX],
+                                     sw_if_index);
+         vlib_zero_simple_counter (&im->sw_if_counters
+                                   [VNET_INTERFACE_COUNTER_DROP],
+                                   sw_if_index);
+         vnet_interface_counter_unlock (im);
+       }
       else
-        {
-          hw_if_index = vnet_register_interface
-            (vnm, vxlan_gpe_device_class.index, t - ngm->tunnels,
-             vxlan_gpe_hw_class.index, t - ngm->tunnels);
-          hi = vnet_get_hw_interface (vnm, hw_if_index);
-          hi->output_node_index = vxlan_gpe_encap_node.index;
-        }
+       {
+         hw_if_index = vnet_register_interface
+           (vnm, vxlan_gpe_device_class.index, t - ngm->tunnels,
+            vxlan_gpe_hw_class.index, t - ngm->tunnels);
+         hi = vnet_get_hw_interface (vnm, hw_if_index);
+         hi->output_node_index = vxlan_gpe_encap_node.index;
+       }
 
       t->hw_if_index = hw_if_index;
       t->sw_if_index = sw_if_index = hi->sw_if_index;
-      vec_validate_init_empty (ngm->tunnel_index_by_sw_if_index, sw_if_index, ~0);
+      vec_validate_init_empty (ngm->tunnel_index_by_sw_if_index, sw_if_index,
+                              ~0);
       ngm->tunnel_index_by_sw_if_index[sw_if_index] = t - ngm->tunnels;
 
       /* setup l2 input config with l2 feature and bd 0 to drop packet */
@@ -596,159 +619,160 @@ int vnet_vxlan_gpe_add_del_tunnel
       l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP;
       l2im->configs[sw_if_index].bd_index = 0;
 
-      vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, sw_if_index);
+      vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index);
       si->flags &= ~VNET_SW_INTERFACE_FLAG_HIDDEN;
       vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
-                                   VNET_SW_INTERFACE_FLAG_ADMIN_UP);
-      fib_node_init(&t->node, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL);
+                                  VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+      fib_node_init (&t->node, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL);
       fib_prefix_t tun_remote_pfx;
       u32 encap_index = vxlan_gpe_encap_node.index;
       vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
 
-      fib_prefix_from_ip46_addr(&t->remote, &tun_remote_pfx);
-      if (!ip46_address_is_multicast(&t->remote))
-        {
-          /* Unicast tunnel -
-           * source the FIB entry for the tunnel's destination
-           * and become a child thereof. The tunnel will then get poked
-           * when the forwarding for the entry updates, and the tunnel can
-           * re-stack accordingly
-           */
-          vtep_addr_ref(&t->local);
-          t->fib_entry_index = fib_table_entry_special_add
-            (t->encap_fib_index, &tun_remote_pfx, FIB_SOURCE_RR,
+      fib_prefix_from_ip46_addr (&t->remote, &tun_remote_pfx);
+      if (!ip46_address_is_multicast (&t->remote))
+       {
+         /* Unicast tunnel -
+          * source the FIB entry for the tunnel's destination
+          * and become a child thereof. The tunnel will then get poked
+          * when the forwarding for the entry updates, and the tunnel can
+          * re-stack accordingly
+          */
+         vtep_addr_ref (&t->local);
+         t->fib_entry_index = fib_table_entry_special_add
+           (t->encap_fib_index, &tun_remote_pfx, FIB_SOURCE_RR,
             FIB_ENTRY_FLAG_NONE);
-          t->sibling_index = fib_entry_child_add
-            (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, t - ngm->tunnels);
-          vxlan_gpe_tunnel_restack_dpo(t);
+         t->sibling_index = fib_entry_child_add
+           (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_GPE_TUNNEL,
+            t - ngm->tunnels);
+         vxlan_gpe_tunnel_restack_dpo (t);
        }
       else
-        {
-         /* Multicast tunnel -
+       {
+         /* Multicast tunnel -
           * as the same mcast group can be used for multiple mcast tunnels
           * with different VNIs, create the output fib adjacency only if
           * it does not already exist
           */
-          fib_protocol_t fp = fib_ip_proto(is_ip6);
-
-         if (vtep_addr_ref(&t->remote) == 1)
-          {
-              fib_node_index_t mfei;
-              adj_index_t ai;
-              fib_route_path_t path = {
-                  .frp_proto = fib_proto_to_dpo(fp),
-                  .frp_addr = zero_addr,
-                  .frp_sw_if_index = 0xffffffff,
-                  .frp_fib_index = ~0,
-                  .frp_weight = 0,
-                  .frp_flags = FIB_ROUTE_PATH_LOCAL,
-              };
-              const mfib_prefix_t mpfx = {
-                  .fp_proto = fp,
-                  .fp_len = (is_ip6 ? 128 : 32),
-                  .fp_grp_addr = tun_remote_pfx.fp_addr,
-              };
-
-              /*
-               * Setup the (*,G) to receive traffic on the mcast group
-               *  - the forwarding interface is for-us
-               *  - the accepting interface is that from the API
-               */
-              mfib_table_entry_path_update(t->encap_fib_index,
-                                           &mpfx,
-                                           MFIB_SOURCE_VXLAN_GPE,
-                                           &path,
-                                           MFIB_ITF_FLAG_FORWARD);
-
-              path.frp_sw_if_index = a->mcast_sw_if_index;
-              path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
-              mfei = mfib_table_entry_path_update(t->encap_fib_index,
-                                                  &mpfx,
-                                                  MFIB_SOURCE_VXLAN_GPE,
-                                                  &path,
-                                                  MFIB_ITF_FLAG_ACCEPT);
-
-              /*
-               * Create the mcast adjacency to send traffic to the group
-               */
-              ai = adj_mcast_add_or_lock(fp,
-                                         fib_proto_to_link(fp),
-                                         a->mcast_sw_if_index);
-
-              /*
-               * create a new end-point
-               */
-              mcast_shared_add(&t->remote, mfei, ai);
-          }
-
-          dpo_id_t dpo = DPO_INVALID;
-          mcast_shared_t ep = mcast_shared_get(&t->remote);
-
-          /* Stack shared mcast remote mac addr rewrite on encap */
-          dpo_set (&dpo, DPO_ADJACENCY_MCAST,
-                   fib_proto_to_dpo(fp),
-                   ep.mcast_adj_index);
-
-          dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
-          dpo_reset (&dpo);
+         fib_protocol_t fp = fib_ip_proto (is_ip6);
+
+         if (vtep_addr_ref (&t->remote) == 1)
+           {
+             fib_node_index_t mfei;
+             adj_index_t ai;
+             fib_route_path_t path = {
+               .frp_proto = fib_proto_to_dpo (fp),
+               .frp_addr = zero_addr,
+               .frp_sw_if_index = 0xffffffff,
+               .frp_fib_index = ~0,
+               .frp_weight = 0,
+               .frp_flags = FIB_ROUTE_PATH_LOCAL,
+             };
+             const mfib_prefix_t mpfx = {
+               .fp_proto = fp,
+               .fp_len = (is_ip6 ? 128 : 32),
+               .fp_grp_addr = tun_remote_pfx.fp_addr,
+             };
+
+             /*
+              * Setup the (*,G) to receive traffic on the mcast group
+              *  - the forwarding interface is for-us
+              *  - the accepting interface is that from the API
+              */
+             mfib_table_entry_path_update (t->encap_fib_index,
+                                           &mpfx,
+                                           MFIB_SOURCE_VXLAN_GPE,
+                                           &path, MFIB_ITF_FLAG_FORWARD);
+
+             path.frp_sw_if_index = a->mcast_sw_if_index;
+             path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+             mfei = mfib_table_entry_path_update (t->encap_fib_index,
+                                                  &mpfx,
+                                                  MFIB_SOURCE_VXLAN_GPE,
+                                                  &path,
+                                                  MFIB_ITF_FLAG_ACCEPT);
+
+             /*
+              * Create the mcast adjacency to send traffic to the group
+              */
+             ai = adj_mcast_add_or_lock (fp,
+                                         fib_proto_to_link (fp),
+                                         a->mcast_sw_if_index);
+
+             /*
+              * create a new end-point
+              */
+             mcast_shared_add (&t->remote, mfei, ai);
+           }
+
+         dpo_id_t dpo = DPO_INVALID;
+         mcast_shared_t ep = mcast_shared_get (&t->remote);
+
+         /* Stack shared mcast remote mac addr rewrite on encap */
+         dpo_set (&dpo, DPO_ADJACENCY_MCAST,
+                  fib_proto_to_dpo (fp), ep.mcast_adj_index);
+
+         dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
+         dpo_reset (&dpo);
          flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
        }
 
       /* Set vxlan tunnel output node */
       hi->output_node_index = encap_index;
 
-      vnet_get_sw_interface (vnet_get_main(), sw_if_index)->flood_class = flood_class;
+      vnet_get_sw_interface (vnet_get_main (), sw_if_index)->flood_class =
+       flood_class;
     }
   else
     {
       /* deleting a tunnel: tunnel must exist */
       if (!p)
-        return VNET_API_ERROR_NO_SUCH_ENTRY;
+       return VNET_API_ERROR_NO_SUCH_ENTRY;
 
       t = pool_elt_at_index (ngm->tunnels, p[0]);
 
       sw_if_index = t->sw_if_index;
-      vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */);
-      vnet_sw_interface_t * si = vnet_get_sw_interface (vnm, t->sw_if_index);
+      vnet_sw_interface_set_flags (vnm, t->sw_if_index, 0 /* down */ );
+      vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, t->sw_if_index);
       si->flags |= VNET_SW_INTERFACE_FLAG_HIDDEN;
-      set_int_l2_mode(ngm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0, 0, 0, 0);
+      set_int_l2_mode (ngm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0, 0, 0,
+                      0);
       vec_add1 (ngm->free_vxlan_gpe_tunnel_hw_if_indices, t->hw_if_index);
 
       ngm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
 
       if (!is_ip6)
-        hash_unset (ngm->vxlan4_gpe_tunnel_by_key, key4.as_u64);
+       hash_unset (ngm->vxlan4_gpe_tunnel_by_key, key4.as_u64);
       else
        hash_unset_key_free (&ngm->vxlan6_gpe_tunnel_by_key, &key6);
 
-      if (!ip46_address_is_multicast(&t->remote))
-        {
-         vtep_addr_unref(&t->local);
-         fib_entry_child_remove(t->fib_entry_index, t->sibling_index);
-         fib_table_entry_delete_index(t->fib_entry_index, FIB_SOURCE_RR);
-        }
-      else if (vtep_addr_unref(&t->remote) == 0)
-        {
-         mcast_shared_remove(&t->remote);
-        }
-
-      fib_node_deinit(&t->node);
+      if (!ip46_address_is_multicast (&t->remote))
+       {
+         vtep_addr_unref (&t->local);
+         fib_entry_child_remove (t->fib_entry_index, t->sibling_index);
+         fib_table_entry_delete_index (t->fib_entry_index, FIB_SOURCE_RR);
+       }
+      else if (vtep_addr_unref (&t->remote) == 0)
+       {
+         mcast_shared_remove (&t->remote);
+       }
+
+      fib_node_deinit (&t->node);
       vec_free (t->rewrite);
       pool_put (ngm->tunnels, t);
     }
 
   if (sw_if_indexp)
-      *sw_if_indexp = sw_if_index;
+    *sw_if_indexp = sw_if_index;
 
   return 0;
 }
 
 static clib_error_t *
 vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
-                                   unformat_input_t * input,
-                                   vlib_cli_command_t * cmd)
+                                    unformat_input_t * input,
+                                    vlib_cli_command_t * cmd)
 {
-  unformat_input_t _line_input, * line_input = &_line_input;
+  unformat_input_t _line_input, *line_input = &_line_input;
   u8 is_add = 1;
   ip46_address_t local, remote;
   u8 local_set = 0;
@@ -764,100 +788,103 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
   u8 vni_set = 0;
   int rv;
   u32 tmp;
-  vnet_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a;
+  vnet_vxlan_gpe_add_del_tunnel_args_t _a, *a = &_a;
   u32 sw_if_index;
   clib_error_t *error = NULL;
 
   /* Get a line of input. */
-  if (! unformat_user (input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
 
-  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat (line_input, "del"))
-      is_add = 0;
-    else if (unformat (line_input, "local %U",
-                       unformat_ip4_address, &local.ip4))
-    {
-      local_set = 1;
-      ipv4_set = 1;
-    }
-    else if (unformat (line_input, "remote %U",
-                       unformat_ip4_address, &remote.ip4))
-    {
-      remote_set = 1;
-      ipv4_set = 1;
-    }
-    else if (unformat (line_input, "local %U",
-                       unformat_ip6_address, &local.ip6))
-    {
-      local_set = 1;
-      ipv6_set = 1;
-    }
-    else if (unformat (line_input, "remote %U",
-                       unformat_ip6_address, &remote.ip6))
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
     {
-      remote_set = 1;
-      ipv6_set = 1;
+      if (unformat (line_input, "del"))
+       is_add = 0;
+      else if (unformat (line_input, "local %U",
+                        unformat_ip4_address, &local.ip4))
+       {
+         local_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip4_address, &remote.ip4))
+       {
+         remote_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "local %U",
+                        unformat_ip6_address, &local.ip6))
+       {
+         local_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip6_address, &remote.ip6))
+       {
+         remote_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (line_input, "group %U %U",
+                        unformat_ip4_address, &remote.ip4,
+                        unformat_vnet_sw_interface,
+                        vnet_get_main (), &mcast_sw_if_index))
+       {
+         grp_set = remote_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "group %U %U",
+                        unformat_ip6_address, &remote.ip6,
+                        unformat_vnet_sw_interface,
+                        vnet_get_main (), &mcast_sw_if_index))
+       {
+         grp_set = remote_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (line_input, "encap-vrf-id %d", &tmp))
+       {
+         if (ipv6_set)
+           encap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
+         else
+           encap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
+
+         if (encap_fib_index == ~0)
+           {
+             error =
+               clib_error_return (0, "nonexistent encap fib id %d", tmp);
+             goto done;
+           }
+       }
+      else if (unformat (line_input, "decap-vrf-id %d", &tmp))
+       {
+         if (ipv6_set)
+           decap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
+         else
+           decap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
+
+         if (decap_fib_index == ~0)
+           {
+             error =
+               clib_error_return (0, "nonexistent decap fib id %d", tmp);
+             goto done;
+           }
+       }
+      else if (unformat (line_input, "vni %d", &vni))
+       vni_set = 1;
+      else if (unformat (line_input, "next-ip4"))
+       protocol = VXLAN_GPE_PROTOCOL_IP4;
+      else if (unformat (line_input, "next-ip6"))
+       protocol = VXLAN_GPE_PROTOCOL_IP6;
+      else if (unformat (line_input, "next-ethernet"))
+       protocol = VXLAN_GPE_PROTOCOL_ETHERNET;
+      else if (unformat (line_input, "next-nsh"))
+       protocol = VXLAN_GPE_PROTOCOL_NSH;
+      else
+       {
+         error = clib_error_return (0, "parse error: '%U'",
+                                    format_unformat_error, line_input);
+         goto done;
+       }
     }
-    else if (unformat (line_input, "group %U %U",
-                       unformat_ip4_address, &remote.ip4,
-                      unformat_vnet_sw_interface,
-                      vnet_get_main(), &mcast_sw_if_index))
-      {
-        grp_set = remote_set = 1;
-        ipv4_set = 1;
-      }
-    else if (unformat (line_input, "group %U %U",
-                       unformat_ip6_address, &remote.ip6,
-                      unformat_vnet_sw_interface,
-                      vnet_get_main(), &mcast_sw_if_index))
-      {
-        grp_set = remote_set = 1;
-        ipv6_set = 1;
-      }
-    else if (unformat (line_input, "encap-vrf-id %d", &tmp))
-      {
-        if (ipv6_set)
-          encap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
-        else
-          encap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
-
-        if (encap_fib_index == ~0)
-          {
-            error = clib_error_return (0, "nonexistent encap fib id %d", tmp);
-            goto done;
-          }
-      }
-    else if (unformat (line_input, "decap-vrf-id %d", &tmp))
-      {
-        if (ipv6_set)
-          decap_fib_index = fib_table_find (FIB_PROTOCOL_IP6, tmp);
-        else
-          decap_fib_index = fib_table_find (FIB_PROTOCOL_IP4, tmp);
-
-        if (decap_fib_index == ~0)
-          {
-            error = clib_error_return (0, "nonexistent decap fib id %d", tmp);
-            goto done;
-          }
-      }
-    else if (unformat (line_input, "vni %d", &vni))
-      vni_set = 1;
-    else if (unformat(line_input, "next-ip4"))
-      protocol = VXLAN_GPE_PROTOCOL_IP4;
-    else if (unformat(line_input, "next-ip6"))
-      protocol = VXLAN_GPE_PROTOCOL_IP6;
-    else if (unformat(line_input, "next-ethernet"))
-      protocol = VXLAN_GPE_PROTOCOL_ETHERNET;
-    else if (unformat(line_input, "next-nsh"))
-      protocol = VXLAN_GPE_PROTOCOL_NSH;
-    else
-      {
-        error = clib_error_return (0, "parse error: '%U'",
-                                   format_unformat_error, line_input);
-        goto done;
-      }
-  }
 
   if (local_set == 0)
     {
@@ -871,13 +898,13 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
       goto done;
     }
 
-  if (grp_set && !ip46_address_is_multicast(&remote))
+  if (grp_set && !ip46_address_is_multicast (&remote))
     {
       error = clib_error_return (0, "tunnel group address not multicast");
       goto done;
     }
 
-  if (grp_set == 0 && ip46_address_is_multicast(&remote))
+  if (grp_set == 0 && ip46_address_is_multicast (&remote))
     {
       error = clib_error_return (0, "remote address must be unicast");
       goto done;
@@ -894,8 +921,9 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
       goto done;
     }
 
-  if ((ipv4_set && memcmp(&local.ip4, &remote.ip4, sizeof(local.ip4)) == 0) ||
-      (ipv6_set && memcmp(&local.ip6, &remote.ip6, sizeof(local.ip6)) == 0))
+  if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
+      || (ipv6_set
+         && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
     {
       error = clib_error_return (0, "src and remote addresses are identical");
       goto done;
@@ -914,16 +942,18 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
 
 #define _(x) a->x = x;
   foreach_gpe_copy_field;
-  if (ipv4_set) foreach_copy_ipv4
-  else          foreach_copy_ipv6
+  if (ipv4_set)
+    foreach_copy_ipv4
+    else
+    foreach_copy_ipv6
 #undef _
+      rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
 
-  rv = vnet_vxlan_gpe_add_del_tunnel (a, &sw_if_index);
-
-  switch(rv)
+  switch (rv)
     {
     case 0:
-      vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+      vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
+                      vnet_get_main (), sw_if_index);
       break;
     case VNET_API_ERROR_INVALID_DECAP_NEXT:
       error = clib_error_return (0, "invalid decap-next...");
@@ -939,7 +969,7 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm,
 
     default:
       error = clib_error_return
-        (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv);
+       (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv);
       goto done;
     }
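
A hedged usage sketch, not part of the patch: the keywords accepted by the parse loop above map onto CLI invocations along these lines, assuming the registered path is create vxlan-gpe tunnel (the VLIB_CLI_COMMAND for it follows just below):

    create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 next-ip4
    create vxlan-gpe tunnel local 2001:db8::1 remote 2001:db8::2 vni 13 next-nsh
    create vxlan-gpe tunnel local 10.0.3.1 group 239.1.1.1 GigabitEthernet0/8/0 vni 13
    create vxlan-gpe tunnel local 10.0.3.1 remote 10.0.3.3 vni 13 del

encap-vrf-id and decap-vrf-id select the encap and inner-lookup FIBs; on success the command prints the new interface name via format_vnet_sw_if_index_name, as the case 0 arm above shows.
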
 
@@ -992,19 +1022,21 @@ VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = {
  */
 static clib_error_t *
 show_vxlan_gpe_tunnel_command_fn (vlib_main_t * vm,
-                                unformat_input_t * input,
-                                vlib_cli_command_t * cmd)
+                                 unformat_input_t * input,
+                                 vlib_cli_command_t * cmd)
 {
-  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
-  vxlan_gpe_tunnel_t * t;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t;
 
   if (pool_elts (ngm->tunnels) == 0)
     vlib_cli_output (vm, "No vxlan-gpe tunnels configured.");
 
+  /* *INDENT-OFF* */
   pool_foreach (t, ngm->tunnels,
   ({
     vlib_cli_output (vm, "%U", format_vxlan_gpe_tunnel, t);
   }));
+  /* *INDENT-ON* */
 
   return 0;
 }
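
The INDENT-OFF/ON guards introduced here (and around the CLIB_PACKED typedefs later in this change) mark regions the checkstyle GNU-indent pass must leave alone: pool_foreach takes its loop body as a macro argument wrapped in (({ ... })), which indent would otherwise reflow. A hedged illustration of the general pattern with a hypothetical pool (my_pool, my_elt_t and process_elt are not from this patch):

    my_elt_t *e;
    /* *INDENT-OFF* */
    pool_foreach (e, my_pool,
    ({
      process_elt (e);          /* body travels inside the macro argument */
    }));
    /* *INDENT-ON* */
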
@@ -1025,9 +1057,8 @@ VLIB_CLI_COMMAND (show_vxlan_gpe_tunnel_command, static) = {
 };
 /* *INDENT-ON* */
 
-void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index,
-                                u8 is_ip6,
-                                u8 is_enable)
+void
+vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index, u8 is_ip6, u8 is_enable)
 {
   if (is_ip6)
     vnet_feature_enable_disable ("ip6-unicast", "ip6-vxlan-gpe-bypass",
@@ -1040,28 +1071,28 @@ void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index,
 
 static clib_error_t *
 set_ip_vxlan_gpe_bypass (u32 is_ip6,
-                    unformat_input_t * input,
-                    vlib_cli_command_t * cmd)
+                        unformat_input_t * input, vlib_cli_command_t * cmd)
 {
-  unformat_input_t _line_input, * line_input = &_line_input;
-  vnet_main_t * vnm = vnet_get_main();
-  clib_error_t * error = 0;
+  unformat_input_t _line_input, *line_input = &_line_input;
+  vnet_main_t *vnm = vnet_get_main ();
+  clib_error_t *error = 0;
   u32 sw_if_index, is_enable;
 
   sw_if_index = ~0;
   is_enable = 1;
 
-  if (! unformat_user (input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
 
   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
     {
-      if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
-         ;
+      if (unformat_user
+         (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+       ;
       else if (unformat (line_input, "del"))
-        is_enable = 0;
+       is_enable = 0;
       else
-        {
+       {
          error = unformat_parse_error (line_input);
          goto done;
        }
@@ -1076,7 +1107,7 @@ set_ip_vxlan_gpe_bypass (u32 is_ip6,
 
   vnet_int_vxlan_gpe_bypass_mode (sw_if_index, is_ip6, is_enable);
 
- done:
+done:
   unformat_free (line_input);
 
   return error;
@@ -1084,8 +1115,7 @@ set_ip_vxlan_gpe_bypass (u32 is_ip6,
 
 static clib_error_t *
 set_ip4_vxlan_gpe_bypass (vlib_main_t * vm,
-                     unformat_input_t * input,
-                     vlib_cli_command_t * cmd)
+                         unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   return set_ip_vxlan_gpe_bypass (0, input, cmd);
 }
@@ -1142,8 +1172,7 @@ VLIB_CLI_COMMAND (set_interface_ip_vxlan_gpe_bypass_command, static) = {
 
 static clib_error_t *
 set_ip6_vxlan_gpe_bypass (vlib_main_t * vm,
-                     unformat_input_t * input,
-                     vlib_cli_command_t * cmd)
+                         unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   return set_ip_vxlan_gpe_bypass (1, input, cmd);
 }
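
Both thin wrappers land in set_ip_vxlan_gpe_bypass(), which toggles the ip4-vxlan-gpe-bypass or ip6-vxlan-gpe-bypass feature on the given interface. A hedged usage sketch (interface name illustrative; paths per the set_interface_ip_vxlan_gpe_bypass commands these functions back):

    set interface ip vxlan-gpe-bypass GigabitEthernet2/0/0
    set interface ip6 vxlan-gpe-bypass GigabitEthernet2/0/0 del

The first enables the IPv4 bypass feature on the interface; the second disables the IPv6 one (del sets is_enable to 0 in the parse loop above).
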
@@ -1222,41 +1251,50 @@ VNET_FEATURE_INIT (ip6_vxlan_gpe_bypass, static) =
  * @return error
  *
  */
-clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
+clib_error_t *
+vxlan_gpe_init (vlib_main_t * vm)
 {
   vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
 
-  ngm->vnet_main = vnet_get_main();
+  ngm->vnet_main = vnet_get_main ();
   ngm->vlib_main = vm;
 
   ngm->vxlan4_gpe_tunnel_by_key
-    = hash_create_mem (0, sizeof(vxlan4_gpe_tunnel_key_t), sizeof (uword));
+    = hash_create_mem (0, sizeof (vxlan4_gpe_tunnel_key_t), sizeof (uword));
 
   ngm->vxlan6_gpe_tunnel_by_key
-    = hash_create_mem (0, sizeof(vxlan6_gpe_tunnel_key_t), sizeof (uword));
+    = hash_create_mem (0, sizeof (vxlan6_gpe_tunnel_key_t), sizeof (uword));
 
 
-  ngm->mcast_shared = hash_create_mem(0,
-        sizeof(ip46_address_t),
-       sizeof(mcast_shared_t));
+  ngm->mcast_shared = hash_create_mem (0,
+                                      sizeof (ip46_address_t),
+                                      sizeof (mcast_shared_t));
 
   udp_register_dst_port (vm, UDP_DST_PORT_VXLAN_GPE,
-                         vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
+                        vxlan4_gpe_input_node.index, 1 /* is_ip4 */ );
   udp_register_dst_port (vm, UDP_DST_PORT_VXLAN6_GPE,
-                         vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
+                        vxlan6_gpe_input_node.index, 0 /* is_ip4 */ );
 
   /* Register the list of standard decap protocols supported */
   vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP4,
-                                     VXLAN_GPE_INPUT_NEXT_IP4_INPUT);
+                                    VXLAN_GPE_INPUT_NEXT_IP4_INPUT);
   vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP6,
-                                     VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
+                                    VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
   vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_ETHERNET,
-                                     VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT);
+                                    VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT);
 
-  fib_node_register_type(FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, &vxlan_gpe_vft);
+  fib_node_register_type (FIB_NODE_TYPE_VXLAN_GPE_TUNNEL, &vxlan_gpe_vft);
 
   return 0;
 }
 
-VLIB_INIT_FUNCTION(vxlan_gpe_init);
+VLIB_INIT_FUNCTION (vxlan_gpe_init);
 
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
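
vxlan_gpe_init() pre-wires decap next nodes for the IP4, IP6 and Ethernet payloads only; other next-protocol values are expected to be registered at runtime through the same hook. A hedged sketch of how a plugin could wire the NSH payload (parsed as next-nsh by the CLI but not registered above); the nsh-input node name is an assumption, and the ASSERT encodes the assumption that both input nodes hand out the same next slot:

    static clib_error_t *
    nsh_over_vxlan_gpe_init (vlib_main_t * vm)
    {
      /* hypothetical plugin node; name is illustrative */
      vlib_node_t *nsh = vlib_get_node_by_name (vm, (u8 *) "nsh-input");
      u32 next4 = vlib_node_add_next (vm, vxlan4_gpe_input_node.index,
                                      nsh->index);
      u32 next6 = vlib_node_add_next (vm, vxlan6_gpe_input_node.index,
                                      nsh->index);
      ASSERT (next4 == next6);  /* one slot serves both address families */
      vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_NSH, next4);
      return 0;
    }
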
src/vnet/vxlan-gpe/vxlan_gpe.h
index 053bcbe..b7e75d7 100644
@@ -39,6 +39,7 @@
  * @brief VXLAN GPE header struct
  *
  */
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   /** 20 bytes */
   ip4_header_t ip4;
@@ -47,7 +48,9 @@ typedef CLIB_PACKED (struct {
   /** 8 bytes */
   vxlan_gpe_header_t vxlan;
 }) ip4_vxlan_gpe_header_t;
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   /** 40 bytes */
   ip6_header_t ip6;
@@ -56,6 +59,7 @@ typedef CLIB_PACKED (struct {
   /** 8 bytes */
   vxlan_gpe_header_t vxlan;
 }) ip6_vxlan_gpe_header_t;
+/* *INDENT-ON* */
 
 /**
  * @brief Key struct for IPv4 VXLAN GPE tunnel.
@@ -63,6 +67,7 @@ typedef CLIB_PACKED (struct {
  * all fields in NET byte order
  * VNI shifted 8 bits
  */
+/* *INDENT-OFF* */
 typedef CLIB_PACKED(struct {
   union {
     struct {
@@ -75,6 +80,7 @@ typedef CLIB_PACKED(struct {
     u64 as_u64[2];
   };
 }) vxlan4_gpe_tunnel_key_t;
+/* *INDENT-ON* */
 
 /**
  * @brief Key struct for IPv6 VXLAN GPE tunnel.
@@ -82,24 +88,27 @@ typedef CLIB_PACKED(struct {
  * all fields in NET byte order
  * VNI shifted 8 bits
  */
+/* *INDENT-OFF* */
 typedef CLIB_PACKED(struct {
   ip6_address_t local;
   ip6_address_t remote;
   u32 vni;
 }) vxlan6_gpe_tunnel_key_t;
+/* *INDENT-ON* */
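
Both keys are flat, fixed-size structs so they can be hashed and compared as raw memory (the IPv4 variant overlays as_u64[2] for exactly that reason). A hedged sketch of filling the IPv6 key on the decap path, keeping every field in network byte order with the VNI still shifted 8 bits as it sits on the wire; ip6_hdr, gpe_hdr and the vni_res field name are assumptions, not quoted from this hunk:

    uword *p;
    vxlan6_gpe_tunnel_key_t key6;
    key6.local.as_u64[0] = ip6_hdr->dst_address.as_u64[0];
    key6.local.as_u64[1] = ip6_hdr->dst_address.as_u64[1];
    key6.remote.as_u64[0] = ip6_hdr->src_address.as_u64[0];
    key6.remote.as_u64[1] = ip6_hdr->src_address.as_u64[1];
    key6.vni = gpe_hdr->vni_res;        /* VNI<<8 | reserved, unswapped */
    p = hash_get_mem (ngm->vxlan6_gpe_tunnel_by_key, &key6);
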
 
 /**
  * @brief Struct for VXLAN GPE tunnel
  */
-typedef struct {
+typedef struct
+{
   /** Rewrite string. $$$$ embed vnet_rewrite header */
-  u8 * rewrite;
+  u8 *rewrite;
 
   /** encapsulated protocol */
   u8 protocol;
 
   /* FIB DPO for IP forwarding of VXLAN-GPE encap packet */
-  dpo_id_t next_dpo;  
+  dpo_id_t next_dpo;
   /** tunnel local address */
   ip46_address_t local;
   /** tunnel remote address */
@@ -107,7 +116,7 @@ typedef struct {
 
   /* mcast packet output intfc index (used only if dst is mcast) */
   u32 mcast_sw_if_index;
-  
+
   /** FIB indices - tunnel partner lookup here */
   u32 encap_fib_index;
   /** FIB indices - inner IP packet lookup here */
@@ -125,7 +134,7 @@ typedef struct {
   u32 flags;
 
   /** rewrite size for dynamic plugins like iOAM */
-  u8  rewrite_size;
+  u8 rewrite_size;
 
   /** Next node after VxLAN-GPE encap */
   uword encap_next_node;
@@ -163,15 +172,17 @@ _(IP6_INPUT, "ip6-input")                       \
 _(ETHERNET_INPUT, "ethernet-input")
 
 /** struct for next nodes for VXLAN GPE input */
-typedef enum {
+typedef enum
+{
 #define _(s,n) VXLAN_GPE_INPUT_NEXT_##s,
   foreach_vxlan_gpe_input_next
 #undef _
-  VXLAN_GPE_INPUT_N_NEXT,
+    VXLAN_GPE_INPUT_N_NEXT,
 } vxlan_gpe_input_next_t;
 
 /** struct for VXLAN GPE errors */
-typedef enum {
+typedef enum
+{
 #define vxlan_gpe_error(n,s) VXLAN_GPE_ERROR_##n,
 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
 #undef vxlan_gpe_error
@@ -179,31 +190,32 @@ typedef enum {
 } vxlan_gpe_input_error_t;
 
 /** Struct for VXLAN GPE node state */
-typedef struct {
+typedef struct
+{
   /** vector of encap tunnel instances */
   vxlan_gpe_tunnel_t *tunnels;
 
   /** lookup IPv4 VXLAN GPE tunnel by key */
-  uword * vxlan4_gpe_tunnel_by_key;
+  uword *vxlan4_gpe_tunnel_by_key;
   /** lookup IPv6 VXLAN GPE tunnel by key */
-  uword * vxlan6_gpe_tunnel_by_key;
+  uword *vxlan6_gpe_tunnel_by_key;
 
   /* local VTEP IPs ref count used by vxlan-bypass node to check if
      received VXLAN packet DIP matches any local VTEP address */
-  uword * vtep4;  /* local ip4 VTEPs keyed on their ip4 addr */
-  uword * vtep6;  /* local ip6 VTEPs keyed on their ip6 addr */
+  uword *vtep4;                        /* local ip4 VTEPs keyed on their ip4 addr */
+  uword *vtep6;                        /* local ip6 VTEPs keyed on their ip6 addr */
   /* mcast shared info */
-  uword * mcast_shared; /* keyed on mcast ip46 addr */
+  uword *mcast_shared;         /* keyed on mcast ip46 addr */
   /** Free vlib hw_if_indices */
-  u32 * free_vxlan_gpe_tunnel_hw_if_indices;
+  u32 *free_vxlan_gpe_tunnel_hw_if_indices;
 
   /** Mapping from sw_if_index to tunnel index */
-  u32 * tunnel_index_by_sw_if_index;
+  u32 *tunnel_index_by_sw_if_index;
 
   /** State convenience vlib_main_t */
-  vlib_main_t * vlib_main;
+  vlib_main_t *vlib_main;
   /** State convenience vnet_main_t */
-  vnet_main_t * vnet_main;
+  vnet_main_t *vnet_main;
 
   /** List of next nodes for the decap indexed on protocol */
   uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
@@ -215,10 +227,11 @@ extern vlib_node_registration_t vxlan_gpe_encap_node;
 extern vlib_node_registration_t vxlan4_gpe_input_node;
 extern vlib_node_registration_t vxlan6_gpe_input_node;
 
-u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
+u8 *format_vxlan_gpe_encap_trace (u8 * s, va_list * args);
 
 /** Struct for VXLAN GPE add/del args */
-typedef struct {
+typedef struct
+{
   u8 is_add;
   u8 is_ip6;
   ip46_address_t local, remote;
@@ -231,18 +244,19 @@ typedef struct {
 
 
 int vnet_vxlan_gpe_add_del_tunnel
-(vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
+  (vnet_vxlan_gpe_add_del_tunnel_args_t * a, u32 * sw_if_indexp);
 
 
 int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
-                        u8 protocol_override, uword encap_next_node);
+                       u8 protocol_override, uword encap_next_node);
 int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
-                       u8 protocol_override, uword encap_next_node);
+                       u8 protocol_override, uword encap_next_node);
 
 /**
  * @brief Struct for defining VXLAN GPE next nodes
  */
-typedef enum {
+typedef enum
+{
   VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
   VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
   VXLAN_GPE_ENCAP_NEXT_DROP,
@@ -250,10 +264,21 @@ typedef enum {
 } vxlan_gpe_encap_next_t;
 
 
-void vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index);
+void vxlan_gpe_unregister_decap_protocol (u8 protocol_id,
+                                         uword next_node_index);
 
-void vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index);
+void vxlan_gpe_register_decap_protocol (u8 protocol_id,
+                                       uword next_node_index);
 
-void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index, u8 is_ip6, u8 is_enable);
+void vnet_int_vxlan_gpe_bypass_mode (u32 sw_if_index, u8 is_ip6,
+                                    u8 is_enable);
 
 #endif /* included_vnet_vxlan_gpe_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
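
The CLI and binary API both funnel into vnet_vxlan_gpe_add_del_tunnel(). A hedged sketch of a programmatic IPv4 add; only is_add, is_ip6, local and remote are visible in this hunk, so the vni and protocol members are assumptions based on the fields the CLI copies in via foreach_gpe_copy_field:

    vnet_vxlan_gpe_add_del_tunnel_args_t a = { 0 };
    u32 sw_if_index = ~0;

    a.is_add = 1;
    a.is_ip6 = 0;
    a.local.ip4.as_u32 = clib_host_to_net_u32 (0x0a000301);   /* 10.0.3.1 */
    a.remote.ip4.as_u32 = clib_host_to_net_u32 (0x0a000303);  /* 10.0.3.3 */
    a.vni = 13;                           /* assumed member */
    a.protocol = VXLAN_GPE_PROTOCOL_IP4;  /* assumed member */
    if (vnet_vxlan_gpe_add_del_tunnel (&a, &sw_if_index) == 0)
      ;  /* sw_if_index now names the new tunnel interface */
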
src/vnet/vxlan-gpe/vxlan_gpe_packet.h
index ec3c2e5..f5e5ddc 100644
  *   |                VXLAN Network Identifier (VNI) |   Reserved    |
  *   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- *   I Bit: Flag bit 4 indicates that the VNI is valid. 
+ *   I Bit: Flag bit 4 indicates that the VNI is valid.
  *
  *   P Bit:  Flag bit 5 is defined as the Next Protocol bit.  The P bit
  *      MUST be set to 1 to indicate the presence of the 8 bit next
  *      protocol field.
  *
- *   O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1, 
+ *   O Bit: Flag bit 7 is defined as the O bit. When the O bit is set to 1,
  *
  *      the packet is an OAM packet and OAM processing MUST occur.  The OAM
  *      protocol details are out of scope for this document.  As with the
@@ -80,17 +80,19 @@ _ (0x05, IOAM)
  * 4 - NSH
  * 5 - IOAM
  */
-typedef enum {
+typedef enum
+{
 #define _(n,f) VXLAN_GPE_PROTOCOL_##f = n,
   foreach_vxlan_gpe_protocol
 #undef _
-  VXLAN_GPE_PROTOCOL_MAX,
+    VXLAN_GPE_PROTOCOL_MAX,
 } vxlan_gpe_protocol_t;
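
Each _(n,f) entry in foreach_vxlan_gpe_protocol becomes one enumerator, so the expansion is, per the value list in the comment above (a sketch, not verbatim preprocessor output):

    typedef enum
    {
      VXLAN_GPE_PROTOCOL_IP4 = 0x01,
      VXLAN_GPE_PROTOCOL_IP6 = 0x02,
      VXLAN_GPE_PROTOCOL_ETHERNET = 0x03,
      VXLAN_GPE_PROTOCOL_NSH = 0x04,
      VXLAN_GPE_PROTOCOL_IOAM = 0x05,
      VXLAN_GPE_PROTOCOL_MAX,   /* 0x06; sizes decap_next_node_list */
    } vxlan_gpe_protocol_t;
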
 
 /**
  * @brief VXLAN GPE Header definition
  */
-typedef struct {
+typedef struct
+{
   u8 flags;
   /** Version and Reserved */
   u8 ver_res;
@@ -108,3 +110,11 @@ typedef struct {
 #define VXLAN_GPE_VERSION 0x0
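
A hedged sketch of the flag masks the I/P/O description earlier implies, taking bit 0 as the most significant bit of the flags octet (values inferred, not quoted from this hunk), followed by the kind of check a decap node would apply to a vxlan_gpe_header_t *h:

    #define VXLAN_GPE_FLAGS_I 0x08      /* bit 4: VNI is valid */
    #define VXLAN_GPE_FLAGS_P 0x04      /* bit 5: next protocol present */
    #define VXLAN_GPE_FLAGS_O 0x01      /* bit 7: OAM packet */

    if ((h->flags & (VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P))
        != (VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P))
      goto drop;                /* no usable VNI / next protocol */
    if (h->flags & VXLAN_GPE_FLAGS_O)
      goto oam;                 /* divert to OAM processing */
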
 
 #endif /* included_vxlan_gpe_packet_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */