VPP-263 - Coding standards cleanup - vnet/vnet/map  (refs/changes/44/2244/4)
Author:    Keith Burns (alagalah) <alagalah@gmail.com>
           Sun, 7 Aug 2016 15:43:18 +0000 (08:43 -0700)
Committer: Dave Barach <openvpp@barachs.net>
           Mon, 8 Aug 2016 15:32:42 +0000 (15:32 +0000)
Change-Id: I401124320a21e30b5cdf3684d36890e51a2fcb6c
Signed-off-by: Keith Burns (alagalah) <alagalah@gmail.com>
vnet/vnet/map/ip4_map.c
vnet/vnet/map/ip4_map_t.c
vnet/vnet/map/ip6_map.c
vnet/vnet/map/ip6_map_t.c
vnet/vnet/map/map.c
vnet/vnet/map/map.h

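Note: the hunks below are a coding-style cleanup; the visible changes only re-indent existing code to the GNU C style used across fd.io/VPP (see the emacs "gnu" style footer added at the end of each file). As a rough, illustrative sketch of the conversion applied throughout -- not code taken from this patch, and do_something() is only a placeholder -- the style change looks like this:

    /* before: brace on the same line, no space before '(' */
    if (port) {
      do_something(port);
    }

    /* after: GNU style - space before '(', braces on their own, extra-indented lines */
    if (port)
      {
        do_something (port);
      }
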
diff --git a/vnet/vnet/map/ip4_map.c b/vnet/vnet/map/ip4_map.c
index a40238f..1a75ca3 100644
@@ -22,7 +22,8 @@
 
 vlib_node_registration_t ip4_map_reass_node;
 
-enum ip4_map_next_e {
+enum ip4_map_next_e
+{
   IP4_MAP_NEXT_IP6_LOOKUP,
 #ifdef MAP_SKIP_IP6_LOOKUP
   IP4_MAP_NEXT_IP6_REWRITE,
@@ -35,91 +36,121 @@ enum ip4_map_next_e {
   IP4_MAP_N_NEXT,
 };
 
-enum ip4_map_reass_next_t {
+enum ip4_map_reass_next_t
+{
   IP4_MAP_REASS_NEXT_IP6_LOOKUP,
   IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
   IP4_MAP_REASS_NEXT_DROP,
   IP4_MAP_REASS_N_NEXT,
 };
 
-typedef struct {
+typedef struct
+{
   u32 map_domain_index;
   u16 port;
   u8 cached;
 } map_ip4_map_reass_trace_t;
 
 u8 *
-format_ip4_map_reass_trace (u8 *s, va_list *args)
+format_ip4_map_reass_trace (u8 * s, va_list * args)
 {
-  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
-  return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
-                t->port, t->cached?"cached":"forwarded");
+  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
+                t->map_domain_index, t->port,
+                t->cached ? "cached" : "forwarded");
 }
 
 /*
  * ip4_map_get_port
  */
 u16
-ip4_map_get_port (ip4_header_t *ip, map_dir_e dir)
+ip4_map_get_port (ip4_header_t * ip, map_dir_e dir)
 {
   /* Find port information */
-  if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
-                  (ip->protocol == IP_PROTOCOL_UDP))) {
-    udp_header_t *udp = (void *)(ip + 1);
-    return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
-  } else if (ip->protocol == IP_PROTOCOL_ICMP) {
-    /*
-     * 1) ICMP Echo request or Echo reply
-     * 2) ICMP Error with inner packet being UDP or TCP
-     * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
-     */
-    icmp46_header_t *icmp = (void *)(ip + 1);
-    if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) {
-      return *((u16 *)(icmp + 1));
-    } else if (clib_net_to_host_u16(ip->length) >= 56) { // IP + ICMP + IP + L4 header
-      ip4_header_t *icmp_ip = (ip4_header_t *)(icmp + 2);
-      if (PREDICT_TRUE((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
-                      (icmp_ip->protocol == IP_PROTOCOL_UDP))) {
-       udp_header_t *udp = (void *)(icmp_ip + 1);
-       return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
-      } else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) {
-       icmp46_header_t *inner_icmp = (void *)(icmp_ip + 1);
-       if (inner_icmp->type == ICMP4_echo_request || inner_icmp->type == ICMP4_echo_reply)
-         return (*((u16 *)(inner_icmp + 1)));
-      }
+  if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+                   (ip->protocol == IP_PROTOCOL_UDP)))
+    {
+      udp_header_t *udp = (void *) (ip + 1);
+      return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
+    }
+  else if (ip->protocol == IP_PROTOCOL_ICMP)
+    {
+      /*
+       * 1) ICMP Echo request or Echo reply
+       * 2) ICMP Error with inner packet being UDP or TCP
+       * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
+       */
+      icmp46_header_t *icmp = (void *) (ip + 1);
+      if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
+       {
+         return *((u16 *) (icmp + 1));
+       }
+      else if (clib_net_to_host_u16 (ip->length) >= 56)
+       {                       // IP + ICMP + IP + L4 header
+         ip4_header_t *icmp_ip = (ip4_header_t *) (icmp + 2);
+         if (PREDICT_TRUE ((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
+                           (icmp_ip->protocol == IP_PROTOCOL_UDP)))
+           {
+             udp_header_t *udp = (void *) (icmp_ip + 1);
+             return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
+           }
+         else if (icmp_ip->protocol == IP_PROTOCOL_ICMP)
+           {
+             icmp46_header_t *inner_icmp = (void *) (icmp_ip + 1);
+             if (inner_icmp->type == ICMP4_echo_request
+                 || inner_icmp->type == ICMP4_echo_reply)
+               return (*((u16 *) (inner_icmp + 1)));
+           }
+       }
     }
-  }
   return (0);
 }
 
 static_always_inline u16
-ip4_map_port_and_security_check (map_domain_t *d, ip4_header_t *ip, u32 *next, u8 *error)
+ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
+                                u32 * next, u8 * error)
 {
   u16 port = 0;
 
-  if (d->psid_length > 0) {
-    if (ip4_get_fragment_offset(ip) == 0) {
-      if (PREDICT_FALSE((ip->ip_version_and_header_length != 0x45) || clib_host_to_net_u16(ip->length) < 28)) {
-       return 0;
-      }
-      port = ip4_map_get_port(ip, MAP_RECEIVER);
-      if (port) {
-       /* Verify that port is not among the well-known ports */
-       if ((d->psid_offset > 0) && (clib_net_to_host_u16(port) < (0x1 << (16 - d->psid_offset)))) {
-         *error = MAP_ERROR_ENCAP_SEC_CHECK;
-       } else {
-         if (ip4_get_fragment_more(ip)) *next = IP4_MAP_NEXT_REASS;
-         return (port);
+  if (d->psid_length > 0)
+    {
+      if (ip4_get_fragment_offset (ip) == 0)
+       {
+         if (PREDICT_FALSE
+             ((ip->ip_version_and_header_length != 0x45)
+              || clib_host_to_net_u16 (ip->length) < 28))
+           {
+             return 0;
+           }
+         port = ip4_map_get_port (ip, MAP_RECEIVER);
+         if (port)
+           {
+             /* Verify that port is not among the well-known ports */
+             if ((d->psid_offset > 0)
+                 && (clib_net_to_host_u16 (port) <
+                     (0x1 << (16 - d->psid_offset))))
+               {
+                 *error = MAP_ERROR_ENCAP_SEC_CHECK;
+               }
+             else
+               {
+                 if (ip4_get_fragment_more (ip))
+                   *next = IP4_MAP_NEXT_REASS;
+                 return (port);
+               }
+           }
+         else
+           {
+             *error = MAP_ERROR_BAD_PROTOCOL;
+           }
+       }
+      else
+       {
+         *next = IP4_MAP_NEXT_REASS;
        }
-      } else {
-       *error = MAP_ERROR_BAD_PROTOCOL;
-      }
-    } else {
-      *next = IP4_MAP_NEXT_REASS;
     }
-  }
   return (0);
 }
 
@@ -127,33 +158,35 @@ ip4_map_port_and_security_check (map_domain_t *d, ip4_header_t *ip, u32 *next, u
  * ip4_map_vtcfl
  */
 static_always_inline u32
-ip4_map_vtcfl (ip4_header_t *ip4, vlib_buffer_t *p)
+ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
 {
   map_main_t *mm = &map_main;
   u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
   u32 vtcfl = 0x6 << 28;
   vtcfl |= tc << 20;
-  vtcfl |= vnet_buffer(p)->ip.flow_hash & 0x000fffff;
+  vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;
 
-  return (clib_host_to_net_u32(vtcfl));
+  return (clib_host_to_net_u32 (vtcfl));
 }
 
 static_always_inline bool
-ip4_map_ip6_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
+ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
 {
 #ifdef MAP_SKIP_IP6_LOOKUP
   map_main_t *mm = &map_main;
   u32 adj_index0 = mm->adj6_index;
-  if (adj_index0 > 0) {
-    ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
-    ip_adjacency_t *adj = ip_get_adjacency(lm6, mm->adj6_index);
-    if (adj->n_adj > 1) {
-      u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
-      adj_index0 += (hash_c0 & (adj->n_adj - 1));
+  if (adj_index0 > 0)
+    {
+      ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
+      ip_adjacency_t *adj = ip_get_adjacency (lm6, mm->adj6_index);
+      if (adj->n_adj > 1)
+       {
+         u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+         adj_index0 += (hash_c0 & (adj->n_adj - 1));
+       }
+      vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
+      return (true);
     }
-    vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
-    return (true);
-  }
 #endif
   return (false);
 }
@@ -162,14 +195,14 @@ ip4_map_ip6_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
  * ip4_map_ttl
  */
 static inline void
-ip4_map_decrement_ttl (ip4_header_t *ip, u8 *error)
+ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
 {
   i32 ttl = ip->ttl;
 
   /* Input node should have reject packets with ttl 0. */
   ASSERT (ip->ttl > 0);
 
-  u32 checksum = ip->checksum + clib_host_to_net_u16(0x0100);
+  u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
   checksum += checksum >= 0xffff;
   ip->checksum = checksum;
   ttl -= 1;
@@ -177,270 +210,356 @@ ip4_map_decrement_ttl (ip4_header_t *ip, u8 *error)
   *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
 
   /* Verify checksum. */
-  ASSERT (ip->checksum == ip4_header_checksum(ip));
+  ASSERT (ip->checksum == ip4_header_checksum (ip));
 }
 
 static u32
-ip4_map_fragment (vlib_buffer_t *b, u16 mtu, bool df, u8 *error)
+ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
 {
   map_main_t *mm = &map_main;
 
-  if (mm->frag_inner) {
-    ip_frag_set_vnet_buffer(b, sizeof(ip6_header_t), mtu, IP4_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
-    return (IP4_MAP_NEXT_IP4_FRAGMENT);
-  } else {
-    if (df && !mm->frag_ignore_df) {
-      icmp4_error_set_vnet_buffer(b, ICMP4_destination_unreachable,
-                                 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, mtu);
-      vlib_buffer_advance(b, sizeof(ip6_header_t));
-      *error = MAP_ERROR_DF_SET;
-      return (IP4_MAP_NEXT_ICMP_ERROR);
+  if (mm->frag_inner)
+    {
+      ip_frag_set_vnet_buffer (b, sizeof (ip6_header_t), mtu,
+                              IP4_FRAG_NEXT_IP6_LOOKUP,
+                              IP_FRAG_FLAG_IP6_HEADER);
+      return (IP4_MAP_NEXT_IP4_FRAGMENT);
+    }
+  else
+    {
+      if (df && !mm->frag_ignore_df)
+       {
+         icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
+                                      ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
+                                      mtu);
+         vlib_buffer_advance (b, sizeof (ip6_header_t));
+         *error = MAP_ERROR_DF_SET;
+         return (IP4_MAP_NEXT_ICMP_ERROR);
+       }
+      ip_frag_set_vnet_buffer (b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
+                              IP_FRAG_FLAG_IP6_HEADER);
+      return (IP4_MAP_NEXT_IP6_FRAGMENT);
     }
-    ip_frag_set_vnet_buffer(b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
-    return (IP4_MAP_NEXT_IP6_FRAGMENT);
-  }
 }
 
 /*
  * ip4_map
  */
 static uword
-ip4_map (vlib_main_t *vm,
-        vlib_node_runtime_t *node,
-        vlib_frame_t *frame)
+ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_node.index);
-  from = vlib_frame_vector_args(frame);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip4_map_node.index);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
   map_main_t *mm = &map_main;
   vlib_combined_counter_main_t *cm = mm->domain_counters;
-  u32 cpu_index = os_get_cpu_number();
-
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    /* Dual loop */
-    while (n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      map_domain_t *d0, *d1;
-      u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
-      ip4_header_t *ip40, *ip41;
-      u16 port0 = 0, port1 = 0;
-      ip6_header_t *ip6h0, *ip6h1;
-      u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
-      u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 = IP4_MAP_NEXT_IP6_LOOKUP;
-
-      /* Prefetch next iteration. */
-      {
-       vlib_buffer_t *p2, *p3;
-
-       p2 = vlib_get_buffer(vm, from[2]);
-       p3 = vlib_get_buffer(vm, from[3]);
-
-       vlib_prefetch_buffer_header(p2, STORE);
-       vlib_prefetch_buffer_header(p3, STORE);
-       /* IPv4 + 8 = 28. possibly plus -40 */
-       CLIB_PREFETCH (p2->data-40, 68, STORE);
-       CLIB_PREFETCH (p3->data-40, 68, STORE);
-      }
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next +=2;
-      n_left_to_next -= 2;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip40 = vlib_buffer_get_current(p0);
-      ip41 = vlib_buffer_get_current(p1);
-      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
-      d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], &map_domain_index1);
-      ASSERT(d0);
-      ASSERT(d1);
-
-      /*
-       * Shared IPv4 address
-       */
-      port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
-      port1 = ip4_map_port_and_security_check(d1, ip41, &next1, &error1);
-
-      /* Decrement IPv4 TTL */
-      ip4_map_decrement_ttl(ip40, &error0);
-      ip4_map_decrement_ttl(ip41, &error1);
-      bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
-      bool df1 = ip41->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
-
-      /* MAP calc */
-      u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
-      u32 da41 = clib_net_to_host_u32(ip41->dst_address.as_u32);
-      u16 dp40 = clib_net_to_host_u16(port0);
-      u16 dp41 = clib_net_to_host_u16(port1);
-      u64 dal60 = map_get_pfx(d0, da40, dp40);
-      u64 dal61 = map_get_pfx(d1, da41, dp41);
-      u64 dar60 = map_get_sfx(d0, da40, dp40);
-      u64 dar61 = map_get_sfx(d1, da41, dp41);
-      if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE && next0 != IP4_MAP_NEXT_REASS)
-       error0 = MAP_ERROR_NO_BINDING;
-      if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE && next1 != IP4_MAP_NEXT_REASS)
-       error1 = MAP_ERROR_NO_BINDING;
-
-      /* construct ipv6 header */
-      vlib_buffer_advance(p0, - sizeof(ip6_header_t));
-      vlib_buffer_advance(p1, - sizeof(ip6_header_t));
-      ip6h0 = vlib_buffer_get_current(p0);
-      ip6h1 = vlib_buffer_get_current(p1);
-      vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
-      vnet_buffer(p1)->sw_if_index[VLIB_TX] = (u32)~0;
-
-      ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
-      ip6h1->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip41, p1);
-      ip6h0->payload_length = ip40->length;
-      ip6h1->payload_length = ip41->length;
-      ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
-      ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
-      ip6h0->hop_limit = 0x40;
-      ip6h1->hop_limit = 0x40;
-      ip6h0->src_address = d0->ip6_src;
-      ip6h1->src_address = d1->ip6_src;
-      ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
-      ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
-      ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64(dal61);
-      ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64(dar61);
-
-      /*
-       * Determine next node. Can be one of:
-       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
-       */
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
-       if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
-         next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
-       } else {
-         next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
-                                         clib_net_to_host_u16(ip6h0->payload_length) + 40);
+  u32 cpu_index = os_get_cpu_number ();
+
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         map_domain_t *d0, *d1;
+         u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
+         ip4_header_t *ip40, *ip41;
+         u16 port0 = 0, port1 = 0;
+         ip6_header_t *ip6h0, *ip6h1;
+         u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
+         u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
+           IP4_MAP_NEXT_IP6_LOOKUP;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, STORE);
+           vlib_prefetch_buffer_header (p3, STORE);
+           /* IPv4 + 8 = 28. possibly plus -40 */
+           CLIB_PREFETCH (p2->data - 40, 68, STORE);
+           CLIB_PREFETCH (p3->data - 40, 68, STORE);
+         }
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip40 = vlib_buffer_get_current (p0);
+         ip41 = vlib_buffer_get_current (p1);
+         d0 =
+           ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                               &map_domain_index0);
+         d1 =
+           ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+                               &map_domain_index1);
+         ASSERT (d0);
+         ASSERT (d1);
+
+         /*
+          * Shared IPv4 address
+          */
+         port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
+         port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);
+
+         /* Decrement IPv4 TTL */
+         ip4_map_decrement_ttl (ip40, &error0);
+         ip4_map_decrement_ttl (ip41, &error1);
+         bool df0 =
+           ip40->
+           flags_and_fragment_offset &
+           clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+         bool df1 =
+           ip41->
+           flags_and_fragment_offset &
+           clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+
+         /* MAP calc */
+         u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
+         u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
+         u16 dp40 = clib_net_to_host_u16 (port0);
+         u16 dp41 = clib_net_to_host_u16 (port1);
+         u64 dal60 = map_get_pfx (d0, da40, dp40);
+         u64 dal61 = map_get_pfx (d1, da41, dp41);
+         u64 dar60 = map_get_sfx (d0, da40, dp40);
+         u64 dar61 = map_get_sfx (d1, da41, dp41);
+         if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
+             && next0 != IP4_MAP_NEXT_REASS)
+           error0 = MAP_ERROR_NO_BINDING;
+         if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
+             && next1 != IP4_MAP_NEXT_REASS)
+           error1 = MAP_ERROR_NO_BINDING;
+
+         /* construct ipv6 header */
+         vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+         vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+         ip6h0 = vlib_buffer_get_current (p0);
+         ip6h1 = vlib_buffer_get_current (p1);
+         vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+         vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+         ip6h0->ip_version_traffic_class_and_flow_label =
+           ip4_map_vtcfl (ip40, p0);
+         ip6h1->ip_version_traffic_class_and_flow_label =
+           ip4_map_vtcfl (ip41, p1);
+         ip6h0->payload_length = ip40->length;
+         ip6h1->payload_length = ip41->length;
+         ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
+         ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
+         ip6h0->hop_limit = 0x40;
+         ip6h1->hop_limit = 0x40;
+         ip6h0->src_address = d0->ip6_src;
+         ip6h1->src_address = d1->ip6_src;
+         ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
+         ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
+         ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
+         ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);
+
+         /*
+          * Determine next node. Can be one of:
+          * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
+          */
+         if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+           {
+             if (PREDICT_FALSE
+                 (d0->mtu
+                  && (clib_net_to_host_u16 (ip6h0->payload_length) +
+                      sizeof (*ip6h0) > d0->mtu)))
+               {
+                 next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
+               }
+             else
+               {
+                 next0 =
+                   ip4_map_ip6_lookup_bypass (p0,
+                                              ip40) ?
+                   IP4_MAP_NEXT_IP6_REWRITE : next0;
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                                  cpu_index,
+                                                  map_domain_index0, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip6h0->payload_length) +
+                                                  40);
+               }
+           }
+         else
+           {
+             next0 = IP4_MAP_NEXT_DROP;
+           }
+
+         /*
+          * Determine next node. Can be one of:
+          * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
+          */
+         if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
+           {
+             if (PREDICT_FALSE
+                 (d1->mtu
+                  && (clib_net_to_host_u16 (ip6h1->payload_length) +
+                      sizeof (*ip6h1) > d1->mtu)))
+               {
+                 next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
+               }
+             else
+               {
+                 next1 =
+                   ip4_map_ip6_lookup_bypass (p1,
+                                              ip41) ?
+                   IP4_MAP_NEXT_IP6_REWRITE : next1;
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                                  cpu_index,
+                                                  map_domain_index1, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip6h1->payload_length) +
+                                                  40);
+               }
+           }
+         else
+           {
+             next1 = IP4_MAP_NEXT_DROP;
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = port0;
+           }
+         if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
+             tr->map_domain_index = map_domain_index1;
+             tr->port = port1;
+           }
+
+         p0->error = error_node->errors[error0];
+         p1->error = error_node->errors[error1];
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, pi1, next0,
+                                          next1);
        }
-      } else {
-        next0 = IP4_MAP_NEXT_DROP;
-      }
 
-      /*
-       * Determine next node. Can be one of:
-       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
-       */
-      if (PREDICT_TRUE(error1 == MAP_ERROR_NONE)) {
-       if (PREDICT_FALSE(d1->mtu && (clib_net_to_host_u16(ip6h1->payload_length) + sizeof(*ip6h1) > d1->mtu))) {
-         next1 = ip4_map_fragment(p1, d1->mtu, df1, &error1);
-       } else {
-         next1 = ip4_map_ip6_lookup_bypass(p1, ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1;
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index1, 1,
-                                         clib_net_to_host_u16(ip6h1->payload_length) + 40);
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         map_domain_t *d0;
+         u8 error0 = MAP_ERROR_NONE;
+         ip4_header_t *ip40;
+         u16 port0 = 0;
+         ip6_header_t *ip6h0;
+         u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
+         u32 map_domain_index0 = ~0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip40 = vlib_buffer_get_current (p0);
+         d0 =
+           ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                               &map_domain_index0);
+         ASSERT (d0);
+
+         /*
+          * Shared IPv4 address
+          */
+         port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
+
+         /* Decrement IPv4 TTL */
+         ip4_map_decrement_ttl (ip40, &error0);
+         bool df0 =
+           ip40->
+           flags_and_fragment_offset &
+           clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
+
+         /* MAP calc */
+         u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
+         u16 dp40 = clib_net_to_host_u16 (port0);
+         u64 dal60 = map_get_pfx (d0, da40, dp40);
+         u64 dar60 = map_get_sfx (d0, da40, dp40);
+         if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
+             && next0 != IP4_MAP_NEXT_REASS)
+           error0 = MAP_ERROR_NO_BINDING;
+
+         /* construct ipv6 header */
+         vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
+         ip6h0 = vlib_buffer_get_current (p0);
+         vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+         ip6h0->ip_version_traffic_class_and_flow_label =
+           ip4_map_vtcfl (ip40, p0);
+         ip6h0->payload_length = ip40->length;
+         ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
+         ip6h0->hop_limit = 0x40;
+         ip6h0->src_address = d0->ip6_src;
+         ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
+         ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
+
+         /*
+          * Determine next node. Can be one of:
+          * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
+          */
+         if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+           {
+             if (PREDICT_FALSE
+                 (d0->mtu
+                  && (clib_net_to_host_u16 (ip6h0->payload_length) +
+                      sizeof (*ip6h0) > d0->mtu)))
+               {
+                 next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
+               }
+             else
+               {
+                 next0 =
+                   ip4_map_ip6_lookup_bypass (p0,
+                                              ip40) ?
+                   IP4_MAP_NEXT_IP6_REWRITE : next0;
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                                  cpu_index,
+                                                  map_domain_index0, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip6h0->payload_length) +
+                                                  40);
+               }
+           }
+         else
+           {
+             next0 = IP4_MAP_NEXT_DROP;
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = port0;
+           }
+
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, next0);
        }
-      } else {
-        next1 = IP4_MAP_NEXT_DROP;
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-       tr->map_domain_index = map_domain_index0;
-       tr->port = port0;
-      }
-      if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
-       tr->map_domain_index = map_domain_index1;
-       tr->port = port1;
-      }
-
-      p0->error = error_node->errors[error0];
-      p1->error = error_node->errors[error1];
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      map_domain_t *d0;
-      u8 error0 = MAP_ERROR_NONE;
-      ip4_header_t *ip40;
-      u16 port0 = 0;
-      ip6_header_t *ip6h0;
-      u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
-      u32 map_domain_index0 = ~0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip40 = vlib_buffer_get_current(p0);
-      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
-      ASSERT(d0);
-
-      /*
-       * Shared IPv4 address
-       */
-      port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
-
-      /* Decrement IPv4 TTL */
-      ip4_map_decrement_ttl(ip40, &error0);
-      bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
-
-      /* MAP calc */
-      u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
-      u16 dp40 = clib_net_to_host_u16(port0);
-      u64 dal60 = map_get_pfx(d0, da40, dp40);
-      u64 dar60 = map_get_sfx(d0, da40, dp40);
-      if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE && next0 != IP4_MAP_NEXT_REASS)
-       error0 = MAP_ERROR_NO_BINDING;
-
-      /* construct ipv6 header */
-      vlib_buffer_advance(p0, - (sizeof(ip6_header_t)));
-      ip6h0 = vlib_buffer_get_current(p0);
-      vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
-
-      ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
-      ip6h0->payload_length = ip40->length;
-      ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
-      ip6h0->hop_limit = 0x40;
-      ip6h0->src_address = d0->ip6_src;
-      ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
-      ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
-
-      /*
-       * Determine next node. Can be one of:
-       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
-       */
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
-       if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
-         next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
-       } else {
-         next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
-                                         clib_net_to_host_u16(ip6h0->payload_length) + 40);
-       }
-      } else {
-        next0 = IP4_MAP_NEXT_DROP;
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-       tr->map_domain_index = map_domain_index0;
-       tr->port = port0;
-      }
-
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
-    }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);  
-  }
-
   return frame->n_vectors;
 }
 
@@ -448,143 +567,192 @@ ip4_map (vlib_main_t *vm,
  * ip4_map_reass
  */
 static uword
-ip4_map_reass (vlib_main_t *vm,
-               vlib_node_runtime_t *node,
-               vlib_frame_t *frame)
+ip4_map_reass (vlib_main_t * vm,
+              vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_reass_node.index);
-  from = vlib_frame_vector_args(frame);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip4_map_reass_node.index);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
   map_main_t *mm = &map_main;
   vlib_combined_counter_main_t *cm = mm->domain_counters;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
   u32 *fragments_to_drop = NULL;
   u32 *fragments_to_loopback = NULL;
 
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      map_domain_t *d0;
-      u8 error0 = MAP_ERROR_NONE;
-      ip4_header_t *ip40;
-      i32 port0 = 0;
-      ip6_header_t *ip60;
-      u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
-      u32 map_domain_index0;
-      u8 cached = 0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      ip40 = (ip4_header_t *)(ip60 + 1);
-      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
-
-      map_ip4_reass_lock();
-      map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
-                                             ip40->fragment_id, ip40->protocol, &fragments_to_drop);
-      if (PREDICT_FALSE(!r)) {
-        // Could not create a caching entry
-        error0 = MAP_ERROR_FRAGMENT_MEMORY;
-      } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
-        if (r->port >= 0) {
-          // We know the port already
-          port0 = r->port;
-        } else if (map_ip4_reass_add_fragment(r, pi0)) {
-          // Not enough space for caching
-          error0 = MAP_ERROR_FRAGMENT_MEMORY;
-          map_ip4_reass_free(r, &fragments_to_drop);
-        } else {
-          cached = 1;
-        }
-      } else if ((port0 = ip4_get_port(ip40, MAP_RECEIVER, p0->current_length)) < 0) {
-        // Could not find port. We'll free the reassembly.
-        error0 = MAP_ERROR_BAD_PROTOCOL;
-        port0 = 0;
-        map_ip4_reass_free(r, &fragments_to_drop);
-      } else {
-        r->port = port0;
-        map_ip4_reass_get_fragments(r, &fragments_to_loopback);
-      }
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         map_domain_t *d0;
+         u8 error0 = MAP_ERROR_NONE;
+         ip4_header_t *ip40;
+         i32 port0 = 0;
+         ip6_header_t *ip60;
+         u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
+         u32 map_domain_index0;
+         u8 cached = 0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         ip40 = (ip4_header_t *) (ip60 + 1);
+         d0 =
+           ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                               &map_domain_index0);
+
+         map_ip4_reass_lock ();
+         map_ip4_reass_t *r =
+           map_ip4_reass_get (ip40->src_address.as_u32,
+                              ip40->dst_address.as_u32,
+                              ip40->fragment_id, ip40->protocol,
+                              &fragments_to_drop);
+         if (PREDICT_FALSE (!r))
+           {
+             // Could not create a caching entry
+             error0 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+         else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
+           {
+             if (r->port >= 0)
+               {
+                 // We know the port already
+                 port0 = r->port;
+               }
+             else if (map_ip4_reass_add_fragment (r, pi0))
+               {
+                 // Not enough space for caching
+                 error0 = MAP_ERROR_FRAGMENT_MEMORY;
+                 map_ip4_reass_free (r, &fragments_to_drop);
+               }
+             else
+               {
+                 cached = 1;
+               }
+           }
+         else
+           if ((port0 =
+                ip4_get_port (ip40, MAP_RECEIVER, p0->current_length)) < 0)
+           {
+             // Could not find port. We'll free the reassembly.
+             error0 = MAP_ERROR_BAD_PROTOCOL;
+             port0 = 0;
+             map_ip4_reass_free (r, &fragments_to_drop);
+           }
+         else
+           {
+             r->port = port0;
+             map_ip4_reass_get_fragments (r, &fragments_to_loopback);
+           }
 
 #ifdef MAP_IP4_REASS_COUNT_BYTES
-      if (!cached && r) {
-        r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
-        if (!ip4_get_fragment_more(ip40))
-          r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
-        if(r->forwarded >= r->expected_total)
-          map_ip4_reass_free(r, &fragments_to_drop);
-      }
+         if (!cached && r)
+           {
+             r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
+             if (!ip4_get_fragment_more (ip40))
+               r->expected_total =
+                 ip4_get_fragment_offset (ip40) * 8 +
+                 clib_host_to_net_u16 (ip40->length) - 20;
+             if (r->forwarded >= r->expected_total)
+               map_ip4_reass_free (r, &fragments_to_drop);
+           }
 #endif
 
-      map_ip4_reass_unlock();
-
-      // NOTE: Most operations have already been performed by ip4_map
-      // All we need is the right destination address
-      ip60->dst_address.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, port0);
-      ip60->dst_address.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, port0);
-
-      if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip60->payload_length) + sizeof(*ip60) > d0->mtu))) {
-        vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip60);
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
-        vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
-        vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
-        next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-        map_ip4_map_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-        tr->map_domain_index = map_domain_index0;
-        tr->port = port0;
-        tr->cached = cached;
-      }
-
-      if(cached) {
-        //Dequeue the packet
-        n_left_to_next++;
-        to_next--;
-      } else {
-        if (error0 == MAP_ERROR_NONE)
-          vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
-                                          clib_net_to_host_u16(ip60->payload_length) + 40);
-        next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
-        p0->error = error_node->errors[error0];
-        vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
-      }
-
-      //Loopback when we reach the end of the inpu vector
-      if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
-        from = vlib_frame_vector_args(frame);
-        u32 len = vec_len(fragments_to_loopback);
-        if(len <= VLIB_FRAME_SIZE) {
-          clib_memcpy(from, fragments_to_loopback, sizeof(u32)*len);
-          n_left_from = len;
-          vec_reset_length(fragments_to_loopback);
-        } else {
-          clib_memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
-          n_left_from = VLIB_FRAME_SIZE;
-          _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
-        }
-      }
+         map_ip4_reass_unlock ();
+
+         // NOTE: Most operations have already been performed by ip4_map
+         // All we need is the right destination address
+         ip60->dst_address.as_u64[0] =
+           map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
+         ip60->dst_address.as_u64[1] =
+           map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);
+
+         if (PREDICT_FALSE
+             (d0->mtu
+              && (clib_net_to_host_u16 (ip60->payload_length) +
+                  sizeof (*ip60) > d0->mtu)))
+           {
+             vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
+             vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+             vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
+             next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_ip4_map_reass_trace_t *tr =
+               vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = port0;
+             tr->cached = cached;
+           }
+
+         if (cached)
+           {
+             //Dequeue the packet
+             n_left_to_next++;
+             to_next--;
+           }
+         else
+           {
+             if (error0 == MAP_ERROR_NONE)
+               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                                cpu_index, map_domain_index0,
+                                                1,
+                                                clib_net_to_host_u16 (ip60->
+                                                                      payload_length)
+                                                + 40);
+             next0 =
+               (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
+             p0->error = error_node->errors[error0];
+             vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                              n_left_to_next, pi0, next0);
+           }
+
+         //Loopback when we reach the end of the inpu vector
+         if (n_left_from == 0 && vec_len (fragments_to_loopback))
+           {
+             from = vlib_frame_vector_args (frame);
+             u32 len = vec_len (fragments_to_loopback);
+             if (len <= VLIB_FRAME_SIZE)
+               {
+                 clib_memcpy (from, fragments_to_loopback,
+                              sizeof (u32) * len);
+                 n_left_from = len;
+                 vec_reset_length (fragments_to_loopback);
+               }
+             else
+               {
+                 clib_memcpy (from,
+                              fragments_to_loopback + (len -
+                                                       VLIB_FRAME_SIZE),
+                              sizeof (u32) * VLIB_FRAME_SIZE);
+                 n_left_from = VLIB_FRAME_SIZE;
+                 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+               }
+           }
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
 
-  map_send_all_to_node(vm, fragments_to_drop, node,
-                       &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
-                       IP4_MAP_REASS_NEXT_DROP);
+  map_send_all_to_node (vm, fragments_to_drop, node,
+                       &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+                       IP4_MAP_REASS_NEXT_DROP);
 
-  vec_free(fragments_to_drop);
-  vec_free(fragments_to_loopback);
+  vec_free (fragments_to_drop);
+  vec_free (fragments_to_loopback);
   return frame->n_vectors;
 }
 
@@ -594,6 +762,7 @@ static char *map_error_strings[] = {
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_node) = {
   .function = ip4_map,
   .name = "ip4-map",
@@ -617,7 +786,9 @@ VLIB_REGISTER_NODE(ip4_map_node) = {
     [IP4_MAP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
   .function = ip4_map_reass,
   .name = "ip4-map-reass",
@@ -635,3 +806,12 @@ VLIB_REGISTER_NODE(ip4_map_reass_node) = {
     [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
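The /* *INDENT-OFF* */ ... /* *INDENT-ON* */ pairs added around the VLIB_REGISTER_NODE initializers exclude those designated-initializer blocks from the automatic indenter, and the new "Local Variables" footer puts emacs into the same GNU style when editing the file. A minimal sketch of the guard pattern, using a hypothetical node purely for illustration:

    /* *INDENT-OFF* */
    VLIB_REGISTER_NODE (example_node) = {
      .function = example_fn,          /* hypothetical node function */
      .name = "example-node",          /* hypothetical graph node name */
      .vector_size = sizeof (u32),
    };
    /* *INDENT-ON* */
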
diff --git a/vnet/vnet/map/ip4_map_t.c b/vnet/vnet/map/ip4_map_t.c
index 78cfae7..f4bae60 100644
@@ -18,7 +18,8 @@
 
 #define IP4_MAP_T_DUAL_LOOP 1
 
-typedef enum {
+typedef enum
+{
   IP4_MAPT_NEXT_MAPT_TCP_UDP,
   IP4_MAPT_NEXT_MAPT_ICMP,
   IP4_MAPT_NEXT_MAPT_FRAGMENTED,
@@ -26,21 +27,24 @@ typedef enum {
   IP4_MAPT_N_NEXT
 } ip4_mapt_next_t;
 
-typedef enum {
+typedef enum
+{
   IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
   IP4_MAPT_ICMP_NEXT_IP6_FRAG,
   IP4_MAPT_ICMP_NEXT_DROP,
   IP4_MAPT_ICMP_N_NEXT
 } ip4_mapt_icmp_next_t;
 
-typedef enum {
+typedef enum
+{
   IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
   IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
   IP4_MAPT_TCP_UDP_NEXT_DROP,
   IP4_MAPT_TCP_UDP_N_NEXT
 } ip4_mapt_tcp_udp_next_t;
 
-typedef enum {
+typedef enum
+{
   IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
   IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
   IP4_MAPT_FRAGMENTED_NEXT_DROP,
@@ -49,51 +53,60 @@ typedef enum {
 
 //This is used to pass information within the buffer data.
 //Buffer structure being too small to contain big structures like this.
-typedef CLIB_PACKED(struct {
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
   ip6_address_t daddr;
   ip6_address_t saddr;
   //IPv6 header + Fragmentation header will be here
   //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
   u8 unused[28];
 }) ip4_mapt_pseudo_header_t;
+/* *INDENT-ON* */
 
 #define frag_id_4to6(id) (id)
 
 //TODO: Find the right place in memory for this.
+/* *INDENT-OFF* */
 static u8 icmp_to_icmp6_updater_pointer_table[] =
-    { 0, 1, 4, 4,~0,
-     ~0,~0,~0, 7, 6,
-     ~0,~0, 8, 8, 8,
-      8, 24, 24, 24, 24 };
+  { 0, 1, 4, 4, ~0,
+    ~0, ~0, ~0, 7, 6,
+    ~0, ~0, 8, 8, 8,
+    8, 24, 24, 24, 24
+  };
+/* *INDENT-ON* */
 
 
 static_always_inline int
-ip4_map_fragment_cache (ip4_header_t *ip4, u16 port)
+ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
 {
   u32 *ignore = NULL;
-  map_ip4_reass_lock();
-  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
-                                         ip4->fragment_id,
-                                         (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
-                                             &ignore);
+  map_ip4_reass_lock ();
+  map_ip4_reass_t *r =
+    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
+                      ip4->fragment_id,
+                      (ip4->protocol ==
+                       IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
+                      &ignore);
   if (r)
     r->port = port;
 
-  map_ip4_reass_unlock();
+  map_ip4_reass_unlock ();
   return !r;
 }
 
 static_always_inline i32
-ip4_map_fragment_get_port (ip4_header_t *ip4)
+ip4_map_fragment_get_port (ip4_header_t * ip4)
 {
   u32 *ignore = NULL;
-  map_ip4_reass_lock();
-  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
-                                         ip4->fragment_id,
-                                         (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
-                                             &ignore);
-  i32 ret = r?r->port:-1;
-  map_ip4_reass_unlock();
+  map_ip4_reass_lock ();
+  map_ip4_reass_t *r =
+    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
+                      ip4->fragment_id,
+                      (ip4->protocol ==
+                       IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
+                      &ignore);
+  i32 ret = r ? r->port : -1;
+  map_ip4_reass_unlock ();
   return ret;
 }
 
@@ -104,114 +117,121 @@ ip4_map_fragment_get_port (ip4_header_t *ip4)
  *
  */
 static_always_inline int
-ip4_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
-                            i32 *receiver_port, ip4_header_t **inner_ip4)
+ip4_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
+                           i32 * receiver_port, ip4_header_t ** inner_ip4)
 {
   *inner_ip4 = NULL;
-  switch (icmp->type) {
+  switch (icmp->type)
+    {
     case ICMP4_echo_reply:
-      *receiver_port = ((u16 *)icmp)[2];
+      *receiver_port = ((u16 *) icmp)[2];
       icmp->type = ICMP6_echo_reply;
       break;
     case ICMP4_echo_request:
-      *receiver_port = ((u16 *)icmp)[2];
+      *receiver_port = ((u16 *) icmp)[2];
       icmp->type = ICMP6_echo_request;
       break;
     case ICMP4_destination_unreachable:
-      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
-      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
+      *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+      *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
 
-      switch (icmp->code) {
-       case ICMP4_destination_unreachable_destination_unreachable_net: //0
-       case ICMP4_destination_unreachable_destination_unreachable_host: //1
+      switch (icmp->code)
+       {
+       case ICMP4_destination_unreachable_destination_unreachable_net: //0
+       case ICMP4_destination_unreachable_destination_unreachable_host:        //1
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
          break;
-       case ICMP4_destination_unreachable_protocol_unreachable: //2
+       case ICMP4_destination_unreachable_protocol_unreachable:        //2
          icmp->type = ICMP6_parameter_problem;
          icmp->code = ICMP6_parameter_problem_unrecognized_next_header;
          break;
-       case ICMP4_destination_unreachable_port_unreachable: //3
+       case ICMP4_destination_unreachable_port_unreachable:    //3
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_port_unreachable;
          break;
-       case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4
-         icmp->type = ICMP6_packet_too_big;
+       case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set:  //4
+         icmp->type =
+           ICMP6_packet_too_big;
          icmp->code = 0;
          {
-           u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
+           u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
            if (advertised_mtu)
              advertised_mtu += 20;
            else
-             advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value)
+             advertised_mtu = 1000;    //FIXME ! (RFC 1191 - plateau value)
 
            //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20)
-           *((u32 *)(icmp + 1)) = clib_host_to_net_u32(advertised_mtu);
+           *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu);
          }
          break;
 
-       case ICMP4_destination_unreachable_source_route_failed: //5
-       case ICMP4_destination_unreachable_destination_network_unknown: //6
-       case ICMP4_destination_unreachable_destination_host_unknown: //7
-       case ICMP4_destination_unreachable_source_host_isolated: //8
-       case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11
-        case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12
-         icmp->type = ICMP6_destination_unreachable;
+       case ICMP4_destination_unreachable_source_route_failed: //5
+       case ICMP4_destination_unreachable_destination_network_unknown: //6
+       case ICMP4_destination_unreachable_destination_host_unknown:    //7
+       case ICMP4_destination_unreachable_source_host_isolated:        //8
+       case ICMP4_destination_unreachable_network_unreachable_for_type_of_service:     //11
+       case ICMP4_destination_unreachable_host_unreachable_for_type_of_service:        //12
+         icmp->type =
+           ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
          break;
-       case ICMP4_destination_unreachable_network_administratively_prohibited: //9
-       case ICMP4_destination_unreachable_host_administratively_prohibited: //10
-       case ICMP4_destination_unreachable_communication_administratively_prohibited: //13
-       case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
+       case ICMP4_destination_unreachable_network_administratively_prohibited: //9
+       case ICMP4_destination_unreachable_host_administratively_prohibited:    //10
+       case ICMP4_destination_unreachable_communication_administratively_prohibited:   //13
+       case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
          icmp->type = ICMP6_destination_unreachable;
-         icmp->code = ICMP6_destination_unreachable_destination_administratively_prohibited;
+         icmp->code =
+           ICMP6_destination_unreachable_destination_administratively_prohibited;
          break;
-       case ICMP4_destination_unreachable_host_precedence_violation: //14
+       case ICMP4_destination_unreachable_host_precedence_violation:   //14
        default:
          return -1;
-      }
+       }
       break;
 
-    case ICMP4_time_exceeded: //11
-      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
-      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
+    case ICMP4_time_exceeded:  //11
+      *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+      *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
       icmp->type = ICMP6_time_exceeded;
       //icmp->code = icmp->code //unchanged
       break;
 
     case ICMP4_parameter_problem:
-      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
-      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
+      *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
+      *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
 
-      switch (icmp->code) {
+      switch (icmp->code)
+       {
        case ICMP4_parameter_problem_pointer_indicates_error:
        case ICMP4_parameter_problem_bad_length:
          icmp->type = ICMP6_parameter_problem;
          icmp->code = ICMP6_parameter_problem_erroneous_header_field;
          {
-           u8 ptr = icmp_to_icmp6_updater_pointer_table[*((u8 *)(icmp + 1))];
+           u8 ptr =
+             icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))];
            if (ptr == 0xff)
              return -1;
 
-           *((u32 *)(icmp + 1)) = clib_host_to_net_u32(ptr);
+           *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr);
          }
          break;
-        default:
-          //All other codes cause dropping the packet
+       default:
+         //All other codes cause dropping the packet
          return -1;
-      }
+       }
       break;
 
     default:
       //All other types cause dropping the packet
       return -1;
       break;
-  }
+    }
   return 0;
 }
 
 static_always_inline void
-_ip4_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
+_ip4_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
 {
   ip4_header_t *ip4, *inner_ip4;
   ip6_header_t *ip6, *inner_ip6;
@@ -225,790 +245,1024 @@ _ip4_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
   u32 inner_frag_offset;
   u8 inner_frag_more;
 
-  ip4 = vlib_buffer_get_current(p);
-  ip_len = clib_net_to_host_u16(ip4->length);
-  ASSERT(ip_len <= p->current_length);
-
-  icmp = (icmp46_header_t *)(ip4 + 1);
-  if (ip4_icmp_to_icmp6_in_place(icmp, ip_len - sizeof(*ip4),
-                                &recv_port, &inner_ip4)) {
-    *error = MAP_ERROR_ICMP;
-    return;
-  }
-
-  if (recv_port < 0) {
-    // In case of 1:1 mapping, we don't care about the port
-    if(d->ea_bits_len == 0 && d->rules) {
-      recv_port = 0;
-    } else {
+  ip4 = vlib_buffer_get_current (p);
+  ip_len = clib_net_to_host_u16 (ip4->length);
+  ASSERT (ip_len <= p->current_length);
+
+  icmp = (icmp46_header_t *) (ip4 + 1);
+  if (ip4_icmp_to_icmp6_in_place (icmp, ip_len - sizeof (*ip4),
+                                 &recv_port, &inner_ip4))
+    {
       *error = MAP_ERROR_ICMP;
       return;
     }
-  }
-
-  if (inner_ip4) {
-    //We have 2 headers to translate.
-    //We need to make some room in the middle of the packet
-
-    if (PREDICT_FALSE(ip4_is_fragment(inner_ip4))) {
-      //Here it starts getting really tricky
-      //We will add a fragmentation header in the inner packet
-
-      if (!ip4_is_first_fragment(inner_ip4)) {
-        //For now we do not handle unless it is the first fragment
-        //Ideally we should handle the case as we are in slow path already
-        *error = MAP_ERROR_FRAGMENTED;
-        return;
-      }
-
-      vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)) - sizeof(*inner_frag));
-      ip6 = vlib_buffer_get_current(p);
-      clib_memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
-      ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
-      icmp = (icmp46_header_t *) (ip4 + 1);
-
-      inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6) - sizeof(*inner_frag));
-      inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, sizeof(*inner_ip6));
-      ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4) + sizeof(*inner_frag));
-      inner_frag_id = frag_id_4to6(inner_ip4->fragment_id);
-      inner_frag_offset = ip4_get_fragment_offset(inner_ip4);
-      inner_frag_more = !!(inner_ip4->flags_and_fragment_offset & clib_net_to_host_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS));
-    } else {
-      vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)));
-      ip6 = vlib_buffer_get_current(p);
-      clib_memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
-      ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
-      icmp = (icmp46_header_t *) u8_ptr_add(ip4, sizeof(*ip4));
-      inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6));
-      ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4));
-      inner_frag = NULL;
-    }
 
-    if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_TCP)) {
-      inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
-      *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *) (&inner_ip4->src_address))));
-    } else if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_UDP)) {
-      inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
-      if (!*inner_L4_checksum) {
-        //The inner packet was first translated, and therefore came from IPv6.
-        //As the packet was an IPv6 packet, the UDP checksum can't be NULL
-        *error = MAP_ERROR_ICMP;
-        return;
-      }
-      *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *)(&inner_ip4->src_address))));
-    } else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
-      //We have an ICMP inside an ICMP
-      //It needs to be translated, but not for error ICMP messages
-      icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
-      csum = inner_icmp->checksum;
-      //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
-      csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
-      inner_icmp->type = (inner_icmp->type == ICMP4_echo_request)?
-          ICMP6_echo_request:ICMP6_echo_reply;
-      csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
-      csum = ip_csum_add_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
-      csum = ip_csum_add_even(csum, inner_ip4->length - sizeof(*inner_ip4));
-      inner_icmp->checksum = ip_csum_fold(csum);
-      inner_L4_checksum = &inner_icmp->checksum;
-      inner_ip4->protocol = IP_PROTOCOL_ICMP6;
-    } else {
-      ASSERT(0); // We had a port from that, so it is udp or tcp or ICMP
+  if (recv_port < 0)
+    {
+      // In case of 1:1 mapping, we don't care about the port
+      if (d->ea_bits_len == 0 && d->rules)
+       {
+         recv_port = 0;
+       }
+      else
+       {
+         *error = MAP_ERROR_ICMP;
+         return;
+       }
     }
 
-    //FIXME: Security check with the port found in the inner packet
-
-    csum = *inner_L4_checksum; //Initial checksum of the inner L4 header
-    //FIXME: Shouldn't we remove ip addresses from there ?
-
-    inner_ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (inner_ip4->tos << 20));
-    inner_ip6->payload_length = u16_net_add(inner_ip4->length, - sizeof(*inner_ip4));
-    inner_ip6->hop_limit = inner_ip4->ttl;
-    inner_ip6->protocol = inner_ip4->protocol;
-
-    //Note that the source address is within the domain
-    //while the destination address is the one outside the domain
-    ip4_map_t_embedded_address(d, &inner_ip6->dst_address, &inner_ip4->dst_address);
-    inner_ip6->src_address.as_u64[0] = map_get_pfx_net(d, inner_ip4->src_address.as_u32, recv_port);
-    inner_ip6->src_address.as_u64[1] = map_get_sfx_net(d, inner_ip4->src_address.as_u32, recv_port);
-
-    if (PREDICT_FALSE(inner_frag != NULL)) {
-      inner_frag->next_hdr = inner_ip6->protocol;
-      inner_frag->identification = inner_frag_id;
-      inner_frag->rsv = 0;
-      inner_frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(inner_frag_offset, inner_frag_more);
-      inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
-      inner_ip6->payload_length = clib_host_to_net_u16(
-          clib_net_to_host_u16(inner_ip6->payload_length) + sizeof(*inner_frag));
-    }
-
-    csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[0]);
-    csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[1]);
-    csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[0]);
-    csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[1]);
-    *inner_L4_checksum = ip_csum_fold(csum);
+  if (inner_ip4)
+    {
+      //We have 2 headers to translate.
+      //We need to make some room in the middle of the packet
+
+      if (PREDICT_FALSE (ip4_is_fragment (inner_ip4)))
+       {
+         //Here it starts getting really tricky
+         //We will add a fragmentation header in the inner packet
+
+         if (!ip4_is_first_fragment (inner_ip4))
+           {
+             //For now we do not handle this unless it is the first fragment
+             //Ideally we should handle that case too, as we are already on the slow path
+             *error = MAP_ERROR_FRAGMENTED;
+             return;
+           }
+
+         vlib_buffer_advance (p,
+                              -2 * (sizeof (*ip6) - sizeof (*ip4)) -
+                              sizeof (*inner_frag));
+         ip6 = vlib_buffer_get_current (p);
+         clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
+                      20 + 8);
+         ip4 =
+           (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+         icmp = (icmp46_header_t *) (ip4 + 1);
+
+         inner_ip6 =
+           (ip6_header_t *) u8_ptr_add (inner_ip4,
+                                        sizeof (*ip4) - sizeof (*ip6) -
+                                        sizeof (*inner_frag));
+         inner_frag =
+           (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6));
+         ip6->payload_length =
+           u16_net_add (ip4->length,
+                        sizeof (*ip6) - 2 * sizeof (*ip4) +
+                        sizeof (*inner_frag));
+         inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id);
+         inner_frag_offset = ip4_get_fragment_offset (inner_ip4);
+         inner_frag_more =
+           ! !(inner_ip4->flags_and_fragment_offset &
+               clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
+       }
+      else
+       {
+         vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4)));
+         ip6 = vlib_buffer_get_current (p);
+         clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
+                      20 + 8);
+         ip4 =
+           (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+         icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4));
+         inner_ip6 =
+           (ip6_header_t *) u8_ptr_add (inner_ip4,
+                                        sizeof (*ip4) - sizeof (*ip6));
+         ip6->payload_length =
+           u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4));
+         inner_frag = NULL;
+       }
+
+      if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP))
+       {
+         inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
+         *inner_L4_checksum =
+           ip_csum_fold (ip_csum_sub_even
+                         (*inner_L4_checksum,
+                          *((u64 *) (&inner_ip4->src_address))));
+       }
+      else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP))
+       {
+         inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
+         if (!*inner_L4_checksum)
+           {
+             //The inner packet was translated earlier and therefore came from IPv6.
+             //Since it was an IPv6 packet, its UDP checksum cannot have been zero
+             *error = MAP_ERROR_ICMP;
+             return;
+           }
+         *inner_L4_checksum =
+           ip_csum_fold (ip_csum_sub_even
+                         (*inner_L4_checksum,
+                          *((u64 *) (&inner_ip4->src_address))));
+       }
+      else if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
+       {
+         //We have an ICMP inside an ICMP
+         //It needs to be translated, but not for error ICMP messages
+         icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
+         csum = inner_icmp->checksum;
+         //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
+         csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
+         inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ?
+           ICMP6_echo_request : ICMP6_echo_reply;
+         csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
+         csum =
+           ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
+         csum =
+           ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4));
+         inner_icmp->checksum = ip_csum_fold (csum);
+         inner_L4_checksum = &inner_icmp->checksum;
+         inner_ip4->protocol = IP_PROTOCOL_ICMP6;
+       }
+      else
+       {
+         ASSERT (0);           // We extracted a port from this packet, so it must be UDP, TCP or ICMP
+       }
+
+      //FIXME: Security check with the port found in the inner packet
+
+      csum = *inner_L4_checksum;       //Initial checksum of the inner L4 header
+      //FIXME: Shouldn't we remove the IP addresses from there?
+
+      inner_ip6->ip_version_traffic_class_and_flow_label =
+       clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20));
+      inner_ip6->payload_length =
+       u16_net_add (inner_ip4->length, -sizeof (*inner_ip4));
+      inner_ip6->hop_limit = inner_ip4->ttl;
+      inner_ip6->protocol = inner_ip4->protocol;
+
+      //Note that the source address is within the domain
+      //while the destination address is the one outside the domain
+      ip4_map_t_embedded_address (d, &inner_ip6->dst_address,
+                                 &inner_ip4->dst_address);
+      inner_ip6->src_address.as_u64[0] =
+       map_get_pfx_net (d, inner_ip4->src_address.as_u32, recv_port);
+      inner_ip6->src_address.as_u64[1] =
+       map_get_sfx_net (d, inner_ip4->src_address.as_u32, recv_port);
+
+      if (PREDICT_FALSE (inner_frag != NULL))
+       {
+         inner_frag->next_hdr = inner_ip6->protocol;
+         inner_frag->identification = inner_frag_id;
+         inner_frag->rsv = 0;
+         inner_frag->fragment_offset_and_more =
+           ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more);
+         inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+         inner_ip6->payload_length =
+           clib_host_to_net_u16 (clib_net_to_host_u16
+                                 (inner_ip6->payload_length) +
+                                 sizeof (*inner_frag));
+       }
+
+      csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
+      csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
+      csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
+      csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
+      *inner_L4_checksum = ip_csum_fold (csum);
 
-  } else {
-    vlib_buffer_advance(p, sizeof(*ip4) - sizeof(*ip6));
-    ip6 = vlib_buffer_get_current(p);
-    ip6->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip4->length) - sizeof(*ip4));
-  }
+    }
+  else
+    {
+      vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
+      ip6 = vlib_buffer_get_current (p);
+      ip6->payload_length =
+       clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
+                             sizeof (*ip4));
+    }
 
   //Translate outer IPv6
-  ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip4->tos << 20));
+  ip6->ip_version_traffic_class_and_flow_label =
+    clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
 
   ip6->hop_limit = ip4->ttl;
   ip6->protocol = IP_PROTOCOL_ICMP6;
 
-  ip4_map_t_embedded_address(d, &ip6->src_address, &ip4->src_address);
-  ip6->dst_address.as_u64[0] = map_get_pfx_net(d, ip4->dst_address.as_u32, recv_port);
-  ip6->dst_address.as_u64[1] = map_get_sfx_net(d, ip4->dst_address.as_u32, recv_port);
+  ip4_map_t_embedded_address (d, &ip6->src_address, &ip4->src_address);
+  ip6->dst_address.as_u64[0] =
+    map_get_pfx_net (d, ip4->dst_address.as_u32, recv_port);
+  ip6->dst_address.as_u64[1] =
+    map_get_sfx_net (d, ip4->dst_address.as_u32, recv_port);
 
   //Truncate when the packet exceeds the minimal IPv6 MTU
-  if (p->current_length > 1280) {
-    ip6->payload_length = clib_host_to_net_u16(1280 - sizeof(*ip6));
-    p->current_length = 1280; //Looks too simple to be correct...
-  }
+  if (p->current_length > 1280)
+    {
+      ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6));
+      p->current_length = 1280;        //Looks too simple to be correct...
+    }
 
   //TODO: We could do an easy diff-checksum for echo requests/replies
   //Recompute ICMP checksum
   icmp->checksum = 0;
-  csum = ip_csum_with_carry(0, ip6->payload_length);
-  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(ip6->protocol));
-  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[0]);
-  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[1]);
-  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[0]);
-  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[1]);
-  csum = ip_incremental_checksum(csum, icmp, clib_net_to_host_u16(ip6->payload_length));
+  csum = ip_csum_with_carry (0, ip6->payload_length);
+  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
+  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
+  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
+  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
+  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
+  csum =
+    ip_incremental_checksum (csum, icmp,
+                            clib_net_to_host_u16 (ip6->payload_length));
   icmp->checksum = ~ip_csum_fold (csum);
 }
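/*
 * A small illustration of the clib_host_to_net_u32 ((6 << 28) + (tos << 20))
 * expression used above and in the other translation paths.  The first
 * 32-bit word of an IPv6 header is laid out as
 *   | version:4 | traffic class:8 | flow label:20 |
 * so placing 6 at bit 28 and the IPv4 TOS at bit 20 copies the TOS into the
 * traffic class and leaves the flow label zero.  The helper name below is
 * hypothetical and shows the value before the byte swap.
 */
#include <stdint.h>

static uint32_t
ip6_version_tc_flow_host_order (uint8_t tos)
{
  return (6u << 28) | ((uint32_t) tos << 20);	/* flow label = 0 */
}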
 
 static uword
-ip4_map_t_icmp (vlib_main_t *vm,
-                vlib_node_runtime_t *node,
-                vlib_frame_t *frame)
+ip4_map_t_icmp (vlib_main_t * vm,
+               vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_icmp_node.index);
-  from = vlib_frame_vector_args(frame);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
   vlib_combined_counter_main_t *cm = map_main.domain_counters;
-  u32 cpu_index = os_get_cpu_number();
-
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip4_mapt_icmp_next_t next0;
-      u8 error0;
-      map_domain_t *d0;
-      u16 len0;
-
-      next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-      error0 = MAP_ERROR_NONE;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      vlib_buffer_advance(p0, sizeof(ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
-      len0 = clib_net_to_host_u16(((ip4_header_t *)vlib_buffer_get_current(p0))->length);
-      d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
-      _ip4_map_t_icmp(d0, p0, &error0);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
-        next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
-      }
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
-             vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
-                                             vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                             len0);
-      } else {
-        next0 = IP4_MAPT_ICMP_NEXT_DROP;
-      }
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                      to_next, n_left_to_next, pi0,
-                                      next0);
+  u32 cpu_index = os_get_cpu_number ();
+
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip4_mapt_icmp_next_t next0;
+         u8 error0;
+         map_domain_t *d0;
+         u16 len0;
+
+         next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+         error0 = MAP_ERROR_NONE;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));  //The pseudo-header is not used
+         len0 =
+           clib_net_to_host_u16 (((ip4_header_t *)
+                                  vlib_buffer_get_current (p0))->length);
+         d0 =
+           pool_elt_at_index (map_main.domains,
+                              vnet_buffer (p0)->map_t.map_domain_index);
+         _ip4_map_t_icmp (d0, p0, &error0);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+             next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
+           }
+         if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->map_t.
+                                              map_domain_index, 1, len0);
+           }
+         else
+           {
+             next0 = IP4_MAPT_ICMP_NEXT_DROP;
+           }
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
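/*
 * A pared-down sketch of the dispatch pattern that ip4_map_t_icmp and the
 * other node functions in this file share, with the per-packet translation
 * elided so the reformatted loop structure above is easier to follow.
 * my_node_fn and MY_NEXT_DEFAULT are hypothetical names used only for
 * illustration.
 */
#include <vlib/vlib.h>

#define MY_NEXT_DEFAULT 0	/* hypothetical next-node index */

static uword
my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0 = to_next[0] = from[0];	/* copy buffer index to output */
	  u32 next0 = MY_NEXT_DEFAULT;
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  /* ... per-packet work on vlib_get_buffer (vm, pi0) goes here ... */

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}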
 
 static uword
-ip4_map_t_fragmented (vlib_main_t *vm,
-                      vlib_node_runtime_t *node,
-                      vlib_frame_t *frame)
+ip4_map_t_fragmented (vlib_main_t * vm,
+                     vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
 
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip4_header_t *ip40;
-      ip6_header_t *ip60;
-      ip6_frag_hdr_t *frag0;
-      ip4_mapt_pseudo_header_t *pheader0;
-      ip4_mapt_fragmented_next_t next0;
-
-      next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-
-      //Accessing pseudo header
-      pheader0 = vlib_buffer_get_current(p0);
-      vlib_buffer_advance(p0, sizeof(*pheader0));
-
-      //Accessing ip4 header
-      ip40 = vlib_buffer_get_current(p0);
-      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
-      ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0) - sizeof(*ip60));
-      vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-
-      //We know that the protocol was one of ICMP, TCP or UDP
-      //because the first fragment was found and cached
-      frag0->next_hdr = (ip40->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
-      frag0->identification = frag_id_4to6(ip40->fragment_id);
-      frag0->rsv = 0;
-      frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(
-          ip4_get_fragment_offset(ip40),
-          clib_net_to_host_u16(ip40->flags_and_fragment_offset) & IP4_HEADER_FLAG_MORE_FRAGMENTS);
-
-      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
-      ip60->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip40->length) - sizeof(*ip40) + sizeof(*frag0));
-      ip60->hop_limit = ip40->ttl;
-      ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
-      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
-      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
-      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
-      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
-        next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip4_header_t *ip40;
+         ip6_header_t *ip60;
+         ip6_frag_hdr_t *frag0;
+         ip4_mapt_pseudo_header_t *pheader0;
+         ip4_mapt_fragmented_next_t next0;
+
+         next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+
+         //Accessing pseudo header
+         pheader0 = vlib_buffer_get_current (p0);
+         vlib_buffer_advance (p0, sizeof (*pheader0));
+
+         //Accessing ip4 header
+         ip40 = vlib_buffer_get_current (p0);
+         frag0 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+                                          sizeof (*ip40) - sizeof (*frag0));
+         ip60 =
+           (ip6_header_t *) u8_ptr_add (ip40,
+                                        sizeof (*ip40) - sizeof (*frag0) -
+                                        sizeof (*ip60));
+         vlib_buffer_advance (p0,
+                              sizeof (*ip40) - sizeof (*ip60) -
+                              sizeof (*frag0));
+
+         //We know that the protocol was one of ICMP, TCP or UDP
+         //because the first fragment was found and cached
+         frag0->next_hdr =
+           (ip40->protocol ==
+            IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
+         frag0->identification = frag_id_4to6 (ip40->fragment_id);
+         frag0->rsv = 0;
+         frag0->fragment_offset_and_more =
+           ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip40),
+                                         clib_net_to_host_u16
+                                         (ip40->flags_and_fragment_offset) &
+                                         IP4_HEADER_FLAG_MORE_FRAGMENTS);
+
+         ip60->ip_version_traffic_class_and_flow_label =
+           clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+         ip60->payload_length =
+           clib_host_to_net_u16 (clib_net_to_host_u16 (ip40->length) -
+                                 sizeof (*ip40) + sizeof (*frag0));
+         ip60->hop_limit = ip40->ttl;
+         ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+         ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+         ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+         ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+         ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+             next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
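/*
 * Minimal sketch of what the ip6_frag_hdr_offset_and_more (offset, more)
 * calls above produce, assuming the standard RFC 2460 fragment header
 * layout: 13 bits of fragment offset (in 8-octet units), 2 reserved bits,
 * and the M ("more fragments") flag in the least-significant bit.  The
 * helper name is hypothetical and the value shown is before the
 * clib_host_to_net_u16 byte swap.
 */
#include <stdint.h>

static uint16_t
frag_offset_and_more_host_order (uint16_t offset_8_octet_units, int more)
{
  return (uint16_t) ((offset_8_octet_units << 3) | (more ? 1 : 0));
}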
 
 static uword
-ip4_map_t_tcp_udp(vlib_main_t *vm,
-                  vlib_node_runtime_t *node,
-                  vlib_frame_t *frame)
+ip4_map_t_tcp_udp (vlib_main_t * vm,
+                  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
 
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
 #ifdef IP4_MAP_T_DUAL_LOOP
-    while (n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      ip4_header_t *ip40, *ip41;
-      ip6_header_t *ip60, *ip61;
-      ip_csum_t csum0, csum1;
-      u16 *checksum0, *checksum1;
-      ip6_frag_hdr_t *frag0, *frag1;
-      u32 frag_id0, frag_id1;
-      ip4_mapt_pseudo_header_t *pheader0, *pheader1;
-      ip4_mapt_tcp_udp_next_t next0, next1;
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next +=2;
-      n_left_to_next -= 2;
-
-      next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
-      next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-
-      //Accessing pseudo header
-      pheader0 = vlib_buffer_get_current(p0);
-      pheader1 = vlib_buffer_get_current(p1);
-      vlib_buffer_advance(p0, sizeof(*pheader0));
-      vlib_buffer_advance(p1, sizeof(*pheader1));
-
-      //Accessing ip4 header
-      ip40 = vlib_buffer_get_current(p0);
-      ip41 = vlib_buffer_get_current(p1);
-      checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);
-      checksum1 = (u16 *) u8_ptr_add(ip41, vnet_buffer(p1)->map_t.checksum_offset);
-
-      //UDP checksum is optional over IPv4 but mandatory for IPv6
-      //We do not check udp->length sanity but use our safe computed value instead
-      if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
-        u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
-        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
-        ip_csum_t csum;
-        csum = ip_incremental_checksum(0, udp, udp_len);
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
-        csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
-        *checksum0 = ~ip_csum_fold(csum);
-      }
-      if (PREDICT_FALSE(!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP)) {
-        u16 udp_len = clib_host_to_net_u16(ip41->length) - sizeof(*ip40);
-        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip41, sizeof(*ip40));
-        ip_csum_t csum;
-        csum = ip_incremental_checksum(0, udp, udp_len);
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
-        csum = ip_csum_with_carry(csum, *((u64 *)(&ip41->src_address)));
-        *checksum1 = ~ip_csum_fold(csum);
-      }
-
-      csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
-      csum1 = ip_csum_sub_even(*checksum1, ip41->src_address.as_u32);
-      csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);
-      csum1 = ip_csum_sub_even(csum1, ip41->dst_address.as_u32);
-
-      // Deal with fragmented packets
-      if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
-                       clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
-        ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-        frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
-        frag_id0 = frag_id_4to6(ip40->fragment_id);
-        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-      } else {
-        ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
-        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));
-        frag0 = NULL;
-      }
-
-      if (PREDICT_FALSE(ip41->flags_and_fragment_offset &
-                       clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
-        ip61 = (ip6_header_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-        frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*frag0));
-        frag_id1 = frag_id_4to6(ip41->fragment_id);
-        vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-      } else {
-        ip61 = (ip6_header_t *) (((u8 *)ip41) + sizeof(*ip40) - sizeof(*ip60));
-        vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60));
-        frag1 = NULL;
-      }
-
-      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
-      ip61->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip41->tos << 20));
-      ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
-      ip61->payload_length = u16_net_add(ip41->length, - sizeof(*ip40));
-      ip60->hop_limit = ip40->ttl;
-      ip61->hop_limit = ip41->ttl;
-      ip60->protocol = ip40->protocol;
-      ip61->protocol = ip41->protocol;
-
-      if (PREDICT_FALSE(frag0 != NULL)) {
-        frag0->next_hdr = ip60->protocol;
-        frag0->identification = frag_id0;
-        frag0->rsv = 0;
-        frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
-        ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
-        ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));
-      }
-
-      if (PREDICT_FALSE(frag1 != NULL)) {
-        frag1->next_hdr = ip61->protocol;
-        frag1->identification = frag_id1;
-        frag1->rsv = 0;
-        frag1->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
-        ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
-        ip61->payload_length = u16_net_add(ip61->payload_length, sizeof(*frag0));
-      }
-
-      //Finally copying the address
-      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
-      ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
-      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
-      ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
-      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
-      ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
-      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
-      ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];
-
-      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
-      csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[0]);
-      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
-      csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[1]);
-      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
-      csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[0]);
-      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
-      csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[1]);
-      *checksum0 = ip_csum_fold(csum0);
-      *checksum1 = ip_csum_fold(csum1);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
-        next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
-      }
-
-      if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
-        vnet_buffer(p1)->ip_frag.header_offset = 0;
-        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
-        vnet_buffer(p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
-        next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index,
-                                      to_next, n_left_to_next, pi0, pi1,
-                                      next0, next1);
-    }
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         ip4_header_t *ip40, *ip41;
+         ip6_header_t *ip60, *ip61;
+         ip_csum_t csum0, csum1;
+         u16 *checksum0, *checksum1;
+         ip6_frag_hdr_t *frag0, *frag1;
+         u32 frag_id0, frag_id1;
+         ip4_mapt_pseudo_header_t *pheader0, *pheader1;
+         ip4_mapt_tcp_udp_next_t next0, next1;
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+
+         next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+         next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+
+         //Accessing pseudo header
+         pheader0 = vlib_buffer_get_current (p0);
+         pheader1 = vlib_buffer_get_current (p1);
+         vlib_buffer_advance (p0, sizeof (*pheader0));
+         vlib_buffer_advance (p1, sizeof (*pheader1));
+
+         //Accessing ip4 header
+         ip40 = vlib_buffer_get_current (p0);
+         ip41 = vlib_buffer_get_current (p1);
+         checksum0 =
+           (u16 *) u8_ptr_add (ip40,
+                               vnet_buffer (p0)->map_t.checksum_offset);
+         checksum1 =
+           (u16 *) u8_ptr_add (ip41,
+                               vnet_buffer (p1)->map_t.checksum_offset);
+
+         //UDP checksum is optional over IPv4 but mandatory for IPv6
+         //We do not sanity-check udp->length but use our safely computed value instead
+         if (PREDICT_FALSE
+             (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
+           {
+             u16 udp_len =
+               clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
+             udp_header_t *udp =
+               (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
+             ip_csum_t csum;
+             csum = ip_incremental_checksum (0, udp, udp_len);
+             csum =
+               ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+             csum =
+               ip_csum_with_carry (csum,
+                                   clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+             csum =
+               ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
+             *checksum0 = ~ip_csum_fold (csum);
+           }
+         if (PREDICT_FALSE
+             (!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP))
+           {
+             u16 udp_len =
+               clib_host_to_net_u16 (ip41->length) - sizeof (*ip40);
+             udp_header_t *udp =
+               (udp_header_t *) u8_ptr_add (ip41, sizeof (*ip40));
+             ip_csum_t csum;
+             csum = ip_incremental_checksum (0, udp, udp_len);
+             csum =
+               ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+             csum =
+               ip_csum_with_carry (csum,
+                                   clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+             csum =
+               ip_csum_with_carry (csum, *((u64 *) (&ip41->src_address)));
+             *checksum1 = ~ip_csum_fold (csum);
+           }
+
+         csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
+         csum1 = ip_csum_sub_even (*checksum1, ip41->src_address.as_u32);
+         csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
+         csum1 = ip_csum_sub_even (csum1, ip41->dst_address.as_u32);
+
+         // Deal with fragmented packets
+         if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
+                            clib_host_to_net_u16
+                            (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+           {
+             ip60 =
+               (ip6_header_t *) u8_ptr_add (ip40,
+                                            sizeof (*ip40) - sizeof (*ip60) -
+                                            sizeof (*frag0));
+             frag0 =
+               (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+                                              sizeof (*ip40) -
+                                              sizeof (*frag0));
+             frag_id0 = frag_id_4to6 (ip40->fragment_id);
+             vlib_buffer_advance (p0,
+                                  sizeof (*ip40) - sizeof (*ip60) -
+                                  sizeof (*frag0));
+           }
+         else
+           {
+             ip60 =
+               (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
+                                 sizeof (*ip60));
+             vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
+             frag0 = NULL;
+           }
+
+         if (PREDICT_FALSE (ip41->flags_and_fragment_offset &
+                            clib_host_to_net_u16
+                            (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+           {
+             ip61 =
+               (ip6_header_t *) u8_ptr_add (ip41,
+                                            sizeof (*ip40) - sizeof (*ip60) -
+                                            sizeof (*frag0));
+             frag1 =
+               (ip6_frag_hdr_t *) u8_ptr_add (ip41,
+                                              sizeof (*ip40) -
+                                              sizeof (*frag0));
+             frag_id1 = frag_id_4to6 (ip41->fragment_id);
+             vlib_buffer_advance (p1,
+                                  sizeof (*ip40) - sizeof (*ip60) -
+                                  sizeof (*frag0));
+           }
+         else
+           {
+             ip61 =
+               (ip6_header_t *) (((u8 *) ip41) + sizeof (*ip40) -
+                                 sizeof (*ip60));
+             vlib_buffer_advance (p1, sizeof (*ip40) - sizeof (*ip60));
+             frag1 = NULL;
+           }
+
+         ip60->ip_version_traffic_class_and_flow_label =
+           clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+         ip61->ip_version_traffic_class_and_flow_label =
+           clib_host_to_net_u32 ((6 << 28) + (ip41->tos << 20));
+         ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
+         ip61->payload_length = u16_net_add (ip41->length, -sizeof (*ip40));
+         ip60->hop_limit = ip40->ttl;
+         ip61->hop_limit = ip41->ttl;
+         ip60->protocol = ip40->protocol;
+         ip61->protocol = ip41->protocol;
+
+         if (PREDICT_FALSE (frag0 != NULL))
+           {
+             frag0->next_hdr = ip60->protocol;
+             frag0->identification = frag_id0;
+             frag0->rsv = 0;
+             frag0->fragment_offset_and_more =
+               ip6_frag_hdr_offset_and_more (0, 1);
+             ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+             ip60->payload_length =
+               u16_net_add (ip60->payload_length, sizeof (*frag0));
+           }
+
+         if (PREDICT_FALSE (frag1 != NULL))
+           {
+             frag1->next_hdr = ip61->protocol;
+             frag1->identification = frag_id1;
+             frag1->rsv = 0;
+             frag1->fragment_offset_and_more =
+               ip6_frag_hdr_offset_and_more (0, 1);
+             ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+             ip61->payload_length =
+               u16_net_add (ip61->payload_length, sizeof (*frag0));
+           }
+
+         //Finally copy the addresses
+         ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+         ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
+         ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+         ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
+         ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+         ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
+         ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+         ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];
+
+         csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
+         csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[0]);
+         csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
+         csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[1]);
+         csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
+         csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[0]);
+         csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
+         csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[1]);
+         *checksum0 = ip_csum_fold (csum0);
+         *checksum1 = ip_csum_fold (csum1);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+             next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+           }
+
+         if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+           {
+             vnet_buffer (p1)->ip_frag.header_offset = 0;
+             vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+             vnet_buffer (p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+             next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0, pi1,
+                                          next0, next1);
+       }
 #endif
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip4_header_t *ip40;
-      ip6_header_t *ip60;
-      ip_csum_t csum0;
-      u16 *checksum0;
-      ip6_frag_hdr_t *frag0;
-      u32 frag_id0;
-      ip4_mapt_pseudo_header_t *pheader0;
-      ip4_mapt_tcp_udp_next_t next0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
-      p0 = vlib_get_buffer(vm, pi0);
-
-      //Accessing pseudo header
-      pheader0 = vlib_buffer_get_current(p0);
-      vlib_buffer_advance(p0, sizeof(*pheader0));
-
-      //Accessing ip4 header
-      ip40 = vlib_buffer_get_current(p0);
-      checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);
-
-      //UDP checksum is optional over IPv4 but mandatory for IPv6
-      //We do not check udp->length sanity but use our safe computed value instead
-      if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
-        u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
-        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
-        ip_csum_t csum;
-        csum = ip_incremental_checksum(0, udp, udp_len);
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
-        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
-        csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
-        *checksum0 = ~ip_csum_fold(csum);
-      }
-
-      csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
-      csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);
-
-      // Deal with fragmented packets
-      if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
-                       clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
-        ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-        frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
-        frag_id0 = frag_id_4to6(ip40->fragment_id);
-        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
-      } else {
-        ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
-        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));
-        frag0 = NULL;
-      }
-
-      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
-      ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
-      ip60->hop_limit = ip40->ttl;
-      ip60->protocol = ip40->protocol;
-
-      if (PREDICT_FALSE(frag0 != NULL)) {
-        frag0->next_hdr = ip60->protocol;
-        frag0->identification = frag_id0;
-        frag0->rsv = 0;
-        frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
-        ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
-        ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));
-      }
-
-      //Finally copying the address
-      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
-      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
-      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
-      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
-
-      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
-      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
-      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
-      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
-      *checksum0 = ip_csum_fold(csum0);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        //Send to fragmentation node if necessary
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
-        next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip4_header_t *ip40;
+         ip6_header_t *ip60;
+         ip_csum_t csum0;
+         u16 *checksum0;
+         ip6_frag_hdr_t *frag0;
+         u32 frag_id0;
+         ip4_mapt_pseudo_header_t *pheader0;
+         ip4_mapt_tcp_udp_next_t next0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
+         p0 = vlib_get_buffer (vm, pi0);
+
+         //Accessing pseudo header
+         pheader0 = vlib_buffer_get_current (p0);
+         vlib_buffer_advance (p0, sizeof (*pheader0));
+
+         //Accessing ip4 header
+         ip40 = vlib_buffer_get_current (p0);
+         checksum0 =
+           (u16 *) u8_ptr_add (ip40,
+                               vnet_buffer (p0)->map_t.checksum_offset);
+
+         //UDP checksum is optional over IPv4 but mandatory for IPv6
+         //We do not sanity-check udp->length but use our safely computed value instead
+         if (PREDICT_FALSE
+             (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
+           {
+             u16 udp_len =
+               clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
+             udp_header_t *udp =
+               (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
+             ip_csum_t csum;
+             csum = ip_incremental_checksum (0, udp, udp_len);
+             csum =
+               ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
+             csum =
+               ip_csum_with_carry (csum,
+                                   clib_host_to_net_u16 (IP_PROTOCOL_UDP));
+             csum =
+               ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
+             *checksum0 = ~ip_csum_fold (csum);
+           }
+
+         csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
+         csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
+
+         // Deal with fragmented packets
+         if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
+                            clib_host_to_net_u16
+                            (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
+           {
+             ip60 =
+               (ip6_header_t *) u8_ptr_add (ip40,
+                                            sizeof (*ip40) - sizeof (*ip60) -
+                                            sizeof (*frag0));
+             frag0 =
+               (ip6_frag_hdr_t *) u8_ptr_add (ip40,
+                                              sizeof (*ip40) -
+                                              sizeof (*frag0));
+             frag_id0 = frag_id_4to6 (ip40->fragment_id);
+             vlib_buffer_advance (p0,
+                                  sizeof (*ip40) - sizeof (*ip60) -
+                                  sizeof (*frag0));
+           }
+         else
+           {
+             ip60 =
+               (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
+                                 sizeof (*ip60));
+             vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
+             frag0 = NULL;
+           }
+
+         ip60->ip_version_traffic_class_and_flow_label =
+           clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
+         ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
+         ip60->hop_limit = ip40->ttl;
+         ip60->protocol = ip40->protocol;
+
+         if (PREDICT_FALSE (frag0 != NULL))
+           {
+             frag0->next_hdr = ip60->protocol;
+             frag0->identification = frag_id0;
+             frag0->rsv = 0;
+             frag0->fragment_offset_and_more =
+               ip6_frag_hdr_offset_and_more (0, 1);
+             ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
+             ip60->payload_length =
+               u16_net_add (ip60->payload_length, sizeof (*frag0));
+           }
+
+         //Finally copy the addresses
+         ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
+         ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
+         ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
+         ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
+
+         csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
+         csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
+         csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
+         csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
+         *checksum0 = ip_csum_fold (csum0);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             //Send to fragmentation node if necessary
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
+             next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
 
   return frame->n_vectors;
 }
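/*
 * The loops above have to synthesise a UDP checksum whenever the IPv4 one is
 * zero, because a zero checksum means "not computed" in IPv4 but is not
 * allowed in IPv6.  A standalone, byte-oriented sketch of that computation
 * (pseudo-header: source address, destination address, protocol 17, UDP
 * length; then the UDP header and payload with the checksum field already
 * zero).  The function name is hypothetical; the real code uses
 * ip_incremental_checksum()/ip_csum_with_carry() over the packet in place.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t
udp4_checksum_sketch (const uint8_t src[4], const uint8_t dst[4],
		      const uint8_t * udp, size_t udp_len)
{
  uint32_t sum = 0;
  size_t i;

  for (i = 0; i < 4; i += 2)	/* pseudo-header addresses */
    {
      sum += (uint32_t) ((src[i] << 8) | src[i + 1]);
      sum += (uint32_t) ((dst[i] << 8) | dst[i + 1]);
    }
  sum += 17;			/* IP protocol number for UDP */
  sum += (uint32_t) udp_len;	/* pseudo-header UDP length */

  for (i = 0; i + 1 < udp_len; i += 2)	/* UDP header and payload */
    sum += (uint32_t) ((udp[i] << 8) | udp[i + 1]);
  if (udp_len & 1)
    sum += (uint32_t) (udp[udp_len - 1] << 8);

  while (sum >> 16)		/* fold carries, then complement */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}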
 
 static_always_inline void
-ip4_map_t_classify(vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0,
-                   i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0)
+ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
+                   ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
+                   u8 * error0, ip4_mapt_next_t * next0)
 {
-  if (PREDICT_FALSE(ip4_get_fragment_offset(ip40))) {
-    *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
-    if(d0->ea_bits_len == 0 && d0->rules) {
-      *dst_port0 = 0;
-    } else {
-      *dst_port0 = ip4_map_fragment_get_port(ip40);
-      *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
+  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
+    {
+      *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
+      if (d0->ea_bits_len == 0 && d0->rules)
+       {
+         *dst_port0 = 0;
+       }
+      else
+       {
+         *dst_port0 = ip4_map_fragment_get_port (ip40);
+         *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
+       }
+    }
+  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
+    {
+      vnet_buffer (p0)->map_t.checksum_offset = 36;
+      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
+      *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
+      *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+    }
+  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
+    {
+      vnet_buffer (p0)->map_t.checksum_offset = 26;
+      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
+      *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
+      *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+    }
+  else if (ip40->protocol == IP_PROTOCOL_ICMP)
+    {
+      *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
+      if (d0->ea_bits_len == 0 && d0->rules)
+       *dst_port0 = 0;
+      else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
+              == ICMP4_echo_reply
+              || ((icmp46_header_t *)
+                  u8_ptr_add (ip40,
+                              sizeof (*ip40)))->code == ICMP4_echo_request)
+       *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
+    }
+  else
+    {
+      *error0 = MAP_ERROR_BAD_PROTOCOL;
     }
-  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_TCP)) {
-    vnet_buffer(p0)->map_t.checksum_offset = 36;
-    *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
-    *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
-    *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
-  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_UDP)) {
-    vnet_buffer(p0)->map_t.checksum_offset = 26;
-    *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
-    *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
-    *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
-  } else if (ip40->protocol == IP_PROTOCOL_ICMP) {
-    *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
-    if(d0->ea_bits_len == 0 && d0->rules)
-      *dst_port0 = 0;
-    else if (((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->code == ICMP4_echo_reply ||
-        ((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->code == ICMP4_echo_request)
-      *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 6));
-  } else {
-    *error0 = MAP_ERROR_BAD_PROTOCOL;
-  }
 }
 
 static uword
-ip4_map_t (vlib_main_t *vm,
-           vlib_node_runtime_t *node,
-           vlib_frame_t *frame)
+ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_node.index);
-  from = vlib_frame_vector_args(frame);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip4_map_t_node.index);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
   vlib_combined_counter_main_t *cm = map_main.domain_counters;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
 
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
 #ifdef IP4_MAP_T_DUAL_LOOP
-    while (n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      ip4_header_t *ip40, *ip41;
-      map_domain_t *d0, *d1;
-      ip4_mapt_next_t next0 = 0, next1 = 0;
-      u16 ip4_len0, ip4_len1;
-      u8 error0, error1;
-      i32 dst_port0, dst_port1;
-      ip4_mapt_pseudo_header_t *pheader0, *pheader1;
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next +=2;
-      n_left_to_next -= 2;
-      error0 = MAP_ERROR_NONE;
-      error1 = MAP_ERROR_NONE;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip40 = vlib_buffer_get_current(p0);
-      ip41 = vlib_buffer_get_current(p1);
-      ip4_len0 = clib_host_to_net_u16(ip40->length);
-      ip4_len1 = clib_host_to_net_u16(ip41->length);
-
-      if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
-                        ip40->ip_version_and_header_length != 0x45)) {
-        error0 = MAP_ERROR_UNKNOWN;
-        next0 = IP4_MAPT_NEXT_DROP;
-      }
-
-      if (PREDICT_FALSE(p1->current_length < ip4_len1 ||
-                        ip41->ip_version_and_header_length != 0x45)) {
-        error1 = MAP_ERROR_UNKNOWN;
-        next1 = IP4_MAPT_NEXT_DROP;
-      }
-
-      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
-                              &vnet_buffer(p0)->map_t.map_domain_index);
-      d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
-                                    &vnet_buffer(p1)->map_t.map_domain_index);
-
-      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
-      vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
-
-      dst_port0 = -1;
-      dst_port1 = -1;
-
-      ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
-      ip4_map_t_classify(p1, d1, ip41, ip4_len1, &dst_port1, &error1, &next1);
-
-      //Add MAP-T pseudo header in front of the packet
-      vlib_buffer_advance(p0, - sizeof(*pheader0));
-      vlib_buffer_advance(p1, - sizeof(*pheader1));
-      pheader0 = vlib_buffer_get_current(p0);
-      pheader1 = vlib_buffer_get_current(p1);
-
-      //Save addresses within the packet
-      ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
-      ip4_map_t_embedded_address(d1, &pheader1->saddr, &ip41->src_address);
-      pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
-      pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
-      pheader1->daddr.as_u64[0] = map_get_pfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);
-      pheader1->daddr.as_u64[1] = map_get_sfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);
-
-      if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
-                        (d0->ea_bits_len != 0 || !d0->rules) &&
-                        ip4_map_fragment_cache(ip40, dst_port0))) {
-        error0 = MAP_ERROR_FRAGMENT_MEMORY;
-      }
-
-      if (PREDICT_FALSE(ip4_is_first_fragment(ip41) && (dst_port1 != -1) &&
-                        (d1->ea_bits_len != 0 || !d1->rules) &&
-                        ip4_map_fragment_cache(ip41, dst_port1))) {
-        error1 = MAP_ERROR_FRAGMENT_MEMORY;
-      }
-
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
-                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip40->length));
-      }
-
-      if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
-                                        vnet_buffer(p1)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip41->length));
-      }
-
-      next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
-      next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
-      p0->error = error_node->errors[error0];
-      p1->error = error_node->errors[error1];
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
-                                      n_left_to_next, pi0, pi1, next0, next1);
-    }
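+      /* Dual loop */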
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         ip4_header_t *ip40, *ip41;
+         map_domain_t *d0, *d1;
+         ip4_mapt_next_t next0 = 0, next1 = 0;
+         u16 ip4_len0, ip4_len1;
+         u8 error0, error1;
+         i32 dst_port0, dst_port1;
+         ip4_mapt_pseudo_header_t *pheader0, *pheader1;
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         error0 = MAP_ERROR_NONE;
+         error1 = MAP_ERROR_NONE;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip40 = vlib_buffer_get_current (p0);
+         ip41 = vlib_buffer_get_current (p1);
+         ip4_len0 = clib_host_to_net_u16 (ip40->length);
+         ip4_len1 = clib_host_to_net_u16 (ip41->length);
+
+         if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
+                            ip40->ip_version_and_header_length != 0x45))
+           {
+             error0 = MAP_ERROR_UNKNOWN;
+             next0 = IP4_MAPT_NEXT_DROP;
+           }
+
+         if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
+                            ip41->ip_version_and_header_length != 0x45))
+           {
+             error1 = MAP_ERROR_UNKNOWN;
+             next1 = IP4_MAPT_NEXT_DROP;
+           }
+
+         d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                  &vnet_buffer (p0)->map_t.map_domain_index);
+         d1 = ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+                                  &vnet_buffer (p1)->map_t.map_domain_index);
+
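+         /* Use the per-domain MTU if configured; ~0 effectively disables MTU-based fragmentation */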
+         vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+         vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
+
+         dst_port0 = -1;
+         dst_port1 = -1;
+
+         ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
+                             &next0);
+         ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
+                             &next1);
+
+         //Add MAP-T pseudo header in front of the packet
+         vlib_buffer_advance (p0, -sizeof (*pheader0));
+         vlib_buffer_advance (p1, -sizeof (*pheader1));
+         pheader0 = vlib_buffer_get_current (p0);
+         pheader1 = vlib_buffer_get_current (p1);
+
+         //Save the translated IPv6 addresses in the pseudo header
+         ip4_map_t_embedded_address (d0, &pheader0->saddr,
+                                     &ip40->src_address);
+         ip4_map_t_embedded_address (d1, &pheader1->saddr,
+                                     &ip41->src_address);
+         pheader0->daddr.as_u64[0] =
+           map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+         pheader0->daddr.as_u64[1] =
+           map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+         pheader1->daddr.as_u64[0] =
+           map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
+         pheader1->daddr.as_u64[1] =
+           map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
+
+         if (PREDICT_FALSE
+             (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
+              && (d0->ea_bits_len != 0 || !d0->rules)
+              && ip4_map_fragment_cache (ip40, dst_port0)))
+           {
+             error0 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+
+         if (PREDICT_FALSE
+             (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
+              && (d1->ea_bits_len != 0 || !d1->rules)
+              && ip4_map_fragment_cache (ip41, dst_port1)))
+           {
+             error1 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+
+         if (PREDICT_TRUE
+             (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->map_t.
+                                              map_domain_index, 1,
+                                              clib_net_to_host_u16 (ip40->
+                                                                    length));
+           }
+
+         if (PREDICT_TRUE
+             (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                              cpu_index,
+                                              vnet_buffer (p1)->map_t.
+                                              map_domain_index, 1,
+                                              clib_net_to_host_u16 (ip41->
+                                                                    length));
+           }
+
+         next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
+         next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
+         p0->error = error_node->errors[error0];
+         p1->error = error_node->errors[error1];
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, pi1, next0,
+                                          next1);
+       }
 #endif
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip4_header_t *ip40;
-      map_domain_t *d0;
-      ip4_mapt_next_t next0;
-      u16 ip4_len0;
-      u8 error0;
-      i32 dst_port0;
-      ip4_mapt_pseudo_header_t *pheader0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-      error0 = MAP_ERROR_NONE;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip40 = vlib_buffer_get_current(p0);
-      ip4_len0 = clib_host_to_net_u16(ip40->length);
-      if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
-                       ip40->ip_version_and_header_length != 0x45)) {
-        error0 = MAP_ERROR_UNKNOWN;
-        next0 = IP4_MAPT_NEXT_DROP;
-      }
-
-      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
-                              &vnet_buffer(p0)->map_t.map_domain_index);
-
-      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
-
-      dst_port0 = -1;
-      ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
-
-      //Add MAP-T pseudo header in front of the packet
-      vlib_buffer_advance(p0, - sizeof(*pheader0));
-      pheader0 = vlib_buffer_get_current(p0);
-
-      //Save addresses within the packet
-      ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
-      pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
-      pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
-
-      //It is important to cache at this stage because the result might be necessary
-      //for packets within the same vector.
-      //Actually, this approach even provides some limited out-of-order fragments support
-      if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
-                        (d0->ea_bits_len != 0 || !d0->rules) &&
-                        ip4_map_fragment_cache(ip40, dst_port0))) {
-          error0 = MAP_ERROR_UNKNOWN;
-      }
-
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
-                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip40->length));
-      }
-
-      next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
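+      /* Single loop */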
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip4_header_t *ip40;
+         map_domain_t *d0;
+         ip4_mapt_next_t next0;
+         u16 ip4_len0;
+         u8 error0;
+         i32 dst_port0;
+         ip4_mapt_pseudo_header_t *pheader0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+         error0 = MAP_ERROR_NONE;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip40 = vlib_buffer_get_current (p0);
+         ip4_len0 = clib_host_to_net_u16 (ip40->length);
+         if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
+                            ip40->ip_version_and_header_length != 0x45))
+           {
+             error0 = MAP_ERROR_UNKNOWN;
+             next0 = IP4_MAPT_NEXT_DROP;
+           }
+
+         d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                  &vnet_buffer (p0)->map_t.map_domain_index);
+
+         vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+
+         dst_port0 = -1;
+         ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
+                             &next0);
+
+         //Add MAP-T pseudo header in front of the packet
+         vlib_buffer_advance (p0, -sizeof (*pheader0));
+         pheader0 = vlib_buffer_get_current (p0);
+
+         //Save the translated IPv6 addresses in the pseudo header
+         ip4_map_t_embedded_address (d0, &pheader0->saddr,
+                                     &ip40->src_address);
+         pheader0->daddr.as_u64[0] =
+           map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+         pheader0->daddr.as_u64[1] =
+           map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
+
+         //Caching at this stage is important because the result may be needed
+         //by other packets within the same vector.
+         //This approach even provides some limited out-of-order fragment support.
+         if (PREDICT_FALSE
+             (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
+              && (d0->ea_bits_len != 0 || !d0->rules)
+              && ip4_map_fragment_cache (ip40, dst_port0)))
+           {
+             error0 = MAP_ERROR_UNKNOWN;
+           }
+
+         if (PREDICT_TRUE
+             (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->map_t.
+                                              map_domain_index, 1,
+                                              clib_net_to_host_u16 (ip40->
+                                                                    length));
+           }
+
+         next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
 
@@ -1018,6 +1272,7 @@ static char *map_t_error_strings[] = {
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
   .function = ip4_map_t_fragmented,
   .name = "ip4-map-t-fragmented",
@@ -1035,7 +1290,9 @@ VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
       [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
   .function = ip4_map_t_icmp,
   .name = "ip4-map-t-icmp",
@@ -1053,7 +1310,9 @@ VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
       [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
   .function = ip4_map_t_tcp_udp,
   .name = "ip4-map-t-tcp-udp",
@@ -1071,7 +1330,9 @@ VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
       [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip4_map_t_node) = {
   .function = ip4_map_t,
   .name = "ip4-map-t",
@@ -1090,3 +1351,12 @@ VLIB_REGISTER_NODE(ip4_map_t_node) = {
       [IP4_MAPT_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index 178b7c3..b51e835 100644 (file)
@@ -16,7 +16,8 @@
 
 #include "../ip/ip_frag.h"
 
-enum  ip6_map_next_e {
+enum ip6_map_next_e
+{
   IP6_MAP_NEXT_IP4_LOOKUP,
 #ifdef MAP_SKIP_IP6_LOOKUP
   IP6_MAP_NEXT_IP4_REWRITE,
@@ -31,20 +32,23 @@ enum  ip6_map_next_e {
   IP6_MAP_N_NEXT,
 };
 
-enum  ip6_map_ip6_reass_next_e {
+enum ip6_map_ip6_reass_next_e
+{
   IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
   IP6_MAP_IP6_REASS_NEXT_DROP,
   IP6_MAP_IP6_REASS_N_NEXT,
 };
 
-enum  ip6_map_ip4_reass_next_e {
+enum ip6_map_ip4_reass_next_e
+{
   IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
   IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
   IP6_MAP_IP4_REASS_NEXT_DROP,
   IP6_MAP_IP4_REASS_N_NEXT,
 };
 
-enum  ip6_icmp_relay_next_e {
+enum ip6_icmp_relay_next_e
+{
   IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
   IP6_ICMP_RELAY_NEXT_DROP,
   IP6_ICMP_RELAY_N_NEXT,
@@ -54,91 +58,113 @@ vlib_node_registration_t ip6_map_ip4_reass_node;
 vlib_node_registration_t ip6_map_ip6_reass_node;
 static vlib_node_registration_t ip6_map_icmp_relay_node;
 
-typedef struct {
+typedef struct
+{
   u32 map_domain_index;
   u16 port;
   u8 cached;
 } map_ip6_map_ip4_reass_trace_t;
 
 u8 *
-format_ip6_map_ip4_reass_trace (u8 *s, va_list *args)
+format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
 {
-  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
-  map_ip6_map_ip4_reass_trace_t *t = va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
-  return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
-                t->port, t->cached?"cached":"forwarded");
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  map_ip6_map_ip4_reass_trace_t *t =
+    va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
+  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
+                t->map_domain_index, t->port,
+                t->cached ? "cached" : "forwarded");
 }
 
-typedef struct {
+typedef struct
+{
   u16 offset;
   u16 frag_len;
   u8 out;
 } map_ip6_map_ip6_reass_trace_t;
 
 u8 *
-format_ip6_map_ip6_reass_trace (u8 *s, va_list *args)
+format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
 {
-  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
-  map_ip6_map_ip6_reass_trace_t *t = va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
-  return format(s, "Offset: %d Fragment length: %d Status: %s", t->offset, t->frag_len, t->out?"out":"in");
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  map_ip6_map_ip6_reass_trace_t *t =
+    va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
+  return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
+                t->frag_len, t->out ? "out" : "in");
 }
 
 /*
  * ip6_map_sec_check
  */
 static_always_inline bool
-ip6_map_sec_check (map_domain_t *d, u16 port, ip4_header_t *ip4, ip6_header_t *ip6)
+ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
+                  ip6_header_t * ip6)
 {
-  u16 sp4 = clib_net_to_host_u16(port);
-  u32 sa4 = clib_net_to_host_u32(ip4->src_address.as_u32);
-  u64 sal6 = map_get_pfx(d, sa4, sp4);
-  u64 sar6 = map_get_sfx(d, sa4, sp4);
-
-  if (PREDICT_FALSE(sal6 != clib_net_to_host_u64(ip6->src_address.as_u64[0]) ||
-                   sar6 != clib_net_to_host_u64(ip6->src_address.as_u64[1])))
+  u16 sp4 = clib_net_to_host_u16 (port);
+  u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
+  u64 sal6 = map_get_pfx (d, sa4, sp4);
+  u64 sar6 = map_get_sfx (d, sa4, sp4);
+
+  if (PREDICT_FALSE
+      (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
+       || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
     return (false);
   return (true);
 }
 
 static_always_inline void
-ip6_map_security_check (map_domain_t *d, ip4_header_t *ip4, ip6_header_t *ip6, u32 *next, u8 *error)
+ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
+                       ip6_header_t * ip6, u32 * next, u8 * error)
 {
   map_main_t *mm = &map_main;
-  if (d->ea_bits_len || d->rules) {
-    if (d->psid_length > 0) {
-      if (!ip4_is_fragment(ip4)) {
-       u16 port = ip4_map_get_port(ip4, MAP_SENDER);
-       if (port) {
-         if (mm->sec_check)
-           *error = ip6_map_sec_check(d, port, ip4, ip6) ? MAP_ERROR_NONE : MAP_ERROR_DECAP_SEC_CHECK;
-       } else {
-         *error = MAP_ERROR_BAD_PROTOCOL;
+  if (d->ea_bits_len || d->rules)
+    {
+      if (d->psid_length > 0)
+       {
+         if (!ip4_is_fragment (ip4))
+           {
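+             /* Anti-spoofing: when enabled, verify that the IPv6 source matches the MAP address derived from the inner IPv4 source and port */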
+             u16 port = ip4_map_get_port (ip4, MAP_SENDER);
+             if (port)
+               {
+                 if (mm->sec_check)
+                   *error =
+                     ip6_map_sec_check (d, port, ip4,
+                                        ip6) ? MAP_ERROR_NONE :
+                     MAP_ERROR_DECAP_SEC_CHECK;
+               }
+             else
+               {
+                 *error = MAP_ERROR_BAD_PROTOCOL;
+               }
+           }
+         else
+           {
+             *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
+           }
        }
-      } else {
-       *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
-      }
     }
-  }
 }
 
 static_always_inline bool
-ip6_map_ip4_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
+ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
 {
 #ifdef MAP_SKIP_IP6_LOOKUP
   map_main_t *mm = &map_main;
   u32 adj_index0 = mm->adj4_index;
-  if (adj_index0 > 0) {
-    ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
-    ip_adjacency_t *adj = ip_get_adjacency(lm4, mm->adj4_index);
-    if (adj->n_adj > 1) {
-      u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
-      adj_index0 += (hash_c0 & (adj->n_adj - 1));
+  if (adj_index0 > 0)
+    {
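+      /* A pre-resolved adjacency is configured: skip the IPv4 lookup, hashing across ECMP members when present */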
+      ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
+      ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index);
+      if (adj->n_adj > 1)
+       {
+         u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
+         adj_index0 += (hash_c0 & (adj->n_adj - 1));
+       }
+      vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
+      return (true);
     }
-    vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
-    return (true);
-  }
 #endif
   return (false);
 }
@@ -147,376 +173,503 @@ ip6_map_ip4_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
  * ip6_map
  */
 static uword
-ip6_map (vlib_main_t *vm,
-        vlib_node_runtime_t *node,
-        vlib_frame_t *frame)
+ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_node.index);
   map_main_t *mm = &map_main;
   vlib_combined_counter_main_t *cm = mm->domain_counters;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
 
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    /* Dual loop */
-    while (n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      u8 error0 = MAP_ERROR_NONE;
-      u8 error1 = MAP_ERROR_NONE;
-      map_domain_t *d0 = 0, *d1 = 0;
-      ip4_header_t *ip40, *ip41;
-      ip6_header_t *ip60, *ip61;
-      u16 port0 = 0, port1 = 0;
-      u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
-      u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
-      u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
-
-      /* Prefetch next iteration. */
-      {
-       vlib_buffer_t *p2, *p3;
-
-       p2 = vlib_get_buffer(vm, from[2]);
-       p3 = vlib_get_buffer(vm, from[3]);
-
-        vlib_prefetch_buffer_header(p2, LOAD);
-        vlib_prefetch_buffer_header(p3, LOAD);
-
-        /* IPv6 + IPv4 header + 8 bytes of ULP */
-        CLIB_PREFETCH(p2->data, 68, LOAD);
-        CLIB_PREFETCH(p3->data, 68, LOAD);
-      }
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next +=2;
-      n_left_to_next -= 2;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip60 = vlib_buffer_get_current(p0);
-      ip61 = vlib_buffer_get_current(p1);
-      vlib_buffer_advance(p0, sizeof(ip6_header_t));
-      vlib_buffer_advance(p1, sizeof(ip6_header_t));
-      ip40 = vlib_buffer_get_current(p0);
-      ip41 = vlib_buffer_get_current(p1);
-
-      /*
-       * Encapsulated IPv4 packet
-       *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
-       *   - Lookup/Rewrite or Fragment node in case of packet > MTU
-       * Fragmented IPv6 packet
-       * ICMP IPv6 packet
-       *   - Error -> Pass to ICMPv6/ICMPv4 relay
-       *   - Info -> Pass to IPv6 local
-       * Anything else -> drop
-       */
-      if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip60->payload_length) > 20)) {
-       d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
-                               &map_domain_index0, &error0);
-      } else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
-                clib_net_to_host_u16(ip60->payload_length) > sizeof(icmp46_header_t)) {
-       icmp46_header_t *icmp = (void *)(ip60 + 1);
-       next0 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
-         IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
-      } else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
-       next0 = IP6_MAP_NEXT_IP6_REASS;
-      } else {
-       error0 = MAP_ERROR_BAD_PROTOCOL;
-      }
-      if (PREDICT_TRUE(ip61->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip61->payload_length) > 20)) {
-       d1 = ip6_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip41->src_address.as_u32,
-                               &map_domain_index1, &error1);
-      } else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
-                clib_net_to_host_u16(ip61->payload_length) > sizeof(icmp46_header_t)) {
-       icmp46_header_t *icmp = (void *)(ip61 + 1);
-       next1 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
-         IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
-      } else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) {
-       next1 = IP6_MAP_NEXT_IP6_REASS;
-      } else {
-       error1 = MAP_ERROR_BAD_PROTOCOL;
-      }
-
-      if (d0) {
-       /* MAP inbound security check */
-       ip6_map_security_check(d0, ip40, ip60, &next0, &error0);
-
-       if (PREDICT_TRUE(error0 == MAP_ERROR_NONE &&
-                        next0 == IP6_MAP_NEXT_IP4_LOOKUP)) {
-         if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu))) {
-           vnet_buffer(p0)->ip_frag.header_offset = 0;
-           vnet_buffer(p0)->ip_frag.flags = 0;
-           vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-           vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
-           next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
-         } else {
-           next0 = ip6_map_ip4_lookup_bypass(p0, ip40) ? IP6_MAP_NEXT_IP4_REWRITE : next0;
-         }
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
-                                         clib_net_to_host_u16(ip40->length));
-       }
-      }
-      if (d1) {
-       /* MAP inbound security check */
-       ip6_map_security_check(d1, ip41, ip61, &next1, &error1);
-
-       if (PREDICT_TRUE(error1 == MAP_ERROR_NONE &&
-                        next1 == IP6_MAP_NEXT_IP4_LOOKUP)) {
-         if (PREDICT_FALSE(d1->mtu && (clib_host_to_net_u16(ip41->length) > d1->mtu))) {
-           vnet_buffer(p1)->ip_frag.header_offset = 0;
-           vnet_buffer(p1)->ip_frag.flags = 0;
-           vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-           vnet_buffer(p1)->ip_frag.mtu = d1->mtu;
-           next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
-         } else {
-           next1 = ip6_map_ip4_lookup_bypass(p1, ip41) ? IP6_MAP_NEXT_IP4_REWRITE : next1;
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         u8 error0 = MAP_ERROR_NONE;
+         u8 error1 = MAP_ERROR_NONE;
+         map_domain_t *d0 = 0, *d1 = 0;
+         ip4_header_t *ip40, *ip41;
+         ip6_header_t *ip60, *ip61;
+         u16 port0 = 0, port1 = 0;
+         u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
+         u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
+         u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           /* IPv6 + IPv4 header + 8 bytes of ULP */
+           CLIB_PREFETCH (p2->data, 68, LOAD);
+           CLIB_PREFETCH (p3->data, 68, LOAD);
          }
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index1, 1,
-                                         clib_net_to_host_u16(ip41->length));
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip60 = vlib_buffer_get_current (p0);
+         ip61 = vlib_buffer_get_current (p1);
+         vlib_buffer_advance (p0, sizeof (ip6_header_t));
+         vlib_buffer_advance (p1, sizeof (ip6_header_t));
+         ip40 = vlib_buffer_get_current (p0);
+         ip41 = vlib_buffer_get_current (p1);
+
+         /*
+          * Encapsulated IPv4 packet
+          *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
+          *   - Lookup/Rewrite or Fragment node in case of packet > MTU
+          * Fragmented IPv6 packet
+          * ICMP IPv6 packet
+          *   - Error -> Pass to ICMPv6/ICMPv4 relay
+          *   - Info -> Pass to IPv6 local
+          * Anything else -> drop
+          */
+         if (PREDICT_TRUE
+             (ip60->protocol == IP_PROTOCOL_IP_IN_IP
+              && clib_net_to_host_u16 (ip60->payload_length) > 20))
+           {
+             d0 =
+               ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                   (ip4_address_t *) & ip40->src_address.
+                                   as_u32, &map_domain_index0, &error0);
+           }
+         else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
+                  clib_net_to_host_u16 (ip60->payload_length) >
+                  sizeof (icmp46_header_t))
+           {
+             icmp46_header_t *icmp = (void *) (ip60 + 1);
+             next0 = (icmp->type == ICMP6_echo_request
+                      || icmp->type ==
+                      ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+               IP6_MAP_NEXT_IP6_ICMP_RELAY;
+           }
+         else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
+           {
+             next0 = IP6_MAP_NEXT_IP6_REASS;
+           }
+         else
+           {
+             error0 = MAP_ERROR_BAD_PROTOCOL;
+           }
+         if (PREDICT_TRUE
+             (ip61->protocol == IP_PROTOCOL_IP_IN_IP
+              && clib_net_to_host_u16 (ip61->payload_length) > 20))
+           {
+             d1 =
+               ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+                                   (ip4_address_t *) & ip41->src_address.
+                                   as_u32, &map_domain_index1, &error1);
+           }
+         else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
+                  clib_net_to_host_u16 (ip61->payload_length) >
+                  sizeof (icmp46_header_t))
+           {
+             icmp46_header_t *icmp = (void *) (ip61 + 1);
+             next1 = (icmp->type == ICMP6_echo_request
+                      || icmp->type ==
+                      ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+               IP6_MAP_NEXT_IP6_ICMP_RELAY;
+           }
+         else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
+           {
+             next1 = IP6_MAP_NEXT_IP6_REASS;
+           }
+         else
+           {
+             error1 = MAP_ERROR_BAD_PROTOCOL;
+           }
+
+         if (d0)
+           {
+             /* MAP inbound security check */
+             ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+
+             if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
+                               next0 == IP6_MAP_NEXT_IP4_LOOKUP))
+               {
+                 if (PREDICT_FALSE
+                     (d0->mtu
+                      && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
+                   {
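+                     /* Inner IPv4 packet exceeds the domain MTU: hand it to the IPv4 fragmentation node */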
+                     vnet_buffer (p0)->ip_frag.header_offset = 0;
+                     vnet_buffer (p0)->ip_frag.flags = 0;
+                     vnet_buffer (p0)->ip_frag.next_index =
+                       IP4_FRAG_NEXT_IP4_LOOKUP;
+                     vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+                     next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
+                   }
+                 else
+                   {
+                     next0 =
+                       ip6_map_ip4_lookup_bypass (p0,
+                                                  ip40) ?
+                       IP6_MAP_NEXT_IP4_REWRITE : next0;
+                   }
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                                  cpu_index,
+                                                  map_domain_index0, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip40->length));
+               }
+           }
+         if (d1)
+           {
+             /* MAP inbound security check */
+             ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
+
+             if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
+                               next1 == IP6_MAP_NEXT_IP4_LOOKUP))
+               {
+                 if (PREDICT_FALSE
+                     (d1->mtu
+                      && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
+                   {
+                     vnet_buffer (p1)->ip_frag.header_offset = 0;
+                     vnet_buffer (p1)->ip_frag.flags = 0;
+                     vnet_buffer (p1)->ip_frag.next_index =
+                       IP4_FRAG_NEXT_IP4_LOOKUP;
+                     vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
+                     next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
+                   }
+                 else
+                   {
+                     next1 =
+                       ip6_map_ip4_lookup_bypass (p1,
+                                                  ip41) ?
+                       IP6_MAP_NEXT_IP4_REWRITE : next1;
+                   }
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                                  cpu_index,
+                                                  map_domain_index1, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip41->length));
+               }
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = port0;
+           }
+
+         if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
+             tr->map_domain_index = map_domain_index1;
+             tr->port = port1;
+           }
+
+         if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
+           {
+             /* Set ICMP parameters */
+             vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+             icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
+                                          ICMP6_destination_unreachable_source_address_failed_policy,
+                                          0);
+             next0 = IP6_MAP_NEXT_ICMP;
+           }
+         else
+           {
+             next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
+           }
+
+         if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
+           {
+             /* Set ICMP parameters */
+             vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+             icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
+                                          ICMP6_destination_unreachable_source_address_failed_policy,
+                                          0);
+             next1 = IP6_MAP_NEXT_ICMP;
+           }
+         else
+           {
+             next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
+           }
+
+         /* Reset packet */
+         if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
+           vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+         if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
+           vlib_buffer_advance (p1, -sizeof (ip6_header_t));
+
+         p0->error = error_node->errors[error0];
+         p1->error = error_node->errors[error1];
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, pi1, next0,
+                                          next1);
        }
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-       tr->map_domain_index = map_domain_index0;
-       tr->port = port0;
-      }
-
-      if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
-       tr->map_domain_index = map_domain_index1;
-       tr->port = port1;
-      }
-
-      if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) {
-       /* Set ICMP parameters */
-       vlib_buffer_advance(p0, -sizeof(ip6_header_t));
-       icmp6_error_set_vnet_buffer(p0, ICMP6_destination_unreachable,
-                                   ICMP6_destination_unreachable_source_address_failed_policy, 0);
-       next0 = IP6_MAP_NEXT_ICMP;
-      } else {
-       next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
-      }
-
-      if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) {
-       /* Set ICMP parameters */
-       vlib_buffer_advance(p1, -sizeof(ip6_header_t));
-       icmp6_error_set_vnet_buffer(p1, ICMP6_destination_unreachable,
-                                     ICMP6_destination_unreachable_source_address_failed_policy, 0);
-       next1 = IP6_MAP_NEXT_ICMP;
-      } else {
-       next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
-      }
-
-      /* Reset packet */
-      if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
-       vlib_buffer_advance(p0, -sizeof(ip6_header_t));
-      if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
-       vlib_buffer_advance(p1, -sizeof(ip6_header_t));
-
-      p0->error = error_node->errors[error0];
-      p1->error = error_node->errors[error1];
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
-    }
 
-    /* Single loop */
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      u8 error0 = MAP_ERROR_NONE;
-      map_domain_t *d0 = 0;
-      ip4_header_t *ip40;
-      ip6_header_t *ip60;
-      i32 port0 = 0;
-      u32 map_domain_index0 = ~0;
-      u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      vlib_buffer_advance(p0, sizeof(ip6_header_t));
-      ip40 = vlib_buffer_get_current(p0);
-
-      /*
-       * Encapsulated IPv4 packet
-       *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
-       *   - Lookup/Rewrite or Fragment node in case of packet > MTU
-       * Fragmented IPv6 packet
-       * ICMP IPv6 packet
-       *   - Error -> Pass to ICMPv6/ICMPv4 relay
-       *   - Info -> Pass to IPv6 local
-       * Anything else -> drop
-       */
-      if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_IP_IN_IP && clib_net_to_host_u16(ip60->payload_length) > 20)) {
-       d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
-                               &map_domain_index0, &error0);
-      } else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
-                clib_net_to_host_u16(ip60->payload_length) > sizeof(icmp46_header_t)) {
-       icmp46_header_t *icmp = (void *)(ip60 + 1);
-       next0 = (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply) ?
-         IP6_MAP_NEXT_IP6_LOCAL : IP6_MAP_NEXT_IP6_ICMP_RELAY;
-      } else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
-          (((ip6_frag_hdr_t *)(ip60+1))->next_hdr == IP_PROTOCOL_IP_IN_IP)) {
-       next0 = IP6_MAP_NEXT_IP6_REASS;
-      } else {
-       error0 = MAP_ERROR_BAD_PROTOCOL;
-      }
-
-      if (d0) {
-       /* MAP inbound security check */
-       ip6_map_security_check(d0, ip40, ip60, &next0, &error0);
-
-       if (PREDICT_TRUE(error0 == MAP_ERROR_NONE &&
-                        next0 == IP6_MAP_NEXT_IP4_LOOKUP)) {
-         if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu))) {
-           vnet_buffer(p0)->ip_frag.header_offset = 0;
-           vnet_buffer(p0)->ip_frag.flags = 0;
-           vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-           vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
-           next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
-         } else {
-           next0 = ip6_map_ip4_lookup_bypass(p0, ip40) ? IP6_MAP_NEXT_IP4_REWRITE : next0;
-         }
-         vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
-                                         clib_net_to_host_u16(ip40->length));
+      /* Single loop */
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         u8 error0 = MAP_ERROR_NONE;
+         map_domain_t *d0 = 0;
+         ip4_header_t *ip40;
+         ip6_header_t *ip60;
+         i32 port0 = 0;
+         u32 map_domain_index0 = ~0;
+         u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         vlib_buffer_advance (p0, sizeof (ip6_header_t));
+         ip40 = vlib_buffer_get_current (p0);
+
+         /*
+          * Encapsulated IPv4 packet
+          *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
+          *   - Lookup/Rewrite or Fragment node in case of packet > MTU
+          * Fragmented IPv6 packet
+          * ICMP IPv6 packet
+          *   - Error -> Pass to ICMPv6/ICMPv4 relay
+          *   - Info -> Pass to IPv6 local
+          * Anything else -> drop
+          */
+         if (PREDICT_TRUE
+             (ip60->protocol == IP_PROTOCOL_IP_IN_IP
+              && clib_net_to_host_u16 (ip60->payload_length) > 20))
+           {
+             d0 =
+               ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                   (ip4_address_t *) & ip40->src_address.
+                                   as_u32, &map_domain_index0, &error0);
+           }
+         else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
+                  clib_net_to_host_u16 (ip60->payload_length) >
+                  sizeof (icmp46_header_t))
+           {
+             icmp46_header_t *icmp = (void *) (ip60 + 1);
+             next0 = (icmp->type == ICMP6_echo_request
+                      || icmp->type ==
+                      ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
+               IP6_MAP_NEXT_IP6_ICMP_RELAY;
+           }
+         else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
+                  (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
+                   IP_PROTOCOL_IP_IN_IP))
+           {
+             next0 = IP6_MAP_NEXT_IP6_REASS;
+           }
+         else
+           {
+             error0 = MAP_ERROR_BAD_PROTOCOL;
+           }
+
+         if (d0)
+           {
+             /* MAP inbound security check */
+             ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+
+             if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
+                               next0 == IP6_MAP_NEXT_IP4_LOOKUP))
+               {
+                 if (PREDICT_FALSE
+                     (d0->mtu
+                      && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
+                   {
+                     vnet_buffer (p0)->ip_frag.header_offset = 0;
+                     vnet_buffer (p0)->ip_frag.flags = 0;
+                     vnet_buffer (p0)->ip_frag.next_index =
+                       IP4_FRAG_NEXT_IP4_LOOKUP;
+                     vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+                     next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
+                   }
+                 else
+                   {
+                     next0 =
+                       ip6_map_ip4_lookup_bypass (p0,
+                                                  ip40) ?
+                       IP6_MAP_NEXT_IP4_REWRITE : next0;
+                   }
+                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                                  cpu_index,
+                                                  map_domain_index0, 1,
+                                                  clib_net_to_host_u16
+                                                  (ip40->length));
+               }
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = (u16) port0;
+           }
+
+         if (mm->icmp6_enabled &&
+             (error0 == MAP_ERROR_DECAP_SEC_CHECK
+              || error0 == MAP_ERROR_NO_DOMAIN))
+           {
+             /* Set ICMP parameters */
+             vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+             icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
+                                          ICMP6_destination_unreachable_source_address_failed_policy,
+                                          0);
+             next0 = IP6_MAP_NEXT_ICMP;
+           }
+         else
+           {
+             next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
+           }
+
+         /* Reset packet */
+         if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
+           vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, next0);
        }
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-        map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-       tr->map_domain_index = map_domain_index0;
-       tr->port = (u16)port0;
-      }
-
-      if (mm->icmp6_enabled &&
-         (error0 == MAP_ERROR_DECAP_SEC_CHECK || error0 == MAP_ERROR_NO_DOMAIN)) {
-       /* Set ICMP parameters */
-       vlib_buffer_advance(p0, -sizeof(ip6_header_t));
-       icmp6_error_set_vnet_buffer(p0, ICMP6_destination_unreachable,
-                                     ICMP6_destination_unreachable_source_address_failed_policy, 0);
-       next0 = IP6_MAP_NEXT_ICMP;
-      } else {
-       next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
-      }
-
-      /* Reset packet */
-      if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
-       vlib_buffer_advance(p0, -sizeof(ip6_header_t));
-
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
 
   return frame->n_vectors;
 }
 
 
 static_always_inline void
-ip6_map_ip6_reass_prepare(vlib_main_t *vm, vlib_node_runtime_t *node, map_ip6_reass_t *r,
-                          u32 **fragments_ready, u32 **fragments_to_drop)
+ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
+                          map_ip6_reass_t * r, u32 ** fragments_ready,
+                          u32 ** fragments_to_drop)
 {
   ip4_header_t *ip40;
   ip6_header_t *ip60;
   ip6_frag_hdr_t *frag0;
   vlib_buffer_t *p0;
 
-  if(!r->ip4_header.ip_version_and_header_length)
+  if (!r->ip4_header.ip_version_and_header_length)
     return;
 
   //The IP header has arrived; check for fragments
   //that can now be forwarded
   int i;
-  for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) {
-    if (r->fragments[i].pi == ~0 ||
-        ((!r->fragments[i].next_data_len) && (r->fragments[i].next_data_offset != (0xffff))))
-      continue;
-
-    p0 = vlib_get_buffer(vm, r->fragments[i].pi);
-    ip60 = vlib_buffer_get_current(p0);
-    frag0 = (ip6_frag_hdr_t *)(ip60 + 1);
-    ip40 = (ip4_header_t *)(frag0 + 1);
-
-    if (ip6_frag_hdr_offset(frag0)) {
-      //Not first fragment, add the IPv4 header
-      clib_memcpy(ip40, &r->ip4_header, 20);
-    }
+  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+    {
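+      /* Skip empty slots and fragments that cannot be forwarded yet */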
+      if (r->fragments[i].pi == ~0 ||
+         ((!r->fragments[i].next_data_len)
+          && (r->fragments[i].next_data_offset != (0xffff))))
+       continue;
+
+      p0 = vlib_get_buffer (vm, r->fragments[i].pi);
+      ip60 = vlib_buffer_get_current (p0);
+      frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
+      ip40 = (ip4_header_t *) (frag0 + 1);
+
+      if (ip6_frag_hdr_offset (frag0))
+       {
+         //Not first fragment, add the IPv4 header
+         clib_memcpy (ip40, &r->ip4_header, 20);
+       }
 
 #ifdef MAP_IP6_REASS_COUNT_BYTES
-    r->forwarded += clib_net_to_host_u16(ip60->payload_length) - sizeof(*frag0);
+      r->forwarded +=
+       clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
 #endif
 
-    if (ip6_frag_hdr_more(frag0)) {
-      //Not last fragment, we copy end of next
-      clib_memcpy(u8_ptr_add(ip60, p0->current_length), r->fragments[i].next_data, 20);
-      p0->current_length += 20;
-      ip60->payload_length = u16_net_add(ip60->payload_length, 20);
-    }
-
-    if (!ip4_is_fragment(ip40)) {
-      ip40->fragment_id = frag_id_6to4(frag0->identification);
-      ip40->flags_and_fragment_offset = clib_host_to_net_u16(ip6_frag_hdr_offset(frag0));
-    } else {
-      ip40->flags_and_fragment_offset = clib_host_to_net_u16(ip4_get_fragment_offset(ip40) + ip6_frag_hdr_offset(frag0));
-    }
-
-    if (ip6_frag_hdr_more(frag0))
-      ip40->flags_and_fragment_offset |= clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
+      if (ip6_frag_hdr_more (frag0))
+       {
+         //Not last fragment, we copy end of next
+         clib_memcpy (u8_ptr_add (ip60, p0->current_length),
+                      r->fragments[i].next_data, 20);
+         p0->current_length += 20;
+         ip60->payload_length = u16_net_add (ip60->payload_length, 20);
+       }
 
-    ip40->length = clib_host_to_net_u16(p0->current_length - sizeof(*ip60) - sizeof(*frag0));
-    ip40->checksum = ip4_header_checksum(ip40);
+      if (!ip4_is_fragment (ip40))
+       {
+         ip40->fragment_id = frag_id_6to4 (frag0->identification);
+         ip40->flags_and_fragment_offset =
+           clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
+       }
+      else
+       {
+         ip40->flags_and_fragment_offset =
+           clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
+                                 ip6_frag_hdr_offset (frag0));
+       }
 
-    if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-      map_ip6_map_ip6_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-      tr->offset = ip4_get_fragment_offset(ip40);
-      tr->frag_len = clib_net_to_host_u16(ip40->length) - sizeof(*ip40);
-      tr->out = 1;
-    }
+      if (ip6_frag_hdr_more (frag0))
+       ip40->flags_and_fragment_offset |=
+         clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+
+      ip40->length =
+       clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
+                             sizeof (*frag0));
+      ip40->checksum = ip4_header_checksum (ip40);
+
+      if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+       {
+         map_ip6_map_ip6_reass_trace_t *tr =
+           vlib_add_trace (vm, node, p0, sizeof (*tr));
+         tr->offset = ip4_get_fragment_offset (ip40);
+         tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
+         tr->out = 1;
+       }
 
-    vec_add1(*fragments_ready, r->fragments[i].pi);
-    r->fragments[i].pi = ~0;
-    r->fragments[i].next_data_len = 0;
-    r->fragments[i].next_data_offset = 0;
-    map_main.ip6_reass_buffered_counter--;
+      vec_add1 (*fragments_ready, r->fragments[i].pi);
+      r->fragments[i].pi = ~0;
+      r->fragments[i].next_data_len = 0;
+      r->fragments[i].next_data_offset = 0;
+      map_main.ip6_reass_buffered_counter--;
 
-    //TODO: Best solution would be that ip6_map handles extension headers
-    // and ignores atomic fragment. But in the meantime, let's just copy the header.
+      //TODO: The best solution would be for ip6_map to handle extension headers
+      // and ignore atomic fragments. But in the meantime, let's just copy the header.
 
-    u8 protocol = frag0->next_hdr;
-    memmove(u8_ptr_add(ip40, - sizeof(*ip60)), ip60, sizeof(*ip60));
-    ((ip6_header_t *)u8_ptr_add(ip40, - sizeof(*ip60)))->protocol = protocol;
-    vlib_buffer_advance(p0, sizeof(*frag0));
-  }
+      u8 protocol = frag0->next_hdr;
+      memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
+      ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
+       protocol;
+      vlib_buffer_advance (p0, sizeof (*frag0));
+    }
 }
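
/*
 * Illustrative sketch (not part of the change above; standalone C with
 * hypothetical "sketch_" names): how ip6_map_ip6_reass_prepare carries the
 * IPv6 fragment information over to the rebuilt IPv4 header.  Both protocols
 * count the fragment offset in 8-octet units, so the offset copies straight
 * across; only the "more fragments" bit and the 32-bit identification need
 * translation, and the folding shown here is one possible choice.
 */
#include <stdint.h>

#define SKETCH_IP4_MF 0x2000	/* "more fragments" bit in flags/offset */

static uint16_t
sketch_frag6_to_frag4_offset (uint16_t offset_in_8_octets, int more_fragments)
{
  uint16_t v = offset_in_8_octets & 0x1fff;	/* low 13 bits: offset */
  if (more_fragments)
    v |= SKETCH_IP4_MF;
  return v;			/* caller converts to network byte order */
}

static uint16_t
sketch_frag_id_6to4 (uint32_t id6)
{
  /* fold 32 bits of IPv6 identification into the 16-bit IPv4 field */
  return (uint16_t) (id6 ^ (id6 >> 16));
}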
 
 void
-map_ip6_drop_pi(u32 pi)
+map_ip6_drop_pi (u32 pi)
 {
-  vlib_main_t *vm = vlib_get_main();
-  vlib_node_runtime_t *n = vlib_node_get_runtime(vm, ip6_map_ip6_reass_node.index);
-  vlib_set_next_frame_buffer(vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
+  vlib_main_t *vm = vlib_get_main ();
+  vlib_node_runtime_t *n =
+    vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
+  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
 }
 
 void
-map_ip4_drop_pi(u32 pi)
+map_ip4_drop_pi (u32 pi)
 {
-  vlib_main_t *vm = vlib_get_main();
-  vlib_node_runtime_t *n = vlib_node_get_runtime(vm, ip6_map_ip4_reass_node.index);
-  vlib_set_next_frame_buffer(vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
+  vlib_main_t *vm = vlib_get_main ();
+  vlib_node_runtime_t *n =
+    vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
+  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
 }
 
 /*
@@ -527,109 +680,139 @@ map_ip4_drop_pi(u32 pi)
  * have been forwarded.
  */
 static uword
-ip6_map_ip6_reass (vlib_main_t *vm,
-          vlib_node_runtime_t *node,
-          vlib_frame_t *frame)
+ip6_map_ip6_reass (vlib_main_t * vm,
+                  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_ip6_reass_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
   u32 *fragments_to_drop = NULL;
   u32 *fragments_ready = NULL;
 
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    /* Single loop */
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      u8 error0 = MAP_ERROR_NONE;
-      ip6_header_t *ip60;
-      ip6_frag_hdr_t *frag0;
-      u16 offset;
-      u16 next_offset;
-      u16 frag_len;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      frag0 = (ip6_frag_hdr_t *)(ip60 + 1);
-      offset = clib_host_to_net_u16(frag0->fragment_offset_and_more) & (~7);
-      frag_len = clib_net_to_host_u16(ip60->payload_length) - sizeof(*frag0);
-      next_offset = ip6_frag_hdr_more(frag0) ? (offset + frag_len) : (0xffff);
-
-      //FIXME: Support other extension headers, maybe
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-        map_ip6_map_ip6_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-        tr->offset = offset;
-        tr->frag_len = frag_len;
-        tr->out = 0;
-      }
-
-      map_ip6_reass_lock();
-      map_ip6_reass_t *r = map_ip6_reass_get(&ip60->src_address, &ip60->dst_address,
-                                             frag0->identification, frag0->next_hdr, &fragments_to_drop);
-      //FIXME: Use better error codes
-      if (PREDICT_FALSE(!r)) {
-        // Could not create a caching entry
-        error0 = MAP_ERROR_FRAGMENT_MEMORY;
-      } else if (PREDICT_FALSE((frag_len <= 20 &&
-          (ip6_frag_hdr_more(frag0) || (!offset))))) {
-        //Very small fragment are restricted to the last one and
-        //can't be the first one
-        error0 = MAP_ERROR_FRAGMENT_MALFORMED;
-      } else if (map_ip6_reass_add_fragment(r, pi0, offset, next_offset, (u8 *)(frag0 + 1), frag_len)) {
-        map_ip6_reass_free(r, &fragments_to_drop);
-        error0 = MAP_ERROR_FRAGMENT_MEMORY;
-      } else {
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Single loop */
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         u8 error0 = MAP_ERROR_NONE;
+         ip6_header_t *ip60;
+         ip6_frag_hdr_t *frag0;
+         u16 offset;
+         u16 next_offset;
+         u16 frag_len;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
+         offset =
+           clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
+         frag_len =
+           clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
+         next_offset =
+           ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
+
+         //FIXME: Support other extension headers, maybe
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_ip6_map_ip6_reass_trace_t *tr =
+               vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->offset = offset;
+             tr->frag_len = frag_len;
+             tr->out = 0;
+           }
+
+         map_ip6_reass_lock ();
+         map_ip6_reass_t *r =
+           map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
+                              frag0->identification, frag0->next_hdr,
+                              &fragments_to_drop);
+         //FIXME: Use better error codes
+         if (PREDICT_FALSE (!r))
+           {
+             // Could not create a caching entry
+             error0 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+         else if (PREDICT_FALSE ((frag_len <= 20 &&
+                                  (ip6_frag_hdr_more (frag0) || (!offset)))))
+           {
+             //Very small fragments are restricted to the last one and
+             //can't be the first one
+             error0 = MAP_ERROR_FRAGMENT_MALFORMED;
+           }
+         else
+           if (map_ip6_reass_add_fragment
+               (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
+           {
+             map_ip6_reass_free (r, &fragments_to_drop);
+             error0 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+         else
+           {
 #ifdef MAP_IP6_REASS_COUNT_BYTES
-        if (!ip6_frag_hdr_more(frag0))
-          r->expected_total = offset + frag_len;
+             if (!ip6_frag_hdr_more (frag0))
+               r->expected_total = offset + frag_len;
 #endif
-        ip6_map_ip6_reass_prepare(vm, node, r, &fragments_ready, &fragments_to_drop);
+             ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
+                                        &fragments_to_drop);
 #ifdef MAP_IP6_REASS_COUNT_BYTES
-        if(r->forwarded >= r->expected_total)
-          map_ip6_reass_free(r, &fragments_to_drop);
+             if (r->forwarded >= r->expected_total)
+               map_ip6_reass_free (r, &fragments_to_drop);
 #endif
-      }
-      map_ip6_reass_unlock();
-
-      if (error0 == MAP_ERROR_NONE) {
-        if (frag_len > 20) {
-          //Dequeue the packet
-          n_left_to_next++;
-          to_next--;
-        } else {
-          //All data from that packet was copied no need to keep it, but this is not an error
-          p0->error = error_node->errors[MAP_ERROR_NONE];
-          vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, IP6_MAP_IP6_REASS_NEXT_DROP);
-        }
-      } else {
-        p0->error = error_node->errors[error0];
-        vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, IP6_MAP_IP6_REASS_NEXT_DROP);
-      }
+           }
+         map_ip6_reass_unlock ();
+
+         if (error0 == MAP_ERROR_NONE)
+           {
+             if (frag_len > 20)
+               {
+                 //Dequeue the packet
+                 n_left_to_next++;
+                 to_next--;
+               }
+             else
+               {
+                 //All data from that packet was copied; no need to keep it, but this is not an error
+                 p0->error = error_node->errors[MAP_ERROR_NONE];
+                 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                                  to_next, n_left_to_next,
+                                                  pi0,
+                                                  IP6_MAP_IP6_REASS_NEXT_DROP);
+               }
+           }
+         else
+           {
+             p0->error = error_node->errors[error0];
+             vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                              n_left_to_next, pi0,
+                                              IP6_MAP_IP6_REASS_NEXT_DROP);
+           }
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
-
-  map_send_all_to_node(vm, fragments_ready, node,
-                           &error_node->errors[MAP_ERROR_NONE],
-                           IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
-  map_send_all_to_node(vm, fragments_to_drop, node,
-                           &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
-                           IP6_MAP_IP6_REASS_NEXT_DROP);
-
-  vec_free(fragments_to_drop);
-  vec_free(fragments_ready);
+
+  map_send_all_to_node (vm, fragments_ready, node,
+                       &error_node->errors[MAP_ERROR_NONE],
+                       IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
+  map_send_all_to_node (vm, fragments_to_drop, node,
+                       &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+                       IP6_MAP_IP6_REASS_NEXT_DROP);
+
+  vec_free (fragments_to_drop);
+  vec_free (fragments_ready);
   return frame->n_vectors;
 }
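
/*
 * Illustrative sketch (assumption; standalone C): the byte-counting
 * completion test compiled in above with MAP_IP6_REASS_COUNT_BYTES.  The
 * fragment without the "more fragments" bit fixes the expected total; once
 * the forwarded payload bytes reach that total, the reassembly state can be
 * freed.
 */
#include <stdint.h>

typedef struct
{
  uint32_t forwarded;		/* payload bytes already forwarded */
  uint32_t expected_total;	/* offset + length of the last fragment,
				   initialised to 0xffffffff (unknown) */
} sketch_reass_progress_t;

static int
sketch_reass_note_fragment (sketch_reass_progress_t * r, uint32_t offset,
			    uint32_t frag_len, int more_fragments)
{
  r->forwarded += frag_len;
  if (!more_fragments)
    r->expected_total = offset + frag_len;
  return r->forwarded >= r->expected_total;	/* non-zero when complete */
}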
 
@@ -637,147 +820,195 @@ ip6_map_ip6_reass (vlib_main_t *vm,
  * ip6_ip4_virt_reass
  */
 static uword
-ip6_map_ip4_reass (vlib_main_t *vm,
-                   vlib_node_runtime_t *node,
-                   vlib_frame_t *frame)
+ip6_map_ip4_reass (vlib_main_t * vm,
+                  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_ip4_reass_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
   map_main_t *mm = &map_main;
   vlib_combined_counter_main_t *cm = mm->domain_counters;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
   u32 *fragments_to_drop = NULL;
   u32 *fragments_to_loopback = NULL;
 
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    /* Single loop */
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      u8 error0 = MAP_ERROR_NONE;
-      map_domain_t *d0;
-      ip4_header_t *ip40;
-      ip6_header_t *ip60;
-      i32 port0 = 0;
-      u32 map_domain_index0 = ~0;
-      u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
-      u8 cached = 0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip40 = vlib_buffer_get_current(p0);
-      ip60 = ((ip6_header_t *)ip40) - 1;
-
-      d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], (ip4_address_t *)&ip40->src_address.as_u32,
-                              &map_domain_index0, &error0);
-
-      map_ip4_reass_lock();
-      //This node only deals with fragmented ip4
-      map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
-                                             ip40->fragment_id, ip40->protocol, &fragments_to_drop);
-      if (PREDICT_FALSE(!r)) {
-        // Could not create a caching entry
-        error0 = MAP_ERROR_FRAGMENT_MEMORY;
-      } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
-        // This is a fragment
-        if (r->port >= 0) {
-          // We know the port already
-          port0 = r->port;
-        } else if (map_ip4_reass_add_fragment(r, pi0)) {
-          // Not enough space for caching
-          error0 = MAP_ERROR_FRAGMENT_MEMORY;
-          map_ip4_reass_free(r, &fragments_to_drop);
-        } else {
-          cached = 1;
-        }
-      } else if ((port0 = ip4_get_port(ip40, MAP_SENDER, p0->current_length)) < 0) {
-        // Could not find port from first fragment. Stop reassembling.
-        error0 = MAP_ERROR_BAD_PROTOCOL;
-        port0 = 0;
-        map_ip4_reass_free(r, &fragments_to_drop);
-      } else {
-        // Found port. Remember it and loopback saved fragments
-        r->port = port0;
-        map_ip4_reass_get_fragments(r, &fragments_to_loopback);
-      }
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Single loop */
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         u8 error0 = MAP_ERROR_NONE;
+         map_domain_t *d0;
+         ip4_header_t *ip40;
+         ip6_header_t *ip60;
+         i32 port0 = 0;
+         u32 map_domain_index0 = ~0;
+         u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
+         u8 cached = 0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip40 = vlib_buffer_get_current (p0);
+         ip60 = ((ip6_header_t *) ip40) - 1;
+
+         d0 =
+           ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                               (ip4_address_t *) & ip40->src_address.as_u32,
+                               &map_domain_index0, &error0);
+
+         map_ip4_reass_lock ();
+         //This node only deals with fragmented ip4
+         map_ip4_reass_t *r =
+           map_ip4_reass_get (ip40->src_address.as_u32,
+                              ip40->dst_address.as_u32,
+                              ip40->fragment_id, ip40->protocol,
+                              &fragments_to_drop);
+         if (PREDICT_FALSE (!r))
+           {
+             // Could not create a caching entry
+             error0 = MAP_ERROR_FRAGMENT_MEMORY;
+           }
+         else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
+           {
+             // This is a fragment
+             if (r->port >= 0)
+               {
+                 // We know the port already
+                 port0 = r->port;
+               }
+             else if (map_ip4_reass_add_fragment (r, pi0))
+               {
+                 // Not enough space for caching
+                 error0 = MAP_ERROR_FRAGMENT_MEMORY;
+                 map_ip4_reass_free (r, &fragments_to_drop);
+               }
+             else
+               {
+                 cached = 1;
+               }
+           }
+         else
+           if ((port0 =
+                ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0)
+           {
+             // Could not find port from first fragment. Stop reassembling.
+             error0 = MAP_ERROR_BAD_PROTOCOL;
+             port0 = 0;
+             map_ip4_reass_free (r, &fragments_to_drop);
+           }
+         else
+           {
+             // Found port. Remember it and loopback saved fragments
+             r->port = port0;
+             map_ip4_reass_get_fragments (r, &fragments_to_loopback);
+           }
 
 #ifdef MAP_IP4_REASS_COUNT_BYTES
-      if (!cached && r) {
-        r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
-        if (!ip4_get_fragment_more(ip40))
-          r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
-        if(r->forwarded >= r->expected_total)
-          map_ip4_reass_free(r, &fragments_to_drop);
-      }
+         if (!cached && r)
+           {
+             r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
+             if (!ip4_get_fragment_more (ip40))
+               r->expected_total =
+                 ip4_get_fragment_offset (ip40) * 8 +
+                 clib_host_to_net_u16 (ip40->length) - 20;
+             if (r->forwarded >= r->expected_total)
+               map_ip4_reass_free (r, &fragments_to_drop);
+           }
 #endif
 
-      map_ip4_reass_unlock();
-
-      if(PREDICT_TRUE(error0 == MAP_ERROR_NONE))
-       error0 = ip6_map_sec_check(d0, port0, ip40, ip60) ? MAP_ERROR_NONE : MAP_ERROR_DECAP_SEC_CHECK;
-
-      if (PREDICT_FALSE(d0->mtu && (clib_host_to_net_u16(ip40->length) > d0->mtu) &&
-                        error0 == MAP_ERROR_NONE && !cached)) {
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.flags = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
-        next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
-      }
-
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-        map_ip6_map_ip4_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-        tr->map_domain_index = map_domain_index0;
-        tr->port = port0;
-        tr->cached = cached;
-      }
-
-      if (cached) {
-        //Dequeue the packet
-        n_left_to_next++;
-        to_next--;
-      } else {
-        if (error0 == MAP_ERROR_NONE)
-               vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index, map_domain_index0, 1,
-                                               clib_net_to_host_u16(ip40->length));
-        next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
-        p0->error = error_node->errors[error0];
-        vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
-      }
-
-      //Loopback when we reach the end of the inpu vector
-      if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
-        from = vlib_frame_vector_args(frame);
-        u32 len = vec_len(fragments_to_loopback);
-        if(len <= VLIB_FRAME_SIZE) {
-          clib_memcpy(from, fragments_to_loopback, sizeof(u32)*len);
-          n_left_from = len;
-          vec_reset_length(fragments_to_loopback);
-        } else {
-          clib_memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
-          n_left_from = VLIB_FRAME_SIZE;
-          _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
-        }
-      }
+         map_ip4_reass_unlock ();
+
+         if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+           error0 =
+             ip6_map_sec_check (d0, port0, ip40,
+                                ip60) ? MAP_ERROR_NONE :
+             MAP_ERROR_DECAP_SEC_CHECK;
+
+         if (PREDICT_FALSE
+             (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
+              && error0 == MAP_ERROR_NONE && !cached))
+           {
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.flags = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
+             next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
+           }
+
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_ip6_map_ip4_reass_trace_t *tr =
+               vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = map_domain_index0;
+             tr->port = port0;
+             tr->cached = cached;
+           }
+
+         if (cached)
+           {
+             //Dequeue the packet
+             n_left_to_next++;
+             to_next--;
+           }
+         else
+           {
+             if (error0 == MAP_ERROR_NONE)
+               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                                cpu_index, map_domain_index0,
+                                                1,
+                                                clib_net_to_host_u16 (ip40->
+                                                                      length));
+             next0 =
+               (error0 ==
+                MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
+             p0->error = error_node->errors[error0];
+             vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                              n_left_to_next, pi0, next0);
+           }
+
+         //Loopback when we reach the end of the input vector
+         if (n_left_from == 0 && vec_len (fragments_to_loopback))
+           {
+             from = vlib_frame_vector_args (frame);
+             u32 len = vec_len (fragments_to_loopback);
+             if (len <= VLIB_FRAME_SIZE)
+               {
+                 clib_memcpy (from, fragments_to_loopback,
+                              sizeof (u32) * len);
+                 n_left_from = len;
+                 vec_reset_length (fragments_to_loopback);
+               }
+             else
+               {
+                 clib_memcpy (from,
+                              fragments_to_loopback + (len -
+                                                       VLIB_FRAME_SIZE),
+                              sizeof (u32) * VLIB_FRAME_SIZE);
+                 n_left_from = VLIB_FRAME_SIZE;
+                 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+               }
+           }
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
-  map_send_all_to_node(vm, fragments_to_drop, node,
-                             &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
-                             IP6_MAP_IP4_REASS_NEXT_DROP);
-
-  vec_free(fragments_to_drop);
-  vec_free(fragments_to_loopback);
+  map_send_all_to_node (vm, fragments_to_drop, node,
+                       &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
+                       IP6_MAP_IP4_REASS_NEXT_DROP);
+
+  vec_free (fragments_to_drop);
+  vec_free (fragments_to_loopback);
   return frame->n_vectors;
 }
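
/*
 * Illustrative sketch (assumption; standalone C): the "loopback" refill used
 * above.  Fragments cached before the port was known are fed back through
 * the same processing loop by overwriting the already-consumed input vector,
 * at most one frame's worth at a time, keeping the rest queued.
 * FRAME_SIZE_SKETCH stands in for VLIB_FRAME_SIZE.
 */
#include <stdint.h>
#include <string.h>

#define FRAME_SIZE_SKETCH 256

static uint32_t
sketch_refill_from_loopback (uint32_t * input, uint32_t * loopback,
			     uint32_t * n_loopback)
{
  uint32_t n = *n_loopback;
  uint32_t take = (n <= FRAME_SIZE_SKETCH) ? n : FRAME_SIZE_SKETCH;

  /* take the tail of the loopback vector so the head stays queued */
  memcpy (input, loopback + (n - take), take * sizeof (uint32_t));
  *n_loopback = n - take;
  return take;			/* becomes the new n_left_from */
}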
 
@@ -785,142 +1016,160 @@ ip6_map_ip4_reass (vlib_main_t *vm,
  * ip6_icmp_relay
  */
 static uword
-ip6_map_icmp_relay (vlib_main_t *vm,
-                   vlib_node_runtime_t *node,
-                   vlib_frame_t *frame)
+ip6_map_icmp_relay (vlib_main_t * vm,
+                   vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_icmp_relay_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
   map_main_t *mm = &map_main;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
   u16 *fragment_ids, *fid;
 
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
 
   /* Get random fragment IDs for replies. */
-  fid = fragment_ids = clib_random_buffer_get_data (&vm->random_buffer, n_left_from * sizeof (fragment_ids[0]));
-
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-
-    /* Single loop */
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      u8 error0 = MAP_ERROR_NONE;
-      ip6_header_t *ip60;
-      u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
-      u32 mtu;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      u16 tlen = clib_net_to_host_u16(ip60->payload_length);
-
-      /*
-       * In:
-       *  IPv6 header           (40)
-       *  ICMPv6 header          (8) 
-       *  IPv6 header           (40)
-       *  Original IPv4 header / packet
-       * Out:
-       *  New IPv4 header
-       *  New ICMP header
-       *  Original IPv4 header / packet
-       */
-
-      /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
-      if (tlen < 76) {
-       error0 = MAP_ERROR_ICMP_RELAY;
-       goto error;
-      }
-
-      icmp46_header_t *icmp60 = (icmp46_header_t *)(ip60 + 1);
-      ip6_header_t *inner_ip60 = (ip6_header_t *)(icmp60 + 2);
-
-      if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP) {
-       error0 = MAP_ERROR_ICMP_RELAY;
-       goto error;
-      }
-
-      ip4_header_t *inner_ip40 = (ip4_header_t *)(inner_ip60 + 1);
-      vlib_buffer_advance(p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
-      ip4_header_t *new_ip40 = vlib_buffer_get_current(p0);
-      icmp46_header_t *new_icmp40 = (icmp46_header_t *)(new_ip40 + 1);
-
-      /*
-       * Relay according to RFC2473, section 8.3
-       */
-      switch (icmp60->type) {
-      case ICMP6_destination_unreachable:
-      case ICMP6_time_exceeded:
-      case ICMP6_parameter_problem:
-       /* Type 3 - destination unreachable, Code 1 - host unreachable */
-       new_icmp40->type = ICMP4_destination_unreachable;
-       new_icmp40->code = ICMP4_destination_unreachable_destination_unreachable_host;
-       break;
-
-      case ICMP6_packet_too_big:
-       /* Type 3 - destination unreachable, Code 4 - packet too big */
-       /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
-       mtu = clib_net_to_host_u32(*((u32 *)(icmp60 + 1)));
-
-       /* Check DF flag */
-       if (!(inner_ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT))) {
-         error0 = MAP_ERROR_ICMP_RELAY;
-         goto error;
+  fid = fragment_ids =
+    clib_random_buffer_get_data (&vm->random_buffer,
+                                n_left_from * sizeof (fragment_ids[0]));
+
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Single loop */
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         u8 error0 = MAP_ERROR_NONE;
+         ip6_header_t *ip60;
+         u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
+         u32 mtu;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
+
+         /*
+          * In:
+          *  IPv6 header           (40)
+          *  ICMPv6 header          (8) 
+          *  IPv6 header           (40)
+          *  Original IPv4 header / packet
+          * Out:
+          *  New IPv4 header
+          *  New ICMP header
+          *  Original IPv4 header / packet
+          */
+
+         /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
+         if (tlen < 76)
+           {
+             error0 = MAP_ERROR_ICMP_RELAY;
+             goto error;
+           }
+
+         icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
+         ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
+
+         if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
+           {
+             error0 = MAP_ERROR_ICMP_RELAY;
+             goto error;
+           }
+
+         ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
+         vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
+         ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
+         icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
+
+         /*
+          * Relay according to RFC2473, section 8.3
+          */
+         switch (icmp60->type)
+           {
+           case ICMP6_destination_unreachable:
+           case ICMP6_time_exceeded:
+           case ICMP6_parameter_problem:
+             /* Type 3 - destination unreachable, Code 1 - host unreachable */
+             new_icmp40->type = ICMP4_destination_unreachable;
+             new_icmp40->code =
+               ICMP4_destination_unreachable_destination_unreachable_host;
+             break;
+
+           case ICMP6_packet_too_big:
+             /* Type 3 - destination unreachable, Code 4 - packet too big */
+             /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
+             mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
+
+             /* Check DF flag */
+             if (!
+                 (inner_ip40->
+                  flags_and_fragment_offset &
+                  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
+               {
+                 error0 = MAP_ERROR_ICMP_RELAY;
+                 goto error;
+               }
+
+             new_icmp40->type = ICMP4_destination_unreachable;
+             new_icmp40->code =
+               ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
+             *((u32 *) (new_icmp40 + 1)) =
+               clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
+             break;
+
+           default:
+             error0 = MAP_ERROR_ICMP_RELAY;
+             break;
+           }
+
+         /*
+          * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
+          */
+         new_ip40->ip_version_and_header_length = 0x45;
+         new_ip40->tos = 0;
+         u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
+         new_ip40->length = clib_host_to_net_u16 (nlen);
+         new_ip40->fragment_id = fid[0];
+         fid++;
+         new_ip40->ttl = 64;
+         new_ip40->protocol = IP_PROTOCOL_ICMP;
+         new_ip40->src_address = mm->icmp4_src_address;
+         new_ip40->dst_address = inner_ip40->src_address;
+         new_ip40->checksum = ip4_header_checksum (new_ip40);
+
+         new_icmp40->checksum = 0;
+         ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
+         new_icmp40->checksum = ~ip_csum_fold (sum);
+
+         vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1);
+
+       error:
+         if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
+             tr->map_domain_index = 0;
+             tr->port = 0;
+           }
+
+         next0 =
+           (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, next0);
        }
-
-       new_icmp40->type = ICMP4_destination_unreachable;
-       new_icmp40->code = ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
-       *((u32 *)(new_icmp40 + 1)) = clib_host_to_net_u32(mtu < 1280 ? 1280 : mtu);
-       break;
-
-      default:
-       error0 = MAP_ERROR_ICMP_RELAY;
-       break;
-      }
-
-      /*
-       * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
-       */
-      new_ip40->ip_version_and_header_length = 0x45;
-      new_ip40->tos = 0;
-      u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
-      new_ip40->length = clib_host_to_net_u16(nlen);
-      new_ip40->fragment_id = fid[0]; fid++;
-      new_ip40->ttl = 64;
-      new_ip40->protocol = IP_PROTOCOL_ICMP;
-      new_ip40->src_address = mm->icmp4_src_address;
-      new_ip40->dst_address = inner_ip40->src_address;
-      new_ip40->checksum = ip4_header_checksum(new_ip40);
-
-      new_icmp40->checksum = 0;
-      ip_csum_t sum = ip_incremental_checksum(0, new_icmp40, nlen - 20);
-      new_icmp40->checksum = ~ip_csum_fold(sum);
-
-      vlib_increment_simple_counter(&mm->icmp_relayed, cpu_index, 0, 1);
-
-    error:
-      if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
-       map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
-       tr->map_domain_index = 0;
-       tr->port = 0;
-      }
-
-      next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
 
   return frame->n_vectors;
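
/*
 * Illustrative sketch (assumption; standalone C): the ICMPv6 to ICMPv4 relay
 * mapping implemented above per RFC 2473, section 8.3.  Destination
 * unreachable, time exceeded and parameter problem all relay as "host
 * unreachable"; packet-too-big relays as "fragmentation needed" with the MTU
 * clamped to a minimum of 1280, and the relayed datagram is capped at 576
 * bytes per RFC 1812.  Numeric type/code values are spelled out for clarity.
 */
#include <stdint.h>

static int
sketch_icmp6_relay_to_icmp4 (uint8_t icmp6_type, uint32_t mtu6,
			     uint8_t * type4, uint8_t * code4, uint32_t * mtu4)
{
  switch (icmp6_type)
    {
    case 1:			/* destination unreachable */
    case 3:			/* time exceeded */
    case 4:			/* parameter problem */
      *type4 = 3;		/* destination unreachable */
      *code4 = 1;		/* host unreachable */
      return 0;
    case 2:			/* packet too big */
      *type4 = 3;
      *code4 = 4;		/* fragmentation needed and DF set */
      *mtu4 = mtu6 < 1280 ? 1280 : mtu6;
      return 0;
    default:
      return -1;		/* not relayed */
    }
}

static uint16_t
sketch_relay_ip4_length (uint16_t ip6_payload_length)
{
  /* mirrors the length computation above: cap at 576 bytes (RFC 1812) */
  uint16_t len = ip6_payload_length - 20;
  return len > 576 ? 576 : len;
}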
 
@@ -932,6 +1181,7 @@ static char *map_error_strings[] = {
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_node) = {
   .function = ip6_map,
   .name = "ip6-map",
@@ -957,7 +1207,9 @@ VLIB_REGISTER_NODE(ip6_map_node) = {
     [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
   .function = ip6_map_ip6_reass,
   .name = "ip6-map-ip6-reass",
@@ -972,7 +1224,9 @@ VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
     [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
   .function = ip6_map_ip4_reass,
   .name = "ip6-map-ip4-reass",
@@ -988,7 +1242,9 @@ VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
     [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
   .function = ip6_map_icmp_relay,
   .name = "ip6-map-icmp-relay",
@@ -1003,3 +1259,12 @@ VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
     [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index 39e8e0a..eb3996c 100644 (file)
@@ -18,7 +18,8 @@
 
 #define IP6_MAP_T_DUAL_LOOP
 
-typedef enum {
+typedef enum
+{
   IP6_MAPT_NEXT_MAPT_TCP_UDP,
   IP6_MAPT_NEXT_MAPT_ICMP,
   IP6_MAPT_NEXT_MAPT_FRAGMENTED,
@@ -26,21 +27,24 @@ typedef enum {
   IP6_MAPT_N_NEXT
 } ip6_mapt_next_t;
 
-typedef enum {
+typedef enum
+{
   IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
   IP6_MAPT_ICMP_NEXT_IP4_FRAG,
   IP6_MAPT_ICMP_NEXT_DROP,
   IP6_MAPT_ICMP_N_NEXT
 } ip6_mapt_icmp_next_t;
 
-typedef enum {
+typedef enum
+{
   IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
   IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
   IP6_MAPT_TCP_UDP_NEXT_DROP,
   IP6_MAPT_TCP_UDP_N_NEXT
 } ip6_mapt_tcp_udp_next_t;
 
-typedef enum {
+typedef enum
+{
   IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
   IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
   IP6_MAPT_FRAGMENTED_NEXT_DROP,
@@ -48,131 +52,153 @@ typedef enum {
 } ip6_mapt_fragmented_next_t;
 
 static_always_inline int
-ip6_map_fragment_cache (ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d, u16 port)
+ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
+                       map_domain_t * d, u16 port)
 {
   u32 *ignore = NULL;
-  map_ip4_reass_lock();
-  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address), ip6_map_t_embedded_address(d, &ip6->dst_address),
-                                         frag_id_6to4(frag->identification),
-                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
-                                             &ignore);
+  map_ip4_reass_lock ();
+  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
+                                         ip6_map_t_embedded_address (d,
+                                                                     &ip6->
+                                                                     dst_address),
+                                         frag_id_6to4 (frag->identification),
+                                         (ip6->protocol ==
+                                          IP_PROTOCOL_ICMP6) ?
+                                         IP_PROTOCOL_ICMP : ip6->protocol,
+                                         &ignore);
   if (r)
     r->port = port;
 
-  map_ip4_reass_unlock();
+  map_ip4_reass_unlock ();
   return !r;
 }
 
 /* Returns the associated port or -1 */
 static_always_inline i32
-ip6_map_fragment_get(ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d)
+ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
+                     map_domain_t * d)
 {
   u32 *ignore = NULL;
-  map_ip4_reass_lock();
-  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address), ip6_map_t_embedded_address(d, &ip6->dst_address),
-                                         frag_id_6to4(frag->identification),
-                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
-                                             &ignore);
-  i32 ret = r?r->port:-1;
-  map_ip4_reass_unlock();
+  map_ip4_reass_lock ();
+  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
+                                         ip6_map_t_embedded_address (d,
+                                                                     &ip6->
+                                                                     dst_address),
+                                         frag_id_6to4 (frag->identification),
+                                         (ip6->protocol ==
+                                          IP_PROTOCOL_ICMP6) ?
+                                         IP_PROTOCOL_ICMP : ip6->protocol,
+                                         &ignore);
+  i32 ret = r ? r->port : -1;
+  map_ip4_reass_unlock ();
   return ret;
 }
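
/*
 * Illustrative sketch (assumption; standalone C): the kind of key the two
 * helpers above hand to map_ip4_reass_get.  Fragments of one packet are
 * matched on the translated IPv4 addresses, the folded fragment id and the
 * protocol (ICMPv6 recorded as ICMP), and the entry caches the L4 port
 * learned from whichever fragment carries it.
 */
#include <stdint.h>

typedef struct
{
  uint32_t src4;		/* IPv4 source mapped from the IPv6 source */
  uint32_t dst4;		/* IPv4 address embedded in the IPv6 destination */
  uint16_t frag_id;		/* IPv6 identification folded to 16 bits */
  uint8_t protocol;		/* ICMPv6 stored as ICMP */
  int32_t port;			/* -1 until a fragment reveals the port */
} sketch_map_reass_key_t;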
 
 static_always_inline u8
-ip6_translate_tos(const ip6_header_t *ip6)
+ip6_translate_tos (const ip6_header_t * ip6)
 {
 #ifdef IP6_MAP_T_OVERRIDE_TOS
   return IP6_MAP_T_OVERRIDE_TOS;
 #else
-  return (clib_net_to_host_u32(ip6->ip_version_traffic_class_and_flow_label) & 0x0ff00000) >> 20;
+  return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label)
+         & 0x0ff00000) >> 20;
 #endif
 }
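
/*
 * Illustrative sketch (assumption; standalone C): the mask-and-shift done by
 * ip6_translate_tos above.  The first 32-bit word of an IPv6 header is
 * version (4 bits), traffic class (8 bits), flow label (20 bits), so in host
 * byte order the traffic class occupies bits 20..27.
 */
#include <stdint.h>

static uint8_t
sketch_ip6_traffic_class (uint32_t first_word_host_order)
{
  return (uint8_t) ((first_word_host_order & 0x0ff00000) >> 20);
}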
 
 //TODO: Find right place in memory for that
+/* *INDENT-OFF* */
 static u8 icmp6_to_icmp_updater_pointer_table[] =
-    { 0, 1,~0,~0,
-      2, 2, 9, 8,
-      12,12,12,12,
-      12,12,12,12,
-      12,12,12,12,
-      12,12,12,12,
-      24,24,24,24,
-      24,24,24,24,
-      24,24,24,24,
-      24,24,24,24
-    };
+  { 0, 1, ~0, ~0,
+    2, 2, 9, 8,
+    12, 12, 12, 12,
+    12, 12, 12, 12,
+    12, 12, 12, 12,
+    12, 12, 12, 12,
+    24, 24, 24, 24,
+    24, 24, 24, 24,
+    24, 24, 24, 24,
+    24, 24, 24, 24
+  };
+/* *INDENT-ON* */
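
/*
 * Illustrative sketch (assumption; standalone C): how the updater table
 * above is used.  An ICMPv6 parameter-problem pointer indexes a byte of the
 * fixed IPv6 header; the table yields the byte offset of the corresponding
 * IPv4 header field, with ~0 (0xff) marking fields that have no IPv4
 * counterpart.
 */
#include <stdint.h>

static int
sketch_translate_pointer (const uint8_t * table, uint32_t pointer6,
			  uint8_t * pointer4)
{
  if (pointer6 >= 40)		/* beyond the fixed IPv6 header */
    return -1;
  if (table[pointer6] == 0xff)	/* no equivalent IPv4 field */
    return -1;
  *pointer4 = table[pointer6];
  return 0;
}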
 
 static_always_inline int
-ip6_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
-                            i32 *sender_port,  ip6_header_t **inner_ip6)
+ip6_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
+                           i32 * sender_port, ip6_header_t ** inner_ip6)
 {
   *inner_ip6 = NULL;
-  switch (icmp->type) {
+  switch (icmp->type)
+    {
     case ICMP6_echo_request:
-      *sender_port = ((u16 *)icmp)[2];
+      *sender_port = ((u16 *) icmp)[2];
       icmp->type = ICMP4_echo_request;
       break;
     case ICMP6_echo_reply:
-      *sender_port = ((u16 *)icmp)[2];
+      *sender_port = ((u16 *) icmp)[2];
       icmp->type = ICMP4_echo_reply;
       break;
     case ICMP6_destination_unreachable:
-    *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
-    *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
-
-    switch (icmp->code) {
-      case ICMP6_destination_unreachable_no_route_to_destination: //0
-      case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2
-      case ICMP6_destination_unreachable_address_unreachable: //3
-       icmp->type = ICMP4_destination_unreachable;
-       icmp->code = ICMP4_destination_unreachable_destination_unreachable_host;
-       break;
-      case ICMP6_destination_unreachable_destination_administratively_prohibited: //1
-       icmp->type = ICMP4_destination_unreachable;
-       icmp->code = ICMP4_destination_unreachable_communication_administratively_prohibited;
-       break;
-      case ICMP6_destination_unreachable_port_unreachable:
-       icmp->type = ICMP4_destination_unreachable;
-       icmp->code = ICMP4_destination_unreachable_port_unreachable;
-       break;
-      default:
-       return -1;
-    }
-    break;
+      *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+      *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
+
+      switch (icmp->code)
+       {
+       case ICMP6_destination_unreachable_no_route_to_destination:     //0
+       case ICMP6_destination_unreachable_beyond_scope_of_source_address:      //2
+       case ICMP6_destination_unreachable_address_unreachable: //3
+         icmp->type = ICMP4_destination_unreachable;
+         icmp->code =
+           ICMP4_destination_unreachable_destination_unreachable_host;
+         break;
+       case ICMP6_destination_unreachable_destination_administratively_prohibited:     //1
+         icmp->type =
+           ICMP4_destination_unreachable;
+         icmp->code =
+           ICMP4_destination_unreachable_communication_administratively_prohibited;
+         break;
+       case ICMP6_destination_unreachable_port_unreachable:
+         icmp->type = ICMP4_destination_unreachable;
+         icmp->code = ICMP4_destination_unreachable_port_unreachable;
+         break;
+       default:
+         return -1;
+       }
+      break;
     case ICMP6_packet_too_big:
-      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
-      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
+      *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+      *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
 
       icmp->type = ICMP4_destination_unreachable;
       icmp->code = 4;
       {
-       u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
+       u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
        advertised_mtu -= 20;
-         //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
-       ((u16 *)(icmp))[3] = clib_host_to_net_u16(advertised_mtu);
+       //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
+       ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu);
       }
       break;
 
     case ICMP6_time_exceeded:
-      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
-      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
+      *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+      *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
 
       icmp->type = ICMP4_time_exceeded;
       break;
 
     case ICMP6_parameter_problem:
-      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
-      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
+      *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
+      *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
 
-      switch (icmp->code) {
+      switch (icmp->code)
+       {
        case ICMP6_parameter_problem_erroneous_header_field:
          icmp->type = ICMP4_parameter_problem;
          icmp->code = ICMP4_parameter_problem_pointer_indicates_error;
-         u32 pointer = clib_net_to_host_u32(*((u32*)(icmp + 1)));
+         u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
          if (pointer >= 40)
            return -1;
 
-         ((u8*)(icmp + 1))[0] = icmp6_to_icmp_updater_pointer_table[pointer];
+         ((u8 *) (icmp + 1))[0] =
+           icmp6_to_icmp_updater_pointer_table[pointer];
          break;
        case ICMP6_parameter_problem_unrecognized_next_header:
          icmp->type = ICMP4_destination_unreachable;
@@ -181,17 +207,17 @@ ip6_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
        case ICMP6_parameter_problem_unrecognized_option:
        default:
          return -1;
-      }
+       }
       break;
     default:
       return -1;
       break;
-  }
+    }
   return 0;
 }
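
/*
 * Illustrative sketch (assumption; standalone C): two details of the
 * in-place rewrite above.  For echo messages the 16-bit ICMP identifier is
 * reused as the "port" MAP operates on, and for packet-too-big the
 * advertised MTU is reduced by 20 bytes because the translated packet trades
 * a 40-byte IPv6 header for a 20-byte IPv4 one.
 */
#include <stdint.h>

static uint16_t
sketch_icmp_echo_identifier (const uint8_t * icmp)
{
  /* layout: type(1) code(1) checksum(2) identifier(2) sequence(2) */
  return (uint16_t) ((icmp[4] << 8) | icmp[5]);	/* host byte order */
}

static uint32_t
sketch_mtu6_to_mtu4 (uint32_t advertised_mtu6)
{
  return advertised_mtu6 - 20;
}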
 
 static_always_inline void
-_ip6_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
+_ip6_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
 {
   ip6_header_t *ip6, *inner_ip6;
   ip4_header_t *ip4, *inner_ip4;
@@ -201,872 +227,1207 @@ _ip6_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
   ip_csum_t csum;
   u32 ip4_sadr, inner_ip4_dadr;
 
-  ip6 = vlib_buffer_get_current(p);
-  ip6_pay_len = clib_net_to_host_u16(ip6->payload_length);
-  icmp = (icmp46_header_t *)(ip6 + 1);
-  ASSERT(ip6_pay_len + sizeof(*ip6) <= p->current_length);
-
-  if (ip6->protocol != IP_PROTOCOL_ICMP6) {
-    //No extensions headers allowed here
-    //TODO: SR header
-    *error = MAP_ERROR_MALFORMED;
-    return;
-  }
-
-  //There are no fragmented ICMP messages, so no extension header for now
-
-  if (ip6_icmp_to_icmp6_in_place(icmp, ip6_pay_len, &sender_port, &inner_ip6)) {
-    //TODO: In case of 1:1 mapping it is not necessary to have the sender port
-    *error = MAP_ERROR_ICMP;
-    return;
-  }
-
-  if (sender_port < 0) {
-    // In case of 1:1 mapping, we don't care about the port
-    if(d->ea_bits_len == 0 && d->rules) {
-      sender_port = 0;
-    } else {
-      *error = MAP_ERROR_ICMP;
-      return;
-    }
-  }
+  ip6 = vlib_buffer_get_current (p);
+  ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length);
+  icmp = (icmp46_header_t *) (ip6 + 1);
+  ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length);
 
-  //Security check
-  //Note that this prevents an intermediate IPv6 router from answering the request
-  ip4_sadr = map_get_ip4(&ip6->src_address);
-  if (ip6->src_address.as_u64[0] != map_get_pfx_net(d, ip4_sadr, sender_port) ||
-      ip6->src_address.as_u64[1] != map_get_sfx_net(d, ip4_sadr, sender_port)) {
-    *error = MAP_ERROR_SEC_CHECK;
-    return;
-  }
-
-  if (inner_ip6) {
-    u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, inner_frag_id;
-    u8 *inner_l4, inner_protocol;
-
-    //We have two headers to translate
-    //   FROM
-    //   [   IPv6   ]<- ext ->[IC][   IPv6   ]<- ext ->[L4 header ...
-    // Handled cases:
-    //                     [   IPv6   ][IC][   IPv6   ][L4 header ...
-    //                 [   IPv6   ][IC][   IPv6   ][Fr][L4 header ...
-    //    TO
-    //                               [ IPv4][IC][ IPv4][L4 header ...
-
-    //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
-    //We shouldn't have to do it again
-    if (ip6_parse(inner_ip6, ip6_pay_len - 8,
-                  &inner_protocol, &inner_l4_offset, &inner_frag_offset)) {
+  if (ip6->protocol != IP_PROTOCOL_ICMP6)
+    {
+      //No extension headers allowed here
+      //TODO: SR header
       *error = MAP_ERROR_MALFORMED;
       return;
     }
 
-    inner_l4 = u8_ptr_add(inner_ip6, inner_l4_offset);
-    inner_ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - sizeof(*inner_ip4));
-    if (inner_frag_offset) {
-      ip6_frag_hdr_t *inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, inner_frag_offset);
-      inner_frag_id = frag_id_6to4(inner_frag->identification);
-    } else {
-      inner_frag_id = 0;
-    }
+  //There are no fragmented ICMP messages, so no extension header for now
 
-    //Do the translation of the inner packet
-    if (inner_protocol == IP_PROTOCOL_TCP) {
-      inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 16);
-    } else if (inner_protocol == IP_PROTOCOL_UDP) {
-      inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 6);
-    } else if (inner_protocol == IP_PROTOCOL_ICMP6) {
-      icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
-      csum = inner_icmp->checksum;
-      csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
-      //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded
-      inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
-          ICMP4_echo_request : ICMP4_echo_reply;
-      csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
-      inner_icmp->checksum = ip_csum_fold(csum);
-      inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later
-      inner_L4_checksum = &inner_icmp->checksum;
-    } else {
-      *error = MAP_ERROR_BAD_PROTOCOL;
+  if (ip6_icmp_to_icmp6_in_place
+      (icmp, ip6_pay_len, &sender_port, &inner_ip6))
+    {
+      //TODO: In case of 1:1 mapping it is not necessary to have the sender port
+      *error = MAP_ERROR_ICMP;
       return;
     }
 
-    csum = *inner_L4_checksum;
-    csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[0]);
-    csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[1]);
-    csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[0]);
-    csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[1]);
-
-    //Sanity check of the outer destination address
-    if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] &&
-        ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1]) {
-      *error = MAP_ERROR_SEC_CHECK;
-      return;
+  if (sender_port < 0)
+    {
+      // In case of 1:1 mapping, we don't care about the port
+      if (d->ea_bits_len == 0 && d->rules)
+       {
+         sender_port = 0;
+       }
+      else
+       {
+         *error = MAP_ERROR_ICMP;
+         return;
+       }
     }
 
-    //Security check of inner packet
-    inner_ip4_dadr = map_get_ip4(&inner_ip6->dst_address);
-    if (inner_ip6->dst_address.as_u64[0] != map_get_pfx_net(d, inner_ip4_dadr, sender_port) ||
-        inner_ip6->dst_address.as_u64[1] != map_get_sfx_net(d, inner_ip4_dadr, sender_port)) {
+  //Security check
+  //Note that this prevents an intermediate IPv6 router from answering the request
+  ip4_sadr = map_get_ip4 (&ip6->src_address);
+  if (ip6->src_address.as_u64[0] != map_get_pfx_net (d, ip4_sadr, sender_port)
+      || ip6->src_address.as_u64[1] != map_get_sfx_net (d, ip4_sadr,
+                                                       sender_port))
+    {
       *error = MAP_ERROR_SEC_CHECK;
       return;
     }
 
-    inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
-    inner_ip4->src_address.as_u32 = ip6_map_t_embedded_address(d, &inner_ip6->src_address);
-    inner_ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-    inner_ip4->tos = ip6_translate_tos(inner_ip6);
-    inner_ip4->length = u16_net_add(inner_ip6->payload_length, sizeof(*ip4) + sizeof(*ip6) -
-                                    inner_l4_offset);
-    inner_ip4->fragment_id = inner_frag_id;
-    inner_ip4->flags_and_fragment_offset = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
-    inner_ip4->ttl = inner_ip6->hop_limit;
-    inner_ip4->protocol = inner_protocol;
-    inner_ip4->checksum = ip4_header_checksum(inner_ip4);
-
-    if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
-      //Remove remainings of the pseudo-header in the csum
-      csum = ip_csum_sub_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
-      csum = ip_csum_sub_even(csum, inner_ip4->length - sizeof(*inner_ip4));
-    } else {
-      //Update to new pseudo-header
-      csum = ip_csum_add_even(csum, inner_ip4->src_address.as_u32);
-      csum = ip_csum_add_even(csum, inner_ip4->dst_address.as_u32);
+  if (inner_ip6)
+    {
+      u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset,
+       inner_frag_id;
+      u8 *inner_l4, inner_protocol;
+
+      //We have two headers to translate
+      //   FROM
+      //   [   IPv6   ]<- ext ->[IC][   IPv6   ]<- ext ->[L4 header ...
+      // Handled cases:
+      //                     [   IPv6   ][IC][   IPv6   ][L4 header ...
+      //                 [   IPv6   ][IC][   IPv6   ][Fr][L4 header ...
+      //    TO
+      //                               [ IPv4][IC][ IPv4][L4 header ...
+
+      //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
+      //We shouldn't have to do it again
+      if (ip6_parse (inner_ip6, ip6_pay_len - 8,
+                    &inner_protocol, &inner_l4_offset, &inner_frag_offset))
+       {
+         *error = MAP_ERROR_MALFORMED;
+         return;
+       }
+
+      inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset);
+      inner_ip4 =
+       (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4));
+      if (inner_frag_offset)
+       {
+         ip6_frag_hdr_t *inner_frag =
+           (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset);
+         inner_frag_id = frag_id_6to4 (inner_frag->identification);
+       }
+      else
+       {
+         inner_frag_id = 0;
+       }
+
+      //Do the translation of the inner packet
+      if (inner_protocol == IP_PROTOCOL_TCP)
+       {
+         inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16);
+       }
+      else if (inner_protocol == IP_PROTOCOL_UDP)
+       {
+         inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6);
+       }
+      else if (inner_protocol == IP_PROTOCOL_ICMP6)
+       {
+         icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
+         csum = inner_icmp->checksum;
+         csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
+         //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded
+         inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
+           ICMP4_echo_request : ICMP4_echo_reply;
+         csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
+         inner_icmp->checksum = ip_csum_fold (csum);
+         inner_protocol = IP_PROTOCOL_ICMP;    //Will be copied to ip6 later
+         inner_L4_checksum = &inner_icmp->checksum;
+       }
+      else
+       {
+         *error = MAP_ERROR_BAD_PROTOCOL;
+         return;
+       }
+
+      csum = *inner_L4_checksum;
+      csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]);
+      csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
+      csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
+      csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
+
+      //Sanity check of the outer destination address
+      if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] &&
+         ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1])
+       {
+         *error = MAP_ERROR_SEC_CHECK;
+         return;
+       }
+
+      //Security check of inner packet
+      inner_ip4_dadr = map_get_ip4 (&inner_ip6->dst_address);
+      if (inner_ip6->dst_address.as_u64[0] !=
+         map_get_pfx_net (d, inner_ip4_dadr, sender_port)
+         || inner_ip6->dst_address.as_u64[1] != map_get_sfx_net (d,
+                                                                 inner_ip4_dadr,
+                                                                 sender_port))
+       {
+         *error = MAP_ERROR_SEC_CHECK;
+         return;
+       }
+
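+      //Rewrite the inner header as IPv4, in place, right in front of the inner L4 header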
+      inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
+      inner_ip4->src_address.as_u32 =
+       ip6_map_t_embedded_address (d, &inner_ip6->src_address);
+      inner_ip4->ip_version_and_header_length =
+       IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+      inner_ip4->tos = ip6_translate_tos (inner_ip6);
+      inner_ip4->length =
+       u16_net_add (inner_ip6->payload_length,
+                    sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset);
+      inner_ip4->fragment_id = inner_frag_id;
+      inner_ip4->flags_and_fragment_offset =
+       clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+      inner_ip4->ttl = inner_ip6->hop_limit;
+      inner_ip4->protocol = inner_protocol;
+      inner_ip4->checksum = ip4_header_checksum (inner_ip4);
+
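+      //ICMPv4 checksums, unlike ICMPv6, do not cover a pseudo-header, so
+      //the two cases are adjusted differently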
+      if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
+       {
+         //Remove what remains of the IPv6 pseudo-header from the csum
+         csum =
+           ip_csum_sub_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
+         csum =
+           ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4));
+       }
+      else
+       {
+         //Update to new pseudo-header
+         csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32);
+         csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32);
+       }
+      *inner_L4_checksum = ip_csum_fold (csum);
+
+      //Move the icmp header up, between the new outer and inner IPv4 headers
+      ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8);
+      clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8);
+      icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8);
     }
-    *inner_L4_checksum = ip_csum_fold(csum);
-
-    //Move up icmp header
-    ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - 2 * sizeof(*ip4) - 8);
-    clib_memcpy(u8_ptr_add(inner_l4, - sizeof(*ip4) - 8), icmp, 8);
-    icmp = (icmp46_header_t *) u8_ptr_add(inner_l4, - sizeof(*ip4) - 8);
-  } else {
-    //Only one header to translate
-    ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
-  }
-  vlib_buffer_advance(p, (u32) (((u8 *)ip4) - ((u8 *)ip6)));
-
-  ip4->dst_address.as_u32 = ip6_map_t_embedded_address(d, &ip6->dst_address);
+  else
+    {
+      //Only one header to translate
+      ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
+    }
+  vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6)));
+
+  ip4->dst_address.as_u32 = ip6_map_t_embedded_address (d, &ip6->dst_address);
   ip4->src_address.as_u32 = ip4_sadr;
-  ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-  ip4->tos = ip6_translate_tos(ip6);
+  ip4->ip_version_and_header_length =
+    IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+  ip4->tos = ip6_translate_tos (ip6);
   ip4->fragment_id = 0;
   ip4->flags_and_fragment_offset = 0;
   ip4->ttl = ip6->hop_limit;
   ip4->protocol = IP_PROTOCOL_ICMP;
   //TODO fix the length depending on offset length
-  ip4->length = u16_net_add(ip6->payload_length,
-                            (inner_ip6 == NULL)?sizeof(*ip4):(2*sizeof(*ip4) - sizeof(*ip6)));
-  ip4->checksum = ip4_header_checksum(ip4);
+  ip4->length = u16_net_add (ip6->payload_length,
+                            (inner_ip6 ==
+                             NULL) ? sizeof (*ip4) : (2 * sizeof (*ip4) -
+                                                      sizeof (*ip6)));
+  ip4->checksum = ip4_header_checksum (ip4);
 
   //TODO: We could do an easy diff-checksum for echo requests/replies
   //Recompute ICMP checksum
   icmp->checksum = 0;
-  csum = ip_incremental_checksum(0, icmp, clib_net_to_host_u16(ip4->length) - sizeof(*ip4));
+  csum =
+    ip_incremental_checksum (0, icmp,
+                            clib_net_to_host_u16 (ip4->length) -
+                            sizeof (*ip4));
   icmp->checksum = ~ip_csum_fold (csum);
 }
 
 static uword
-ip6_map_t_icmp (vlib_main_t *vm,
-                vlib_node_runtime_t *node,
-                vlib_frame_t *frame)
+ip6_map_t_icmp (vlib_main_t * vm,
+               vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
   vlib_combined_counter_main_t *cm = map_main.domain_counters;
-  u32 cpu_index = os_get_cpu_number();
-
-  while (n_left_from > 0) {
-    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      u8 error0;
-      ip6_mapt_icmp_next_t next0;
-      map_domain_t *d0;
-      u16 len0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-      error0 = MAP_ERROR_NONE;
-      next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      len0 = clib_net_to_host_u16(((ip6_header_t *)vlib_buffer_get_current(p0))->payload_length);
-      d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
-      _ip6_map_t_icmp(d0, p0, &error0);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        //Send to fragmentation node if necessary
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
-      }
-
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
-                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                        len0);
-      } else {
-        next0 = IP6_MAPT_ICMP_NEXT_DROP;
-      }
-
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+  u32 cpu_index = os_get_cpu_number ();
+
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         u8 error0;
+         ip6_mapt_icmp_next_t next0;
+         map_domain_t *d0;
+         u16 len0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+         error0 = MAP_ERROR_NONE;
+         next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         len0 =
+           clib_net_to_host_u16 (((ip6_header_t *)
+                                  vlib_buffer_get_current
+                                  (p0))->payload_length);
+         d0 =
+           pool_elt_at_index (map_main.domains,
+                              vnet_buffer (p0)->map_t.map_domain_index);
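+         //Translate the ICMPv6 message (and the embedded packet, if any) in place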
+         _ip6_map_t_icmp (d0, p0, &error0);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             //Send to fragmentation node if necessary
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
+           }
+
+         if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->
+                                              map_t.map_domain_index, 1,
+                                              len0);
+           }
+         else
+           {
+             next0 = IP6_MAPT_ICMP_NEXT_DROP;
+           }
+
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
 
 static uword
-ip6_map_t_fragmented (vlib_main_t *vm,
-                      vlib_node_runtime_t *node,
-                      vlib_frame_t *frame)
+ip6_map_t_fragmented (vlib_main_t * vm,
+                     vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
 
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
 #ifdef IP6_MAP_T_DUAL_LOOP
-    while(n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      ip6_header_t *ip60, *ip61;
-      ip6_frag_hdr_t *frag0, *frag1;
-      ip4_header_t *ip40, *ip41;
-      u16 frag_id0, frag_offset0,
-          frag_id1, frag_offset1;
-      u8 frag_more0, frag_more1;
-      u32 next0, next1;
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next += 2;
-      n_left_to_next -= 2;
-
-      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-      next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip60 = vlib_buffer_get_current(p0);
-      ip61 = vlib_buffer_get_current(p1);
-      frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-      frag1 = (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
-      ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      ip41 = (ip4_header_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
-
-      frag_id0 = frag_id_6to4(frag0->identification);
-      frag_id1 = frag_id_6to4(frag1->identification);
-      frag_more0 = ip6_frag_hdr_more(frag0);
-      frag_more1 = ip6_frag_hdr_more(frag1);
-      frag_offset0 = ip6_frag_hdr_offset(frag0);
-      frag_offset1 = ip6_frag_hdr_offset(frag1);
-
-      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
-      ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
-      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
-      ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
-      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip40->tos = ip6_translate_tos(ip60);
-      ip41->tos = ip6_translate_tos(ip61);
-      ip40->length = u16_net_add(ip60->payload_length,
-                                 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
-      ip41->length = u16_net_add(ip61->payload_length,
-                                 sizeof(*ip40) - vnet_buffer(p1)->map_t.v6.l4_offset + sizeof(*ip60));
-      ip40->fragment_id = frag_id0;
-      ip41->fragment_id = frag_id1;
-      ip40->flags_and_fragment_offset =
-            clib_host_to_net_u16(frag_offset0 | (frag_more0?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
-      ip41->flags_and_fragment_offset =
-            clib_host_to_net_u16(frag_offset1 | (frag_more1?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
-      ip40->ttl = ip60->hop_limit;
-      ip41->ttl = ip61->hop_limit;
-      ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
-            IP_PROTOCOL_ICMP:vnet_buffer(p0)->map_t.v6.l4_protocol;
-      ip41->protocol = (vnet_buffer(p1)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
-            IP_PROTOCOL_ICMP:vnet_buffer(p1)->map_t.v6.l4_protocol;
-      ip40->checksum = ip4_header_checksum(ip40);
-      ip41->checksum = ip4_header_checksum(ip41);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
-      }
-
-      if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
-        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
-        vnet_buffer(p1)->ip_frag.header_offset = 0;
-        vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index,
-                                      to_next, n_left_to_next, pi0, pi1,
-                                      next0, next1);
-    }
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         ip6_header_t *ip60, *ip61;
+         ip6_frag_hdr_t *frag0, *frag1;
+         ip4_header_t *ip40, *ip41;
+         u16 frag_id0, frag_offset0, frag_id1, frag_offset1;
+         u8 frag_more0, frag_more1;
+         u32 next0, next1;
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+
+         next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+         next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip60 = vlib_buffer_get_current (p0);
+         ip61 = vlib_buffer_get_current (p1);
+         frag0 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                          vnet_buffer (p0)->map_t.
+                                          v6.frag_offset);
+         frag1 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+                                          vnet_buffer (p1)->map_t.
+                                          v6.frag_offset);
+         ip40 =
+           (ip4_header_t *) u8_ptr_add (ip60,
+                                        vnet_buffer (p0)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         ip41 =
+           (ip4_header_t *) u8_ptr_add (ip61,
+                                        vnet_buffer (p1)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         vlib_buffer_advance (p0,
+                              vnet_buffer (p0)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+         vlib_buffer_advance (p1,
+                              vnet_buffer (p1)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+
+         frag_id0 = frag_id_6to4 (frag0->identification);
+         frag_id1 = frag_id_6to4 (frag1->identification);
+         frag_more0 = ip6_frag_hdr_more (frag0);
+         frag_more1 = ip6_frag_hdr_more (frag1);
+         frag_offset0 = ip6_frag_hdr_offset (frag0);
+         frag_offset1 = ip6_frag_hdr_offset (frag1);
+
+         ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+         ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
+         ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+         ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
+         ip40->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip41->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip40->tos = ip6_translate_tos (ip60);
+         ip41->tos = ip6_translate_tos (ip61);
+         ip40->length = u16_net_add (ip60->payload_length,
+                                     sizeof (*ip40) -
+                                     vnet_buffer (p0)->map_t.v6.l4_offset +
+                                     sizeof (*ip60));
+         ip41->length =
+           u16_net_add (ip61->payload_length,
+                        sizeof (*ip40) -
+                        vnet_buffer (p1)->map_t.v6.l4_offset +
+                        sizeof (*ip60));
+         ip40->fragment_id = frag_id0;
+         ip41->fragment_id = frag_id1;
+         ip40->flags_and_fragment_offset =
+           clib_host_to_net_u16 (frag_offset0 |
+                                 (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+                                  : 0));
+         ip41->flags_and_fragment_offset =
+           clib_host_to_net_u16 (frag_offset1 |
+                                 (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+                                  : 0));
+         ip40->ttl = ip60->hop_limit;
+         ip41->ttl = ip61->hop_limit;
+         ip40->protocol =
+           (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+            IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
+           map_t.v6.l4_protocol;
+         ip41->protocol =
+           (vnet_buffer (p1)->map_t.v6.l4_protocol ==
+            IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p1)->
+           map_t.v6.l4_protocol;
+         ip40->checksum = ip4_header_checksum (ip40);
+         ip41->checksum = ip4_header_checksum (ip41);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+           }
+
+         if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+           {
+             vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+             vnet_buffer (p1)->ip_frag.header_offset = 0;
+             vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0, pi1,
+                                          next0, next1);
+       }
 #endif
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip6_header_t *ip60;
-      ip6_frag_hdr_t *frag0;
-      ip4_header_t *ip40;
-      u16 frag_id0;
-      u8 frag_more0;
-      u16 frag_offset0;
-      u32 next0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-
-      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-      ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-
-      frag_id0 = frag_id_6to4(frag0->identification);
-      frag_more0 = ip6_frag_hdr_more(frag0);
-      frag_offset0 = ip6_frag_hdr_offset(frag0);
-
-      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
-      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
-      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip40->tos = ip6_translate_tos(ip60);
-      ip40->length = u16_net_add(ip60->payload_length,
-                                 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
-      ip40->fragment_id = frag_id0;
-      ip40->flags_and_fragment_offset =
-          clib_host_to_net_u16(frag_offset0 | (frag_more0?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
-      ip40->ttl = ip60->hop_limit;
-      ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
-          IP_PROTOCOL_ICMP:vnet_buffer(p0)->map_t.v6.l4_protocol;
-      ip40->checksum = ip4_header_checksum(ip40);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        //Send to fragmentation node if necessary
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip6_header_t *ip60;
+         ip6_frag_hdr_t *frag0;
+         ip4_header_t *ip40;
+         u16 frag_id0;
+         u8 frag_more0;
+         u16 frag_offset0;
+         u32 next0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+
+         next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         frag0 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                          vnet_buffer (p0)->map_t.
+                                          v6.frag_offset);
+         ip40 =
+           (ip4_header_t *) u8_ptr_add (ip60,
+                                        vnet_buffer (p0)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         vlib_buffer_advance (p0,
+                              vnet_buffer (p0)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+
+         frag_id0 = frag_id_6to4 (frag0->identification);
+         frag_more0 = ip6_frag_hdr_more (frag0);
+         frag_offset0 = ip6_frag_hdr_offset (frag0);
+
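+         //Build the IPv4 header in place; the fragment id (folded to 16 bits),
+         //offset and MF flag are carried over from the IPv6 fragment header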
+         ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+         ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+         ip40->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip40->tos = ip6_translate_tos (ip60);
+         ip40->length = u16_net_add (ip60->payload_length,
+                                     sizeof (*ip40) -
+                                     vnet_buffer (p0)->map_t.v6.l4_offset +
+                                     sizeof (*ip60));
+         ip40->fragment_id = frag_id0;
+         ip40->flags_and_fragment_offset =
+           clib_host_to_net_u16 (frag_offset0 |
+                                 (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
+                                  : 0));
+         ip40->ttl = ip60->hop_limit;
+         ip40->protocol =
+           (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+            IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
+           map_t.v6.l4_protocol;
+         ip40->checksum = ip4_header_checksum (ip40);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             //Send to fragmentation node if necessary
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
 
 static uword
-ip6_map_t_tcp_udp (vlib_main_t *vm,
-                   vlib_node_runtime_t *node,
-                   vlib_frame_t *frame)
+ip6_map_t_tcp_udp (vlib_main_t * vm,
+                  vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
 #ifdef IP6_MAP_T_DUAL_LOOP
-    while(n_left_from >= 4 && n_left_to_next >= 2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      ip6_header_t *ip60, *ip61;
-      ip_csum_t csum0, csum1;
-      ip4_header_t *ip40, *ip41;
-      u16 fragment_id0, flags0, *checksum0,
-          fragment_id1, flags1, *checksum1;
-      ip6_mapt_tcp_udp_next_t next0, next1;
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next += 2;
-      n_left_to_next -= 2;
-      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-      next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip60 = vlib_buffer_get_current(p0);
-      ip61 = vlib_buffer_get_current(p1);
-      ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      ip41 = (ip4_header_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
-      checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);
-      checksum1 = (u16 *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.checksum_offset);
-
-      csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
-      csum1 = ip_csum_sub_even(*checksum1, ip61->src_address.as_u64[0]);
-      csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
-      csum1 = ip_csum_sub_even(csum1, ip61->src_address.as_u64[1]);
-      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
-      csum1 = ip_csum_sub_even(csum0, ip61->dst_address.as_u64[0]);
-      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
-      csum1 = ip_csum_sub_even(csum1, ip61->dst_address.as_u64[1]);
-      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
-      csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.daddr);
-      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
-      csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.saddr);
-      *checksum0 = ip_csum_fold(csum0);
-      *checksum1 = ip_csum_fold(csum1);
-
-      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
-        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-        fragment_id0 = frag_id_6to4(hdr->identification);
-        flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
-      } else {
-        fragment_id0 = 0;
-        flags0 = 0;
-      }
-
-      if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset)) {
-        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
-        fragment_id1 = frag_id_6to4(hdr->identification);
-        flags1 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
-      } else {
-        fragment_id1 = 0;
-        flags1 = 0;
-      }
-
-      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
-      ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
-      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
-      ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
-      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip40->tos = ip6_translate_tos(ip60);
-      ip41->tos = ip6_translate_tos(ip61);
-      ip40->length = u16_net_add(ip60->payload_length,
-                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
-      ip41->length = u16_net_add(ip61->payload_length,
-                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset);
-      ip40->fragment_id = fragment_id0;
-      ip41->fragment_id = fragment_id1;
-      ip40->flags_and_fragment_offset = flags0;
-      ip41->flags_and_fragment_offset = flags1;
-      ip40->ttl = ip60->hop_limit;
-      ip41->ttl = ip61->hop_limit;
-      ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
-      ip41->protocol = vnet_buffer(p1)->map_t.v6.l4_protocol;
-      ip40->checksum = ip4_header_checksum(ip40);
-      ip41->checksum = ip4_header_checksum(ip41);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
-      }
-
-      if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
-        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
-        vnet_buffer(p1)->ip_frag.header_offset = 0;
-        vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
-                                      n_left_to_next, pi0, pi1, next0, next1);
-    }
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         ip6_header_t *ip60, *ip61;
+         ip_csum_t csum0, csum1;
+         ip4_header_t *ip40, *ip41;
+         u16 fragment_id0, flags0, *checksum0,
+           fragment_id1, flags1, *checksum1;
+         ip6_mapt_tcp_udp_next_t next0, next1;
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+         next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip60 = vlib_buffer_get_current (p0);
+         ip61 = vlib_buffer_get_current (p1);
+         ip40 =
+           (ip4_header_t *) u8_ptr_add (ip60,
+                                        vnet_buffer (p0)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         ip41 =
+           (ip4_header_t *) u8_ptr_add (ip61,
+                                        vnet_buffer (p1)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         vlib_buffer_advance (p0,
+                              vnet_buffer (p0)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+         vlib_buffer_advance (p1,
+                              vnet_buffer (p1)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+         checksum0 =
+           (u16 *) u8_ptr_add (ip60,
+                               vnet_buffer (p0)->map_t.checksum_offset);
+         checksum1 =
+           (u16 *) u8_ptr_add (ip61,
+                               vnet_buffer (p1)->map_t.checksum_offset);
+
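+         //Incrementally fix the L4 checksums: subtract the IPv6 addresses
+         //from the pseudo-header and add the translated IPv4 addresses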
+         csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
+         csum1 = ip_csum_sub_even (*checksum1, ip61->src_address.as_u64[0]);
+         csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
+         csum1 = ip_csum_sub_even (csum1, ip61->src_address.as_u64[1]);
+         csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
+         csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[0]);
+         csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
+         csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[1]);
+         csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
+         csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.daddr);
+         csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
+         csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.saddr);
+         *checksum0 = ip_csum_fold (csum0);
+         *checksum1 = ip_csum_fold (csum1);
+
+         if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
+           {
+             ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                                                  vnet_buffer
+                                                                  (p0)->
+                                                                  map_t.
+                                                                  v6.frag_offset);
+             fragment_id0 = frag_id_6to4 (hdr->identification);
+             flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+           }
+         else
+           {
+             fragment_id0 = 0;
+             flags0 = 0;
+           }
+
+         if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset))
+           {
+             ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+                                                                  vnet_buffer
+                                                                  (p1)->
+                                                                  map_t.
+                                                                  v6.frag_offset);
+             fragment_id1 = frag_id_6to4 (hdr->identification);
+             flags1 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+           }
+         else
+           {
+             fragment_id1 = 0;
+             flags1 = 0;
+           }
+
+         ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+         ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
+         ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+         ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
+         ip40->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip41->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip40->tos = ip6_translate_tos (ip60);
+         ip41->tos = ip6_translate_tos (ip61);
+         ip40->length = u16_net_add (ip60->payload_length,
+                                     sizeof (*ip40) + sizeof (*ip60) -
+                                     vnet_buffer (p0)->map_t.v6.l4_offset);
+         ip41->length =
+           u16_net_add (ip61->payload_length,
+                        sizeof (*ip40) + sizeof (*ip60) -
+                        vnet_buffer (p1)->map_t.v6.l4_offset);
+         ip40->fragment_id = fragment_id0;
+         ip41->fragment_id = fragment_id1;
+         ip40->flags_and_fragment_offset = flags0;
+         ip41->flags_and_fragment_offset = flags1;
+         ip40->ttl = ip60->hop_limit;
+         ip41->ttl = ip61->hop_limit;
+         ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
+         ip41->protocol = vnet_buffer (p1)->map_t.v6.l4_protocol;
+         ip40->checksum = ip4_header_checksum (ip40);
+         ip41->checksum = ip4_header_checksum (ip41);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+           }
+
+         if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
+           {
+             vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
+             vnet_buffer (p1)->ip_frag.header_offset = 0;
+             vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, pi1, next0,
+                                          next1);
+       }
 #endif
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip6_header_t *ip60;
-      u16 *checksum0;
-      ip_csum_t csum0;
-      ip4_header_t *ip40;
-      u16 fragment_id0;
-      u16 flags0;
-      ip6_mapt_tcp_udp_next_t next0;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
-      checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);
-
-      //TODO: This can probably be optimized
-      csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
-      csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
-      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
-      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
-      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
-      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
-      *checksum0 = ip_csum_fold(csum0);
-
-      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
-        //Only the first fragment
-        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-        fragment_id0 = frag_id_6to4(hdr->identification);
-        flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
-      } else {
-        fragment_id0 = 0;
-        flags0 = 0;
-      }
-
-      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
-      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
-      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
-      ip40->tos = ip6_translate_tos(ip60);
-      ip40->length = u16_net_add(ip60->payload_length,
-                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
-      ip40->fragment_id = fragment_id0;
-      ip40->flags_and_fragment_offset = flags0;
-      ip40->ttl = ip60->hop_limit;
-      ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
-      ip40->checksum = ip4_header_checksum(ip40);
-
-      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
-        //Send to fragmentation node if necessary
-        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
-        vnet_buffer(p0)->ip_frag.header_offset = 0;
-        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
-        next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
-      }
-
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip6_header_t *ip60;
+         u16 *checksum0;
+         ip_csum_t csum0;
+         ip4_header_t *ip40;
+         u16 fragment_id0;
+         u16 flags0;
+         ip6_mapt_tcp_udp_next_t next0;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+         next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         ip40 =
+           (ip4_header_t *) u8_ptr_add (ip60,
+                                        vnet_buffer (p0)->map_t.
+                                        v6.l4_offset - sizeof (*ip40));
+         vlib_buffer_advance (p0,
+                              vnet_buffer (p0)->map_t.v6.l4_offset -
+                              sizeof (*ip40));
+         checksum0 =
+           (u16 *) u8_ptr_add (ip60,
+                               vnet_buffer (p0)->map_t.checksum_offset);
+
+         //TODO: This can probably be optimized
+         csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
+         csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
+         csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
+         csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
+         csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
+         csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
+         *checksum0 = ip_csum_fold (csum0);
+
+         if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
+           {
+             //Only the first fragment (offset 0) can reach this node
+             ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                                                  vnet_buffer
+                                                                  (p0)->
+                                                                  map_t.
+                                                                  v6.frag_offset);
+             fragment_id0 = frag_id_6to4 (hdr->identification);
+             flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+           }
+         else
+           {
+             fragment_id0 = 0;
+             flags0 = 0;
+           }
+
+         ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
+         ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
+         ip40->ip_version_and_header_length =
+           IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
+         ip40->tos = ip6_translate_tos (ip60);
+         ip40->length = u16_net_add (ip60->payload_length,
+                                     sizeof (*ip40) + sizeof (*ip60) -
+                                     vnet_buffer (p0)->map_t.v6.l4_offset);
+         ip40->fragment_id = fragment_id0;
+         ip40->flags_and_fragment_offset = flags0;
+         ip40->ttl = ip60->hop_limit;
+         ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
+         ip40->checksum = ip4_header_checksum (ip40);
+
+         if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
+           {
+             //Send to fragmentation node if necessary
+             vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
+             vnet_buffer (p0)->ip_frag.header_offset = 0;
+             vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+             next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
+           }
+
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
 
 static_always_inline void
-ip6_map_t_classify(vlib_buffer_t *p0, ip6_header_t *ip60,
-                   map_domain_t *d0, i32 *src_port0,
-                   u8 *error0, ip6_mapt_next_t *next0,
-                   u32 l4_len0, ip6_frag_hdr_t *frag0)
+ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
+                   map_domain_t * d0, i32 * src_port0,
+                   u8 * error0, ip6_mapt_next_t * next0,
+                   u32 l4_len0, ip6_frag_hdr_t * frag0)
 {
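+  //Dispatch to the per-protocol translation nodes: non-first fragments to
+  //the fragmented node, TCP and UDP to the tcp_udp node, ICMPv6 to the
+  //icmp node. The L4 source port (or -1) is extracted for the MAP security check.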
-  if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
-                    ip6_frag_hdr_offset(frag0))) {
-    *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
-    if(d0->ea_bits_len == 0 && d0->rules) {
-      *src_port0 = 0;
-    } else {
-      *src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
-      *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
+  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+                    ip6_frag_hdr_offset (frag0)))
+    {
+      *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
+      if (d0->ea_bits_len == 0 && d0->rules)
+       {
+         *src_port0 = 0;
+       }
+      else
+       {
+         *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
+         *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
+       }
+    }
+  else
+    if (PREDICT_TRUE
+       (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
+    {
+      *error0 =
+       l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
+      vnet_buffer (p0)->map_t.checksum_offset =
+       vnet_buffer (p0)->map_t.v6.l4_offset + 16;
+      *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+      *src_port0 =
+       (i32) *
+       ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+    }
+  else
+    if (PREDICT_TRUE
+       (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
+    {
+      *error0 =
+       l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
+      vnet_buffer (p0)->map_t.checksum_offset =
+       vnet_buffer (p0)->map_t.v6.l4_offset + 6;
+      *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+      *src_port0 =
+       (i32) *
+       ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
     }
-  } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
-    *error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
-    vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
-    *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
-    *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
-  } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
-    *error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
-    vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
-    *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
-    *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
-  } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
-    *error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
-    *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
-    if(d0->ea_bits_len == 0 && d0->rules) {
-      *src_port0 = 0;
-    } else if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_reply ||
-        ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_request) {
-      *src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
+  else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
+    {
+      *error0 =
+       l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
+      *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
+      if (d0->ea_bits_len == 0 && d0->rules)
+       {
+         *src_port0 = 0;
+       }
+      else
+       if (((icmp46_header_t *)
+            u8_ptr_add (ip60,
+                        vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
+           ICMP6_echo_reply
+           || ((icmp46_header_t *)
+               u8_ptr_add (ip60,
+                           vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
+           ICMP6_echo_request)
+       {
+         *src_port0 =
+           (i32) *
+           ((u16 *)
+            u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
+       }
+    }
+  else
+    {
+      //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
+      *error0 = MAP_ERROR_BAD_PROTOCOL;
     }
-  } else {
-    //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
-    *error0 = MAP_ERROR_BAD_PROTOCOL;
-  }
 }
 
 static uword
-ip6_map_t (vlib_main_t *vm,
-           vlib_node_runtime_t *node,
-           vlib_frame_t *frame)
+ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_t_node.index);
+  vlib_node_runtime_t *error_node =
+    vlib_node_get_runtime (vm, ip6_map_t_node.index);
   vlib_combined_counter_main_t *cm = map_main.domain_counters;
-  u32 cpu_index = os_get_cpu_number();
+  u32 cpu_index = os_get_cpu_number ();
 
-  from = vlib_frame_vector_args(frame);
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
 #ifdef IP6_MAP_T_DUAL_LOOP
-    while (n_left_from >= 4 && n_left_to_next >=2) {
-      u32 pi0, pi1;
-      vlib_buffer_t *p0, *p1;
-      ip6_header_t *ip60, *ip61;
-      u8 error0, error1;
-      ip6_mapt_next_t next0, next1;
-      u32 l4_len0, l4_len1;
-      i32 src_port0, src_port1;
-      map_domain_t *d0, *d1;
-      ip6_frag_hdr_t *frag0, *frag1;
-      u32 saddr0, saddr1;
-      next0 = next1 = 0; //Because compiler whines
-
-      pi0 = to_next[0] = from[0];
-      pi1 = to_next[1] = from[1];
-      from += 2;
-      n_left_from -= 2;
-      to_next += 2;
-      n_left_to_next -= 2;
-
-      error0 = MAP_ERROR_NONE;
-      error1 = MAP_ERROR_NONE;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      p1 = vlib_get_buffer(vm, pi1);
-      ip60 = vlib_buffer_get_current(p0);
-      ip61 = vlib_buffer_get_current(p1);
-
-      saddr0 = map_get_ip4(&ip60->src_address);
-      saddr1 = map_get_ip4(&ip61->src_address);
-      d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
-                              (ip4_address_t *)&saddr0,
-                              &vnet_buffer(p0)->map_t.map_domain_index, &error0);
-      d1 = ip6_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
-                              (ip4_address_t *)&saddr1,
-                              &vnet_buffer(p1)->map_t.map_domain_index, &error1);
-
-      vnet_buffer(p0)->map_t.v6.saddr = saddr0;
-      vnet_buffer(p1)->map_t.v6.saddr = saddr1;
-      vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
-      vnet_buffer(p1)->map_t.v6.daddr = ip6_map_t_embedded_address(d1, &ip61->dst_address);
-      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
-      vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
-
-      if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
-                                  &(vnet_buffer(p0)->map_t.v6.l4_protocol),
-                                  &(vnet_buffer(p0)->map_t.v6.l4_offset),
-                                  &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
-        error0 = MAP_ERROR_MALFORMED;
-        next0 = IP6_MAPT_NEXT_DROP;
-      }
-
-      if (PREDICT_FALSE(ip6_parse(ip61, p1->current_length,
-                                  &(vnet_buffer(p1)->map_t.v6.l4_protocol),
-                                  &(vnet_buffer(p1)->map_t.v6.l4_offset),
-                                  &(vnet_buffer(p1)->map_t.v6.frag_offset)))) {
-        error1 = MAP_ERROR_MALFORMED;
-        next1 = IP6_MAPT_NEXT_DROP;
-      }
-
-      src_port0 = src_port1 = -1;
-      l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
-          sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
-      l4_len1 = (u32)clib_net_to_host_u16(ip61->payload_length) +
-                sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset;
-      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-      frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
-
-      ip6_map_t_classify(p0, ip60, d0, &src_port0, &error0, &next0, l4_len0, frag0);
-      ip6_map_t_classify(p1, ip61, d1, &src_port1, &error1, &next1, l4_len1, frag1);
-
-      if (PREDICT_FALSE((src_port0 != -1) && (
-          ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
-          ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
-        error0 = MAP_ERROR_SEC_CHECK;
-      }
-
-      if (PREDICT_FALSE((src_port1 != -1) && (
-          ip61->src_address.as_u64[0] != map_get_pfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1) ||
-          ip61->src_address.as_u64[1] != map_get_sfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1)))) {
-        error1 = MAP_ERROR_SEC_CHECK;
-      }
-
-      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
-                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
-                                             u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
-          (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)  && (error0 == MAP_ERROR_NONE)) {
-        ip6_map_fragment_cache(ip60,
-                               (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
-                               d0, src_port0);
-      }
-
-      if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset &&
-                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
-                                             u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset))) &&
-          (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules) && (error1 == MAP_ERROR_NONE)) {
-        ip6_map_fragment_cache(ip61,
-                               (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset),
-                               d1, src_port1);
-      }
-
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
-                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip60->payload_length));
-      }
-
-      if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
-                                        vnet_buffer(p1)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip61->payload_length));
-      }
-
-      next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
-      next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
-      p0->error = error_node->errors[error0];
-      p1->error = error_node->errors[error1];
-      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
-    }
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 pi0, pi1;
+         vlib_buffer_t *p0, *p1;
+         ip6_header_t *ip60, *ip61;
+         u8 error0, error1;
+         ip6_mapt_next_t next0, next1;
+         u32 l4_len0, l4_len1;
+         i32 src_port0, src_port1;
+         map_domain_t *d0, *d1;
+         ip6_frag_hdr_t *frag0, *frag1;
+         u32 saddr0, saddr1;
+         next0 = next1 = 0;    //Because compiler whines
+
+         pi0 = to_next[0] = from[0];
+         pi1 = to_next[1] = from[1];
+         from += 2;
+         n_left_from -= 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+
+         error0 = MAP_ERROR_NONE;
+         error1 = MAP_ERROR_NONE;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         p1 = vlib_get_buffer (vm, pi1);
+         ip60 = vlib_buffer_get_current (p0);
+         ip61 = vlib_buffer_get_current (p1);
+
+         saddr0 = map_get_ip4 (&ip60->src_address);
+         saddr1 = map_get_ip4 (&ip61->src_address);
+         d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                  (ip4_address_t *) & saddr0,
+                                  &vnet_buffer (p0)->map_t.map_domain_index,
+                                  &error0);
+         d1 =
+           ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
+                               (ip4_address_t *) & saddr1,
+                               &vnet_buffer (p1)->map_t.map_domain_index,
+                               &error1);
+
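+         //Stash the extracted IPv4 addresses and the domain MTU in buffer metadata for the downstream MAP-T nodes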
+         vnet_buffer (p0)->map_t.v6.saddr = saddr0;
+         vnet_buffer (p1)->map_t.v6.saddr = saddr1;
+         vnet_buffer (p0)->map_t.v6.daddr =
+           ip6_map_t_embedded_address (d0, &ip60->dst_address);
+         vnet_buffer (p1)->map_t.v6.daddr =
+           ip6_map_t_embedded_address (d1, &ip61->dst_address);
+         vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+         vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
+
+         if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.l4_protocol),
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.l4_offset),
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.frag_offset))))
+           {
+             error0 = MAP_ERROR_MALFORMED;
+             next0 = IP6_MAPT_NEXT_DROP;
+           }
+
+         if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
+                                       &(vnet_buffer (p1)->map_t.
+                                         v6.l4_protocol),
+                                       &(vnet_buffer (p1)->map_t.
+                                         v6.l4_offset),
+                                       &(vnet_buffer (p1)->map_t.
+                                         v6.frag_offset))))
+           {
+             error1 = MAP_ERROR_MALFORMED;
+             next1 = IP6_MAPT_NEXT_DROP;
+           }
+
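+         //Reset source ports, compute L4 payload lengths and locate fragment headers (frag pointers are only meaningful when frag_offset != 0)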
+         src_port0 = src_port1 = -1;
+         l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
+           sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
+         l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
+           sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
+         frag0 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                          vnet_buffer (p0)->map_t.
+                                          v6.frag_offset);
+         frag1 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+                                          vnet_buffer (p1)->map_t.
+                                          v6.frag_offset);
+
+         ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
+                             l4_len0, frag0);
+         ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
+                             l4_len1, frag1);
+
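+         //Security check: the IPv6 source must match the address the domain derives from the IPv4 source and port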
+         if (PREDICT_FALSE
+             ((src_port0 != -1)
+              && (ip60->src_address.as_u64[0] !=
+                  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
+                                   src_port0)
+                  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
+                                                                     vnet_buffer
+                                                                     (p0)->map_t.v6.saddr,
+                                                                     src_port0))))
+           {
+             error0 = MAP_ERROR_SEC_CHECK;
+           }
+
+         if (PREDICT_FALSE
+             ((src_port1 != -1)
+              && (ip61->src_address.as_u64[0] !=
+                  map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
+                                   src_port1)
+                  || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
+                                                                     vnet_buffer
+                                                                     (p1)->map_t.v6.saddr,
+                                                                     src_port1))))
+           {
+             error1 = MAP_ERROR_SEC_CHECK;
+           }
+
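+         //Cache first fragments so the port can be resolved for the fragments that follow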
+         if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+                            !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+                                                  u8_ptr_add (ip60,
+                                                              vnet_buffer
+                                                              (p0)->map_t.
+                                                              v6.frag_offset)))
+             && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
+             && (error0 == MAP_ERROR_NONE))
+           {
+             ip6_map_fragment_cache (ip60,
+                                     (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                                                    vnet_buffer
+                                                                    (p0)->map_t.
+                                                                    v6.frag_offset),
+                                     d0, src_port0);
+           }
+
+         if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
+                            !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+                                                  u8_ptr_add (ip61,
+                                                              vnet_buffer
+                                                              (p1)->map_t.
+                                                              v6.frag_offset)))
+             && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
+             && (error1 == MAP_ERROR_NONE))
+           {
+             ip6_map_fragment_cache (ip61,
+                                     (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+                                                                    vnet_buffer
+                                                                    (p1)->map_t.
+                                                                    v6.frag_offset),
+                                     d1, src_port1);
+           }
+
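+         //Update per-domain RX counters for successfully classified, non-ICMP packets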
+         if (PREDICT_TRUE
+             (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->
+                                              map_t.map_domain_index, 1,
+                                              clib_net_to_host_u16
+                                              (ip60->payload_length));
+           }
+
+         if (PREDICT_TRUE
+             (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                              cpu_index,
+                                              vnet_buffer (p1)->
+                                              map_t.map_domain_index, 1,
+                                              clib_net_to_host_u16
+                                              (ip61->payload_length));
+           }
+
+         next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
+         next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
+         p0->error = error_node->errors[error0];
+         p1->error = error_node->errors[error1];
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, pi0, pi1, next0,
+                                          next1);
+       }
 #endif
 
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0;
-      vlib_buffer_t *p0;
-      ip6_header_t *ip60;
-      u8 error0;
-      u32 l4_len0;
-      i32 src_port0;
-      map_domain_t *d0;
-      ip6_frag_hdr_t *frag0;
-      ip6_mapt_next_t next0 = 0;
-      u32 saddr;
-
-      pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next +=1;
-      n_left_to_next -= 1;
-      error0 = MAP_ERROR_NONE;
-
-      p0 = vlib_get_buffer(vm, pi0);
-      ip60 = vlib_buffer_get_current(p0);
-      //Save saddr in a different variable to not overwrite ip.adj_index
-      saddr = map_get_ip4(&ip60->src_address);
-      d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
-                              (ip4_address_t *)&saddr,
-                              &vnet_buffer(p0)->map_t.map_domain_index, &error0);
-
-      //FIXME: What if d0 is null
-      vnet_buffer(p0)->map_t.v6.saddr = saddr;
-      vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
-      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
-
-      if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
-                    &(vnet_buffer(p0)->map_t.v6.l4_protocol),
-                    &(vnet_buffer(p0)->map_t.v6.l4_offset),
-                    &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
-        error0 = MAP_ERROR_MALFORMED;
-        next0 = IP6_MAPT_NEXT_DROP;
-      }
-
-      src_port0 = -1;
-      l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
-          sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
-      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
-
-
-      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
-                        ip6_frag_hdr_offset(frag0))) {
-        src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
-        error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
-        next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
-      } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
-        error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
-        vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
-        next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
-        src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
-      } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
-        error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : error0;
-        vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
-        next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
-        src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
-      } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
-        error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
-        next0 = IP6_MAPT_NEXT_MAPT_ICMP;
-        if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_reply ||
-            ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_request)
-          src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
-      } else {
-        //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
-        error0 = MAP_ERROR_BAD_PROTOCOL;
-      }
-
-      //Security check
-      if (PREDICT_FALSE((src_port0 != -1) && (
-          ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
-          ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
-        //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
-        error0 = MAP_ERROR_SEC_CHECK;
-      }
-
-      //Fragmented first packet needs to be cached for following packets
-      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
-                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
-                                             u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
-          (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) {
-        ip6_map_fragment_cache(ip60,
-                                (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
-                                d0, src_port0);
-      }
-
-      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
-        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
-                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
-                                        clib_net_to_host_u16(ip60->payload_length));
-      }
-
-      next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
-      p0->error = error_node->errors[error0];
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
-                                       to_next, n_left_to_next, pi0,
-                                       next0);
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 pi0;
+         vlib_buffer_t *p0;
+         ip6_header_t *ip60;
+         u8 error0;
+         u32 l4_len0;
+         i32 src_port0;
+         map_domain_t *d0;
+         ip6_frag_hdr_t *frag0;
+         ip6_mapt_next_t next0 = 0;
+         u32 saddr;
+
+         pi0 = to_next[0] = from[0];
+         from += 1;
+         n_left_from -= 1;
+         to_next += 1;
+         n_left_to_next -= 1;
+         error0 = MAP_ERROR_NONE;
+
+         p0 = vlib_get_buffer (vm, pi0);
+         ip60 = vlib_buffer_get_current (p0);
+         //Save saddr in a different variable to not overwrite ip.adj_index
+         saddr = map_get_ip4 (&ip60->src_address);
+         d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
+                                  (ip4_address_t *) & saddr,
+                                  &vnet_buffer (p0)->map_t.map_domain_index,
+                                  &error0);
+
+         //FIXME: What if d0 is null
+         vnet_buffer (p0)->map_t.v6.saddr = saddr;
+         vnet_buffer (p0)->map_t.v6.daddr =
+           ip6_map_t_embedded_address (d0, &ip60->dst_address);
+         vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
+
+         if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.l4_protocol),
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.l4_offset),
+                                       &(vnet_buffer (p0)->map_t.
+                                         v6.frag_offset))))
+           {
+             error0 = MAP_ERROR_MALFORMED;
+             next0 = IP6_MAPT_NEXT_DROP;
+           }
+
+         src_port0 = -1;
+         l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
+           sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
+         frag0 =
+           (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                          vnet_buffer (p0)->map_t.
+                                          v6.frag_offset);
+
+
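+         //Classify: non-first fragments use the fragment cache; TCP/UDP and ICMP echo yield the source port used below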
+         if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+                            ip6_frag_hdr_offset (frag0)))
+           {
+             src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
+             error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
+             next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
+           }
+         else
+           if (PREDICT_TRUE
+               (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
+           {
+             error0 =
+               l4_len0 <
+               sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
+             vnet_buffer (p0)->map_t.checksum_offset =
+               vnet_buffer (p0)->map_t.v6.l4_offset + 16;
+             next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+             src_port0 =
+               (i32) *
+               ((u16 *)
+                u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+           }
+         else
+           if (PREDICT_TRUE
+               (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
+           {
+             error0 =
+               l4_len0 <
+               sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
+             vnet_buffer (p0)->map_t.checksum_offset =
+               vnet_buffer (p0)->map_t.v6.l4_offset + 6;
+             next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
+             src_port0 =
+               (i32) *
+               ((u16 *)
+                u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
+           }
+         else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
+                  IP_PROTOCOL_ICMP6)
+           {
+             error0 =
+               l4_len0 <
+               sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
+             next0 = IP6_MAPT_NEXT_MAPT_ICMP;
+             if (((icmp46_header_t *)
+                  u8_ptr_add (ip60,
+                              vnet_buffer (p0)->map_t.v6.l4_offset))->type ==
+                 ICMP6_echo_reply
+                 || ((icmp46_header_t *)
+                     u8_ptr_add (ip60,
+                                 vnet_buffer (p0)->map_t.v6.
+                                 l4_offset))->type == ICMP6_echo_request)
+               src_port0 =
+                 (i32) *
+                 ((u16 *)
+                  u8_ptr_add (ip60,
+                              vnet_buffer (p0)->map_t.v6.l4_offset + 6));
+           }
+         else
+           {
+             //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
+             error0 = MAP_ERROR_BAD_PROTOCOL;
+           }
+
+         //Security check
+         if (PREDICT_FALSE
+             ((src_port0 != -1)
+              && (ip60->src_address.as_u64[0] !=
+                  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
+                                   src_port0)
+                  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
+                                                                     vnet_buffer
+                                                                     (p0)->map_t.v6.saddr,
+                                                                     src_port0))))
+           {
+             //Security check when src_port0 is known (src_port0 != -1): non-first fragment, UDP or TCP
+             error0 = MAP_ERROR_SEC_CHECK;
+           }
+
+         //Fragmented first packet needs to be cached for following packets
+         if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
+                            !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
+                                                  u8_ptr_add (ip60,
+                                                              vnet_buffer
+                                                              (p0)->map_t.
+                                                              v6.frag_offset)))
+             && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
+             && (error0 == MAP_ERROR_NONE))
+           {
+             ip6_map_fragment_cache (ip60,
+                                     (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+                                                                    vnet_buffer
+                                                                    (p0)->map_t.
+                                                                    v6.frag_offset),
+                                     d0, src_port0);
+           }
+
+         if (PREDICT_TRUE
+             (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
+           {
+             vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+                                              cpu_index,
+                                              vnet_buffer (p0)->
+                                              map_t.map_domain_index, 1,
+                                              clib_net_to_host_u16
+                                              (ip60->payload_length));
+           }
+
+         next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
+         p0->error = error_node->errors[error0];
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                          to_next, n_left_to_next, pi0,
+                                          next0);
+       }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
   return frame->n_vectors;
 }
 
 static char *map_t_error_strings[] = {
 #define _(sym,string) string,
-    foreach_map_error
+  foreach_map_error
 #undef _
 };
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
   .function = ip6_map_t_fragmented,
   .name = "ip6-map-t-fragmented",
@@ -1084,7 +1445,9 @@ VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
       [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
   .function = ip6_map_t_icmp,
   .name = "ip6-map-t-icmp",
@@ -1102,7 +1465,9 @@ VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
       [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
   .function = ip6_map_t_tcp_udp,
   .name = "ip6-map-t-tcp-udp",
@@ -1120,7 +1485,9 @@ VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
       [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_REGISTER_NODE(ip6_map_t_node) = {
   .function = ip6_map_t,
   .name = "ip6-map-t",
@@ -1139,3 +1506,12 @@ VLIB_REGISTER_NODE(ip6_map_t_node) = {
       [IP6_MAPT_NEXT_DROP] = "error-drop",
   },
 };
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index f91a871..cd32777 100644 (file)
 #include <vppinfra/xxhash.h>
 
 static inline u32
-crc_u32(u32 data, u32 value)
+crc_u32 (u32 data, u32 value)
 {
-  u64 tmp = ((u64)data<<32) | (u64) value;
-  return (u32) clib_xxhash(tmp);
+  u64 tmp = ((u64) data << 32) | (u64) value;
+  return (u32) clib_xxhash (tmp);
 }
 #endif
 
@@ -63,86 +63,100 @@ crc_u32(u32 data, u32 value)
 
 
 i32
-ip4_get_port (ip4_header_t *ip, map_dir_e dir, u16 buffer_len)
+ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len)
 {
   //TODO: use buffer length
   if (ip->ip_version_and_header_length != 0x45 ||
-      ip4_get_fragment_offset(ip))
-      return -1;
+      ip4_get_fragment_offset (ip))
+    return -1;
 
-  if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
-                   (ip->protocol == IP_PROTOCOL_UDP))) {
-    udp_header_t *udp = (void *)(ip + 1);
-    return (dir == MAP_SENDER) ? udp->src_port : udp->dst_port;
-  } else if (ip->protocol == IP_PROTOCOL_ICMP) {
-    icmp46_header_t *icmp = (void *)(ip + 1);
-    if (icmp->type == ICMP4_echo_request ||
-        icmp->type == ICMP4_echo_reply) {
-      return *((u16 *)(icmp + 1));
-    } else if (clib_net_to_host_u16(ip->length) >= 64) {
-      ip = (ip4_header_t *)(icmp + 2);
-      if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
-                       (ip->protocol == IP_PROTOCOL_UDP))) {
-        udp_header_t *udp = (void *)(ip + 1);
-        return (dir == MAP_SENDER) ? udp->dst_port : udp->src_port;
-      } else if (ip->protocol == IP_PROTOCOL_ICMP) {
-        icmp46_header_t *icmp = (void *)(ip + 1);
-        if (icmp->type == ICMP4_echo_request ||
-            icmp->type == ICMP4_echo_reply) {
-          return *((u16 *)(icmp + 1));
-        }
-      }
+  if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+                   (ip->protocol == IP_PROTOCOL_UDP)))
+    {
+      udp_header_t *udp = (void *) (ip + 1);
+      return (dir == MAP_SENDER) ? udp->src_port : udp->dst_port;
+    }
+  else if (ip->protocol == IP_PROTOCOL_ICMP)
+    {
+      icmp46_header_t *icmp = (void *) (ip + 1);
+      if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
+       {
+         return *((u16 *) (icmp + 1));
+       }
+      else if (clib_net_to_host_u16 (ip->length) >= 64)
+       {
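+         /* ICMP error message: the offending datagram is embedded after the ICMP header; take the port from it */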
+         ip = (ip4_header_t *) (icmp + 2);
+         if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
+                           (ip->protocol == IP_PROTOCOL_UDP)))
+           {
+             udp_header_t *udp = (void *) (ip + 1);
+             return (dir == MAP_SENDER) ? udp->dst_port : udp->src_port;
+           }
+         else if (ip->protocol == IP_PROTOCOL_ICMP)
+           {
+             icmp46_header_t *icmp = (void *) (ip + 1);
+             if (icmp->type == ICMP4_echo_request ||
+                 icmp->type == ICMP4_echo_reply)
+               {
+                 return *((u16 *) (icmp + 1));
+               }
+           }
+       }
     }
-  }
   return -1;
 }
 
 i32
-ip6_get_port (ip6_header_t *ip6, map_dir_e dir, u16 buffer_len)
+ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len)
 {
   u8 l4_protocol;
   u16 l4_offset;
   u16 frag_offset;
   u8 *l4;
 
-  if (ip6_parse(ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset))
+  if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset))
     return -1;
 
   //TODO: Use buffer length
 
   if (frag_offset &&
-      ip6_frag_hdr_offset(((ip6_frag_hdr_t *)u8_ptr_add(ip6, frag_offset))))
-    return -1; //Can't deal with non-first fragment for now
-
-  l4 = u8_ptr_add(ip6, l4_offset);
-  if (l4_protocol == IP_PROTOCOL_TCP ||
-      l4_protocol == IP_PROTOCOL_UDP) {
-    return (dir == MAP_SENDER) ? ((udp_header_t *)(l4))->src_port : ((udp_header_t *)(l4))->dst_port;
-  } else if (l4_protocol == IP_PROTOCOL_ICMP6) {
-    icmp46_header_t *icmp = (icmp46_header_t *)(l4);
-    if (icmp->type == ICMP6_echo_request) {
-      return (dir == MAP_SENDER) ? ((u16*)(icmp))[2] : -1;
-    } else if (icmp->type == ICMP6_echo_reply) {
-      return (dir == MAP_SENDER) ? -1 : ((u16*)(icmp))[2];
+      ip6_frag_hdr_offset (((ip6_frag_hdr_t *)
+                           u8_ptr_add (ip6, frag_offset))))
+    return -1;                 //Can't deal with non-first fragment for now
+
+  l4 = u8_ptr_add (ip6, l4_offset);
+  if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP)
+    {
+      return (dir ==
+             MAP_SENDER) ? ((udp_header_t *) (l4))->
+       src_port : ((udp_header_t *) (l4))->dst_port;
+    }
+  else if (l4_protocol == IP_PROTOCOL_ICMP6)
+    {
+      icmp46_header_t *icmp = (icmp46_header_t *) (l4);
+      if (icmp->type == ICMP6_echo_request)
+       {
+         return (dir == MAP_SENDER) ? ((u16 *) (icmp))[2] : -1;
+       }
+      else if (icmp->type == ICMP6_echo_reply)
+       {
+         return (dir == MAP_SENDER) ? -1 : ((u16 *) (icmp))[2];
+       }
     }
-  }
   return -1;
 }
 
 
 int
-map_create_domain (ip4_address_t *ip4_prefix,
-                   u8 ip4_prefix_len,
-                   ip6_address_t *ip6_prefix,
-                   u8 ip6_prefix_len,
-                   ip6_address_t *ip6_src,
-                   u8 ip6_src_len,
-                   u8 ea_bits_len,
-                   u8 psid_offset,
-                   u8 psid_length,
-                   u32 *map_domain_index,
-                  u16 mtu,
-                  u8 flags)
+map_create_domain (ip4_address_t * ip4_prefix,
+                  u8 ip4_prefix_len,
+                  ip6_address_t * ip6_prefix,
+                  u8 ip6_prefix_len,
+                  ip6_address_t * ip6_src,
+                  u8 ip6_src_len,
+                  u8 ea_bits_len,
+                  u8 psid_offset,
+                  u8 psid_length, u32 * map_domain_index, u16 mtu, u8 flags)
 {
   map_main_t *mm = &map_main;
   ip4_main_t *im4 = &ip4_main;
@@ -159,21 +173,27 @@ map_create_domain (ip4_address_t *ip4_prefix,
     return -1;
 
   /* Sanity check on the src prefix length */
-  if (flags & MAP_DOMAIN_TRANSLATION) {
-      if (ip6_src_len != 96) {
-         clib_warning("MAP-T only supports ip6_src_len = 96 for now.");
+  if (flags & MAP_DOMAIN_TRANSLATION)
+    {
+      if (ip6_src_len != 96)
+       {
+         clib_warning ("MAP-T only supports ip6_src_len = 96 for now.");
          return -1;
-      }
-  } else {
-      if (ip6_src_len != 128) {
-         clib_warning("MAP-E requires a BR address, not a prefix (ip6_src_len should be 128).");
+       }
+    }
+  else
+    {
+      if (ip6_src_len != 128)
+       {
+         clib_warning
+           ("MAP-E requires a BR address, not a prefix (ip6_src_len should be 128).");
          return -1;
-      }
-  }
+       }
+    }
 
   /* Get domain index */
-  pool_get_aligned(mm->domains, d, CLIB_CACHE_LINE_BYTES);
-  memset(d, 0, sizeof (*d));
+  pool_get_aligned (mm->domains, d, CLIB_CACHE_LINE_BYTES);
+  memset (d, 0, sizeof (*d));
   *map_domain_index = d - mm->domains;
 
   /* Init domain struct */
@@ -190,34 +210,42 @@ map_create_domain (ip4_address_t *ip4_prefix,
   d->flags = flags;
 
   /* How many, and which bits to grab from the IPv4 DA */
-  if (ip4_prefix_len + ea_bits_len < 32) {
-    d->flags |= MAP_DOMAIN_PREFIX;
-    suffix_len = d->suffix_shift = 32 - ip4_prefix_len - ea_bits_len;
-  } else {
-    d->suffix_shift = 0;
-    suffix_len = 32 - ip4_prefix_len;
-  }
-  d->suffix_mask = (1<<suffix_len) - 1;
+  if (ip4_prefix_len + ea_bits_len < 32)
+    {
+      d->flags |= MAP_DOMAIN_PREFIX;
+      suffix_len = d->suffix_shift = 32 - ip4_prefix_len - ea_bits_len;
+    }
+  else
+    {
+      d->suffix_shift = 0;
+      suffix_len = 32 - ip4_prefix_len;
+    }
+  d->suffix_mask = (1 << suffix_len) - 1;
 
   d->psid_shift = 16 - psid_length - psid_offset;
   d->psid_mask = (1 << d->psid_length) - 1;
   d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length;
 
   /* Init IP adjacency */
-  memset(&adj, 0, sizeof(adj));
+  memset (&adj, 0, sizeof (adj));
   adj.explicit_fib_index = ~0;
-  adj.lookup_next_index = (d->flags & MAP_DOMAIN_TRANSLATION) ? IP_LOOKUP_NEXT_MAP_T : IP_LOOKUP_NEXT_MAP;
-  p = (uword *)&adj.rewrite_data[0];
+  adj.lookup_next_index =
+    (d->
+     flags & MAP_DOMAIN_TRANSLATION) ? IP_LOOKUP_NEXT_MAP_T :
+    IP_LOOKUP_NEXT_MAP;
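+  /* The MAP domain index is carried in the adjacency rewrite data so the MAP nodes can recover the domain at lookup time */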
+  p = (uword *) & adj.rewrite_data[0];
   *p = (uword) (*map_domain_index);
 
-  if (ip4_get_route(im4, 0, 0, (u8 *)ip4_prefix, ip4_prefix_len)) {
-    clib_warning("IPv4 route already defined: %U/%d", format_ip4_address, ip4_prefix, ip4_prefix_len);
-    pool_put(mm->domains, d);
-    return -1;
-  }
-    
+  if (ip4_get_route (im4, 0, 0, (u8 *) ip4_prefix, ip4_prefix_len))
+    {
+      clib_warning ("IPv4 route already defined: %U/%d", format_ip4_address,
+                   ip4_prefix, ip4_prefix_len);
+      pool_put (mm->domains, d);
+      return -1;
+    }
+
   /* Create ip4 adjacency */
-  memset(&args4, 0, sizeof(args4));
+  memset (&args4, 0, sizeof (args4));
   args4.table_index_or_table_id = 0;
   args4.flags = IP4_ROUTE_FLAG_ADD;
   args4.dst_address.as_u32 = ip4_prefix->as_u32;
@@ -226,51 +254,61 @@ map_create_domain (ip4_address_t *ip4_prefix,
   args4.adj_index = ~0;
   args4.add_adj = &adj;
   args4.n_add_adj = 1;
-  ip4_add_del_route(im4, &args4);
+  ip4_add_del_route (im4, &args4);
 
   /* Multiple MAP domains may share same source IPv6 TEP */
-  u32 ai = ip6_get_route(im6, 0, 0, ip6_src, ip6_src_len);
-  if (ai > 0) {
-    ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
-    ip_adjacency_t *adj6 = ip_get_adjacency(lm6, ai);
-    if (adj6->lookup_next_index != IP_LOOKUP_NEXT_MAP &&
-       adj6->lookup_next_index != IP_LOOKUP_NEXT_MAP_T) {
-      clib_warning("BR source address already assigned: %U", format_ip6_address, ip6_src);
-      pool_put(mm->domains, d);
-      return -1;
+  u32 ai = ip6_get_route (im6, 0, 0, ip6_src, ip6_src_len);
+  if (ai > 0)
+    {
+      ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
+      ip_adjacency_t *adj6 = ip_get_adjacency (lm6, ai);
+      if (adj6->lookup_next_index != IP_LOOKUP_NEXT_MAP &&
+         adj6->lookup_next_index != IP_LOOKUP_NEXT_MAP_T)
+       {
+         clib_warning ("BR source address already assigned: %U",
+                       format_ip6_address, ip6_src);
+         pool_put (mm->domains, d);
+         return -1;
+       }
+      /* Shared source */
+      p = (uword *) & adj6->rewrite_data[0];
+      p[0] = ~0;
+
+      /* Add refcount, so we don't accidentally delete the route underneath someone */
+      p[1]++;
+    }
+  else
+    {
+      /* Create ip6 adjacency. */
+      memset (&args6, 0, sizeof (args6));
+      args6.table_index_or_table_id = 0;
+      args6.flags = IP6_ROUTE_FLAG_ADD;
+      args6.dst_address.as_u64[0] = ip6_src->as_u64[0];
+      args6.dst_address.as_u64[1] = ip6_src->as_u64[1];
+      args6.dst_address_length = ip6_src_len;
+      args6.adj_index = ~0;
+      args6.add_adj = &adj;
+      args6.n_add_adj = 1;
+      ip6_add_del_route (im6, &args6);
     }
-    /* Shared source */
-    p = (uword *)&adj6->rewrite_data[0];
-    p[0] = ~0;
-
-    /* Add refcount, so we don't accidentially delete the route underneath someone */
-    p[1]++;
-  } else {
-    /* Create ip6 adjacency. */
-    memset(&args6, 0, sizeof(args6));
-    args6.table_index_or_table_id = 0;
-    args6.flags = IP6_ROUTE_FLAG_ADD;
-    args6.dst_address.as_u64[0] = ip6_src->as_u64[0];
-    args6.dst_address.as_u64[1] = ip6_src->as_u64[1];
-    args6.dst_address_length = ip6_src_len;
-    args6.adj_index = ~0;
-    args6.add_adj = &adj;
-    args6.n_add_adj = 1;
-    ip6_add_del_route(im6, &args6);
-  }
 
   /* Validate packet/byte counters */
-  map_domain_counter_lock(mm);
+  map_domain_counter_lock (mm);
   int i;
-  for (i = 0; i < vec_len(mm->simple_domain_counters); i++) {
-    vlib_validate_simple_counter(&mm->simple_domain_counters[i], *map_domain_index);
-    vlib_zero_simple_counter(&mm->simple_domain_counters[i], *map_domain_index);
-  }
-  for (i = 0; i < vec_len(mm->domain_counters); i++) {
-    vlib_validate_combined_counter(&mm->domain_counters[i], *map_domain_index);
-    vlib_zero_combined_counter(&mm->domain_counters[i], *map_domain_index);
-  }
-  map_domain_counter_unlock(mm);
+  for (i = 0; i < vec_len (mm->simple_domain_counters); i++)
+    {
+      vlib_validate_simple_counter (&mm->simple_domain_counters[i],
+                                   *map_domain_index);
+      vlib_zero_simple_counter (&mm->simple_domain_counters[i],
+                               *map_domain_index);
+    }
+  for (i = 0; i < vec_len (mm->domain_counters); i++)
+    {
+      vlib_validate_combined_counter (&mm->domain_counters[i],
+                                     *map_domain_index);
+      vlib_zero_combined_counter (&mm->domain_counters[i], *map_domain_index);
+    }
+  map_domain_counter_unlock (mm);
 
   return 0;
 }
@@ -289,19 +327,24 @@ map_delete_domain (u32 map_domain_index)
   ip4_add_del_route_args_t args4;
   ip6_add_del_route_args_t args6;
 
-  if (pool_is_free_index(mm->domains, map_domain_index)) {
-    clib_warning("MAP domain delete: domain does not exist: %d", map_domain_index);
-    return -1;
-  }
+  if (pool_is_free_index (mm->domains, map_domain_index))
+    {
+      clib_warning ("MAP domain delete: domain does not exist: %d",
+                   map_domain_index);
+      return -1;
+    }
 
-  d = pool_elt_at_index(mm->domains, map_domain_index);
+  d = pool_elt_at_index (mm->domains, map_domain_index);
 
-  memset(&adj, 0, sizeof(adj));
+  memset (&adj, 0, sizeof (adj));
   adj.explicit_fib_index = ~0;
-  adj.lookup_next_index = (d->flags & MAP_DOMAIN_TRANSLATION) ? IP_LOOKUP_NEXT_MAP_T : IP_LOOKUP_NEXT_MAP;
+  adj.lookup_next_index =
+    (d->
+     flags & MAP_DOMAIN_TRANSLATION) ? IP_LOOKUP_NEXT_MAP_T :
+    IP_LOOKUP_NEXT_MAP;
 
   /* Delete ip4 adjacency */
-  memset(&args4, 0, sizeof(args4));
+  memset (&args4, 0, sizeof (args4));
   args4.table_index_or_table_id = 0;
   args4.flags = IP4_ROUTE_FLAG_DEL;
   args4.dst_address.as_u32 = d->ip4_prefix.as_u32;
@@ -309,147 +352,160 @@ map_delete_domain (u32 map_domain_index)
   args4.adj_index = 0;
   args4.add_adj = &adj;
   args4.n_add_adj = 0;
-  ip4_add_del_route(im4, &args4);
+  ip4_add_del_route (im4, &args4);
 
   /* Delete ip6 adjacency */
-  u32 ai = ip6_get_route(im6, 0, 0, &d->ip6_src, d->ip6_src_len);
-  if (ai > 0) {
-    ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
-    ip_adjacency_t *adj6 = ip_get_adjacency(lm6, ai);
-
-    uword *p = (uword *)&adj6->rewrite_data[0];
-    /* Delete route when no other domains use this source */
-    if (p[1] == 0) {
-      memset(&args6, 0, sizeof (args6));
-      args6.table_index_or_table_id = 0;
-      args6.flags = IP6_ROUTE_FLAG_DEL;
-      args6.dst_address.as_u64[0] = d->ip6_src.as_u64[0];
-      args6.dst_address.as_u64[1] = d->ip6_src.as_u64[1];
-      args6.dst_address_length = d->ip6_src_len;
-      args6.adj_index = 0;
-      args6.add_adj = &adj;
-      args6.n_add_adj = 0;
-      ip6_add_del_route(im6, &args6);
+  u32 ai = ip6_get_route (im6, 0, 0, &d->ip6_src, d->ip6_src_len);
+  if (ai > 0)
+    {
+      ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
+      ip_adjacency_t *adj6 = ip_get_adjacency (lm6, ai);
+
+      uword *p = (uword *) & adj6->rewrite_data[0];
+      /* Delete route when no other domains use this source */
+      if (p[1] == 0)
+       {
+         memset (&args6, 0, sizeof (args6));
+         args6.table_index_or_table_id = 0;
+         args6.flags = IP6_ROUTE_FLAG_DEL;
+         args6.dst_address.as_u64[0] = d->ip6_src.as_u64[0];
+         args6.dst_address.as_u64[1] = d->ip6_src.as_u64[1];
+         args6.dst_address_length = d->ip6_src_len;
+         args6.adj_index = 0;
+         args6.add_adj = &adj;
+         args6.n_add_adj = 0;
+         ip6_add_del_route (im6, &args6);
+       }
+      p[1]--;
     }
-    p[1]--;
-  }
   /* Deleting rules */
   if (d->rules)
-    clib_mem_free(d->rules);
+    clib_mem_free (d->rules);
 
-  pool_put(mm->domains, d);
+  pool_put (mm->domains, d);
 
   return 0;
 }
 
 int
-map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t *tep,
+map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
                  u8 is_add)
 {
   map_domain_t *d;
   map_main_t *mm = &map_main;
 
-  if (pool_is_free_index(mm->domains, map_domain_index)) {
-    clib_warning("MAP rule: domain does not exist: %d", map_domain_index);
-    return -1;
-  }
-  d = pool_elt_at_index(mm->domains, map_domain_index);
+  if (pool_is_free_index (mm->domains, map_domain_index))
+    {
+      clib_warning ("MAP rule: domain does not exist: %d", map_domain_index);
+      return -1;
+    }
+  d = pool_elt_at_index (mm->domains, map_domain_index);
 
   /* Rules are only used in 1:1 independent case */
   if (d->ea_bits_len > 0)
     return (-1);
 
-  if (!d->rules) {
-    u32 l = (0x1 << d->psid_length) * sizeof(ip6_address_t);
-    d->rules = clib_mem_alloc_aligned(l, CLIB_CACHE_LINE_BYTES);
-    if (!d->rules) return -1;
-    memset(d->rules, 0, l);
-  }
+  if (!d->rules)
+    {
+      u32 l = (0x1 << d->psid_length) * sizeof (ip6_address_t);
+      d->rules = clib_mem_alloc_aligned (l, CLIB_CACHE_LINE_BYTES);
+      if (!d->rules)
+       return -1;
+      memset (d->rules, 0, l);
+    }
 
-  if (psid >= (0x1 << d->psid_length)) {
-    clib_warning("MAP rule: PSID outside bounds: %d [%d]", psid, 0x1 << d->psid_length);
-    return -1;
-  }
+  if (psid >= (0x1 << d->psid_length))
+    {
+      clib_warning ("MAP rule: PSID outside bounds: %d [%d]", psid,
+                   0x1 << d->psid_length);
+      return -1;
+    }
 
-  if (is_add) {
-    d->rules[psid] = *tep;
-  } else {
-    memset(&d->rules[psid], 0, sizeof(ip6_address_t));
-  }
+  if (is_add)
+    {
+      d->rules[psid] = *tep;
+    }
+  else
+    {
+      memset (&d->rules[psid], 0, sizeof (ip6_address_t));
+    }
   return 0;
 }
 
 #ifdef MAP_SKIP_IP6_LOOKUP
 static void
-map_pre_resolve (ip4_address_t *ip4, ip6_address_t *ip6)
+map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6)
 {
   map_main_t *mm = &map_main;
   ip4_main_t *im4 = &ip4_main;
   ip6_main_t *im6 = &ip6_main;
 
-  if (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0) {
-    mm->adj6_index = ip6_fib_lookup_with_table(im6, 0, ip6);
-    clib_warning("FIB lookup results in: %u", mm->adj6_index);
-  }
-  if (ip4->as_u32 != 0) {
-    mm->adj4_index = ip4_fib_lookup_with_table(im4, 0, ip4, 0);
-    clib_warning("FIB lookup results in: %u", mm->adj4_index);
-  }
+  if (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0)
+    {
+      mm->adj6_index = ip6_fib_lookup_with_table (im6, 0, ip6);
+      clib_warning ("FIB lookup results in: %u", mm->adj6_index);
+    }
+  if (ip4->as_u32 != 0)
+    {
+      mm->adj4_index = ip4_fib_lookup_with_table (im4, 0, ip4, 0);
+      clib_warning ("FIB lookup results in: %u", mm->adj4_index);
+    }
 }
 #endif
 
 static clib_error_t *
-map_security_check_command_fn (vlib_main_t *vm,
-                              unformat_input_t *input,
-                              vlib_cli_command_t *cmd)
+map_security_check_command_fn (vlib_main_t * vm,
+                              unformat_input_t * input,
+                              vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "off"))
-      mm->sec_check = false;
-    else if (unformat(line_input, "on"))
-      mm->sec_check = true;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "off"))
+       mm->sec_check = false;
+      else if (unformat (line_input, "on"))
+       mm->sec_check = true;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
   return 0;
 }
 
 static clib_error_t *
-map_security_check_frag_command_fn (vlib_main_t *vm,
-                                   unformat_input_t *input,
-                                   vlib_cli_command_t *cmd)
+map_security_check_frag_command_fn (vlib_main_t * vm,
+                                   unformat_input_t * input,
+                                   vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "off"))
-      mm->sec_check_frag = false;
-    else if (unformat(line_input, "on"))
-      mm->sec_check_frag = true;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "off"))
+       mm->sec_check_frag = false;
+      else if (unformat (line_input, "on"))
+       mm->sec_check_frag = true;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
   return 0;
 }
 
 static clib_error_t *
-map_add_domain_command_fn (vlib_main_t *vm,
-                           unformat_input_t *input,
-                           vlib_cli_command_t *cmd)
+map_add_domain_command_fn (vlib_main_t * vm,
+                          unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   ip4_address_t ip4_prefix;
@@ -464,150 +520,165 @@ map_add_domain_command_fn (vlib_main_t *vm,
   ip6_src_len = 128;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "ip4-pfx %U/%d", unformat_ip4_address, &ip4_prefix, &ip4_prefix_len))
-      num_m_args++;
-    else if (unformat(line_input, "ip6-pfx %U/%d", unformat_ip6_address, &ip6_prefix, &ip6_prefix_len))
-      num_m_args++;
-    else if (unformat(line_input, "ip6-src %U/%d", unformat_ip6_address, &ip6_src, &ip6_src_len))
-      num_m_args++;
-    else if (unformat(line_input, "ip6-src %U", unformat_ip6_address, &ip6_src))
-      num_m_args++;
-    else if (unformat(line_input, "ea-bits-len %d", &ea_bits_len))
-      num_m_args++;
-    else if (unformat(line_input, "psid-offset %d", &psid_offset))
-      num_m_args++;
-    else if (unformat(line_input, "psid-len %d", &psid_length))
-      num_m_args++;
-    else if (unformat(line_input, "mtu %d", &mtu))
-      num_m_args++;
-    else if (unformat(line_input, "map-t"))
-      flags |= MAP_DOMAIN_TRANSLATION;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat
+         (line_input, "ip4-pfx %U/%d", unformat_ip4_address, &ip4_prefix,
+          &ip4_prefix_len))
+       num_m_args++;
+      else
+       if (unformat
+           (line_input, "ip6-pfx %U/%d", unformat_ip6_address, &ip6_prefix,
+            &ip6_prefix_len))
+       num_m_args++;
+      else
+       if (unformat
+           (line_input, "ip6-src %U/%d", unformat_ip6_address, &ip6_src,
+            &ip6_src_len))
+       num_m_args++;
+      else
+       if (unformat
+           (line_input, "ip6-src %U", unformat_ip6_address, &ip6_src))
+       num_m_args++;
+      else if (unformat (line_input, "ea-bits-len %d", &ea_bits_len))
+       num_m_args++;
+      else if (unformat (line_input, "psid-offset %d", &psid_offset))
+       num_m_args++;
+      else if (unformat (line_input, "psid-len %d", &psid_length))
+       num_m_args++;
+      else if (unformat (line_input, "mtu %d", &mtu))
+       num_m_args++;
+      else if (unformat (line_input, "map-t"))
+       flags |= MAP_DOMAIN_TRANSLATION;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   if (num_m_args < 3)
-    return clib_error_return(0, "mandatory argument(s) missing");
+    return clib_error_return (0, "mandatory argument(s) missing");
 
-  map_create_domain(&ip4_prefix, ip4_prefix_len,
-                   &ip6_prefix, ip6_prefix_len, &ip6_src, ip6_src_len,
-                   ea_bits_len, psid_offset, psid_length, &map_domain_index,
-                   mtu, flags);
+  map_create_domain (&ip4_prefix, ip4_prefix_len,
+                    &ip6_prefix, ip6_prefix_len, &ip6_src, ip6_src_len,
+                    ea_bits_len, psid_offset, psid_length, &map_domain_index,
+                    mtu, flags);
 
   return 0;
 }
 
 static clib_error_t *
-map_del_domain_command_fn (vlib_main_t *vm,
-                          unformat_input_t *input,
-                          vlib_cli_command_t *cmd)
+map_del_domain_command_fn (vlib_main_t * vm,
+                          unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   u32 num_m_args = 0;
   u32 map_domain_index;
 
   /* Get a line of input. */
-  if (! unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "index %d", &map_domain_index))
-      num_m_args++;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "index %d", &map_domain_index))
+       num_m_args++;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   if (num_m_args != 1)
-    return clib_error_return(0, "mandatory argument(s) missing");
+    return clib_error_return (0, "mandatory argument(s) missing");
 
-  map_delete_domain(map_domain_index);
+  map_delete_domain (map_domain_index);
 
   return 0;
 }
 
 static clib_error_t *
-map_add_rule_command_fn (vlib_main_t *vm,
-                        unformat_input_t *input,
-                        vlib_cli_command_t *cmd)
+map_add_rule_command_fn (vlib_main_t * vm,
+                        unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   ip6_address_t tep;
   u32 num_m_args = 0;
   u32 psid, map_domain_index;
-    
+
   /* Get a line of input. */
-  if (! unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
 
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "index %d", &map_domain_index))
-      num_m_args++;
-    else if (unformat(line_input, "psid %d", &psid))
-      num_m_args++;
-    else if (unformat(line_input, "ip6-dst %U", unformat_ip6_address, &tep))
-      num_m_args++;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "index %d", &map_domain_index))
+       num_m_args++;
+      else if (unformat (line_input, "psid %d", &psid))
+       num_m_args++;
+      else
+       if (unformat (line_input, "ip6-dst %U", unformat_ip6_address, &tep))
+       num_m_args++;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   if (num_m_args != 3)
-    return clib_error_return(0, "mandatory argument(s) missing");
+    return clib_error_return (0, "mandatory argument(s) missing");
 
-  if (map_add_del_psid(map_domain_index, psid, &tep, 1) != 0) {
-    return clib_error_return(0, "Failing to add Mapping Rule");
-  }
+  if (map_add_del_psid (map_domain_index, psid, &tep, 1) != 0)
+    {
+      return clib_error_return (0, "Failed to add Mapping Rule");
+    }
   return 0;
 }
 
 #if MAP_SKIP_IP6_LOOKUP
 static clib_error_t *
-map_pre_resolve_command_fn (vlib_main_t *vm,
-                           unformat_input_t *input,
-                           vlib_cli_command_t *cmd)
+map_pre_resolve_command_fn (vlib_main_t * vm,
+                           unformat_input_t * input,
+                           vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   ip4_address_t ip4nh;
   ip6_address_t ip6nh;
   map_main_t *mm = &map_main;
 
-  memset(&ip4nh, 0, sizeof(ip4nh));
-  memset(&ip6nh, 0, sizeof(ip6nh));
+  memset (&ip4nh, 0, sizeof (ip4nh));
+  memset (&ip6nh, 0, sizeof (ip6nh));
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "ip4-nh %U", unformat_ip4_address, &ip4nh))
-      mm->preresolve_ip4 = ip4nh;
-    else if (unformat(line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh))
-      mm->preresolve_ip6 = ip6nh;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
 
-  map_pre_resolve(&ip4nh, &ip6nh);
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "ip4-nh %U", unformat_ip4_address, &ip4nh))
+       mm->preresolve_ip4 = ip4nh;
+      else
+       if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh))
+       mm->preresolve_ip6 = ip6nh;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
+
+  map_pre_resolve (&ip4nh, &ip6nh);
 
   return 0;
 }
 #endif
 
 static clib_error_t *
-map_icmp_relay_source_address_command_fn (vlib_main_t *vm,
-                                         unformat_input_t *input,
-                                         vlib_cli_command_t *cmd)
+map_icmp_relay_source_address_command_fn (vlib_main_t * vm,
+                                         unformat_input_t * input,
+                                         vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   ip4_address_t icmp_src_address;
@@ -616,109 +687,113 @@ map_icmp_relay_source_address_command_fn (vlib_main_t *vm,
   mm->icmp4_src_address.as_u32 = 0;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "%U", unformat_ip4_address, &icmp_src_address))
-      mm->icmp4_src_address = icmp_src_address;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat
+         (line_input, "%U", unformat_ip4_address, &icmp_src_address))
+       mm->icmp4_src_address = icmp_src_address;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   return 0;
 }
 
 static clib_error_t *
-map_icmp_unreachables_command_fn (vlib_main_t *vm,
-                                 unformat_input_t *input,
-                                 vlib_cli_command_t *cmd)
+map_icmp_unreachables_command_fn (vlib_main_t * vm,
+                                 unformat_input_t * input,
+                                 vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
   int num_m_args = 0;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    num_m_args++;
-    if (unformat(line_input, "on"))
-      mm->icmp6_enabled = true;
-    else if (unformat(line_input, "off"))
-      mm->icmp6_enabled = false;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      num_m_args++;
+      if (unformat (line_input, "on"))
+       mm->icmp6_enabled = true;
+      else if (unformat (line_input, "off"))
+       mm->icmp6_enabled = false;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
 
   if (num_m_args != 1)
-    return clib_error_return(0, "mandatory argument(s) missing");
+    return clib_error_return (0, "mandatory argument(s) missing");
 
   return 0;
 }
 
 static clib_error_t *
-map_fragment_command_fn (vlib_main_t *vm,
-                        unformat_input_t *input,
-                        vlib_cli_command_t *cmd)
+map_fragment_command_fn (vlib_main_t * vm,
+                        unformat_input_t * input, vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "inner"))
-      mm->frag_inner = true;
-    else if (unformat(line_input, "outer"))
-      mm->frag_inner = false;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "inner"))
+       mm->frag_inner = true;
+      else if (unformat (line_input, "outer"))
+       mm->frag_inner = false;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   return 0;
 }
 
 static clib_error_t *
-map_fragment_df_command_fn (vlib_main_t *vm,
-                           unformat_input_t *input,
-                           vlib_cli_command_t *cmd)
+map_fragment_df_command_fn (vlib_main_t * vm,
+                           unformat_input_t * input,
+                           vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "on"))
-      mm->frag_ignore_df = true;
-    else if (unformat(line_input, "off"))
-      mm->frag_ignore_df = false;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "on"))
+       mm->frag_ignore_df = true;
+      else if (unformat (line_input, "off"))
+       mm->frag_ignore_df = false;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   return 0;
 }
 
 static clib_error_t *
-map_traffic_class_command_fn (vlib_main_t *vm,
-                             unformat_input_t *input,
-                             vlib_cli_command_t *cmd)
+map_traffic_class_command_fn (vlib_main_t * vm,
+                             unformat_input_t * input,
+                             vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
@@ -727,104 +802,115 @@ map_traffic_class_command_fn (vlib_main_t *vm,
   mm->tc_copy = false;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "copy"))
-      mm->tc_copy = true;
-    else if (unformat(line_input, "%x", &tc))
-      mm->tc = tc & 0xff;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "copy"))
+       mm->tc_copy = true;
+      else if (unformat (line_input, "%x", &tc))
+       mm->tc = tc & 0xff;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
 
   return 0;
 }
 
 static u8 *
-format_map_domain (u8 *s, va_list *args)
+format_map_domain (u8 * s, va_list * args)
 {
-  map_domain_t *d = va_arg(*args, map_domain_t *);
-  bool counters = va_arg(*args, int);
+  map_domain_t *d = va_arg (*args, map_domain_t *);
+  bool counters = va_arg (*args, int);
   map_main_t *mm = &map_main;
   ip6_address_t ip6_prefix;
 
   if (d->rules)
-    memset(&ip6_prefix, 0, sizeof(ip6_prefix));
+    memset (&ip6_prefix, 0, sizeof (ip6_prefix));
   else
     ip6_prefix = d->ip6_prefix;
-  
-  s = format(s,
-            "[%d] ip4-pfx %U/%d ip6-pfx %U/%d ip6-src %U/%d ea_bits_len %d psid-offset %d psid-len %d mtu %d %s",
-            d - mm->domains,
-            format_ip4_address, &d->ip4_prefix, d->ip4_prefix_len,
-            format_ip6_address, &ip6_prefix, d->ip6_prefix_len,
-            format_ip6_address, &d->ip6_src, d->ip6_src_len,
-            d->ea_bits_len, d->psid_offset, d->psid_length, d->mtu,
-            (d->flags & MAP_DOMAIN_TRANSLATION) ? "map-t" : "");
-
-  if (counters) {
-    map_domain_counter_lock(mm);
-    vlib_counter_t v;
-    vlib_get_combined_counter(&mm->domain_counters[MAP_DOMAIN_COUNTER_TX], d - mm->domains, &v);
-    s = format(s, "  TX: %lld/%lld", v.packets, v.bytes);
-    vlib_get_combined_counter(&mm->domain_counters[MAP_DOMAIN_COUNTER_RX], d - mm->domains, &v);
-    s = format(s, "  RX: %lld/%lld", v.packets, v.bytes);
-    map_domain_counter_unlock(mm);
-  }
-  s = format(s, "\n");
-
-  if (d->rules) {
-    int i;
-    ip6_address_t dst;
-    for (i = 0; i < (0x1 << d->psid_length); i++) {
-      dst = d->rules[i];
-      if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0 )
-       continue;
-      s = format(s,
-                " rule psid: %d ip6-dst %U\n", i, format_ip6_address, &dst);
+
+  s = format (s,
+             "[%d] ip4-pfx %U/%d ip6-pfx %U/%d ip6-src %U/%d ea_bits_len %d psid-offset %d psid-len %d mtu %d %s",
+             d - mm->domains,
+             format_ip4_address, &d->ip4_prefix, d->ip4_prefix_len,
+             format_ip6_address, &ip6_prefix, d->ip6_prefix_len,
+             format_ip6_address, &d->ip6_src, d->ip6_src_len,
+             d->ea_bits_len, d->psid_offset, d->psid_length, d->mtu,
+             (d->flags & MAP_DOMAIN_TRANSLATION) ? "map-t" : "");
+
+  if (counters)
+    {
+      map_domain_counter_lock (mm);
+      vlib_counter_t v;
+      vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_TX],
+                                d - mm->domains, &v);
+      s = format (s, "  TX: %lld/%lld", v.packets, v.bytes);
+      vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_RX],
+                                d - mm->domains, &v);
+      s = format (s, "  RX: %lld/%lld", v.packets, v.bytes);
+      map_domain_counter_unlock (mm);
+    }
+  s = format (s, "\n");
+
+  if (d->rules)
+    {
+      int i;
+      ip6_address_t dst;
+      for (i = 0; i < (0x1 << d->psid_length); i++)
+       {
+         dst = d->rules[i];
+         if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0)
+           continue;
+         s = format (s,
+                     " rule psid: %d ip6-dst %U\n", i, format_ip6_address,
+                     &dst);
+       }
     }
-  }
   return s;
 }
 
 static u8 *
-format_map_ip4_reass (u8 *s, va_list *args)
+format_map_ip4_reass (u8 * s, va_list * args)
 {
   map_main_t *mm = &map_main;
-  map_ip4_reass_t *r = va_arg(*args, map_ip4_reass_t *);
+  map_ip4_reass_t *r = va_arg (*args, map_ip4_reass_t *);
   map_ip4_reass_key_t *k = &r->key;
-  f64 now = vlib_time_now(mm->vlib_main);
-  f64 lifetime = (((f64)mm->ip4_reass_conf_lifetime_ms) / 1000);
+  f64 now = vlib_time_now (mm->vlib_main);
+  f64 lifetime = (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000);
   f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1;
-  s = format(s,
-            "ip4-reass src=%U  dst=%U  protocol=%d  identifier=%d  port=%d  lifetime=%.3lf\n",
-            format_ip4_address, &k->src.as_u8, format_ip4_address, &k->dst.as_u8,
-            k->protocol, clib_net_to_host_u16(k->fragment_id), (r->port >= 0)?clib_net_to_host_u16(r->port):-1, dt);
+  s = format (s,
+             "ip4-reass src=%U  dst=%U  protocol=%d  identifier=%d  port=%d  lifetime=%.3lf\n",
+             format_ip4_address, &k->src.as_u8, format_ip4_address,
+             &k->dst.as_u8, k->protocol,
+             clib_net_to_host_u16 (k->fragment_id),
+             (r->port >= 0) ? clib_net_to_host_u16 (r->port) : -1, dt);
   return s;
 }
 
 static u8 *
-format_map_ip6_reass (u8 *s, va_list *args)
+format_map_ip6_reass (u8 * s, va_list * args)
 {
   map_main_t *mm = &map_main;
-  map_ip6_reass_t *r = va_arg(*args, map_ip6_reass_t *);
+  map_ip6_reass_t *r = va_arg (*args, map_ip6_reass_t *);
   map_ip6_reass_key_t *k = &r->key;
-  f64 now = vlib_time_now(mm->vlib_main);
-  f64 lifetime = (((f64)mm->ip6_reass_conf_lifetime_ms) / 1000);
+  f64 now = vlib_time_now (mm->vlib_main);
+  f64 lifetime = (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000);
   f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1;
-  s = format(s,
-             "ip6-reass src=%U  dst=%U  protocol=%d  identifier=%d  lifetime=%.3lf\n",
-             format_ip6_address, &k->src.as_u8, format_ip6_address, &k->dst.as_u8,
-             k->protocol, clib_net_to_host_u32(k->fragment_id), dt);
+  s = format (s,
+             "ip6-reass src=%U  dst=%U  protocol=%d  identifier=%d  lifetime=%.3lf\n",
+             format_ip6_address, &k->src.as_u8, format_ip6_address,
+             &k->dst.as_u8, k->protocol,
+             clib_net_to_host_u32 (k->fragment_id), dt);
   return s;
 }
 
 static clib_error_t *
-show_map_domain_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
+show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input,
+                           vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   map_main_t *mm = &map_main;
@@ -833,60 +919,73 @@ show_map_domain_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_c
   u32 map_domain_index = ~0;
 
   /* Get a line of input. */
-  if (!unformat_user(input, unformat_line_input, line_input))
+  if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (unformat(line_input, "counters"))
-      counters = true;
-    else if (unformat(line_input, "index %d", &map_domain_index))
-      ;
-    else
-      return clib_error_return(0, "unknown input `%U'",
-                               format_unformat_error, input);
-  }
-  unformat_free(line_input);
 
-  if (pool_elts(mm->domains) == 0)
-    vlib_cli_output(vm, "No MAP domains are configured...");
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "counters"))
+       counters = true;
+      else if (unformat (line_input, "index %d", &map_domain_index))
+       ;
+      else
+       return clib_error_return (0, "unknown input `%U'",
+                                 format_unformat_error, input);
+    }
+  unformat_free (line_input);
+
+  if (pool_elts (mm->domains) == 0)
+    vlib_cli_output (vm, "No MAP domains are configured...");
 
-  if (map_domain_index == ~0) {
+  if (map_domain_index == ~0)
+    {
+    /* *INDENT-OFF* */
     pool_foreach(d, mm->domains, ({vlib_cli_output(vm, "%U", format_map_domain, d, counters);}));
-  } else {
-    if (pool_is_free_index(mm->domains, map_domain_index)) {
-      return clib_error_return(0, "MAP domain does not exists %d", map_domain_index);
+    /* *INDENT-ON* */
+    }
+  else
+    {
+      if (pool_is_free_index (mm->domains, map_domain_index))
+       {
+         return clib_error_return (0, "MAP domain does not exists %d",
+                                   map_domain_index);
+       }
+
+      d = pool_elt_at_index (mm->domains, map_domain_index);
+      vlib_cli_output (vm, "%U", format_map_domain, d, counters);
     }
-
-    d = pool_elt_at_index(mm->domains, map_domain_index);
-    vlib_cli_output(vm, "%U", format_map_domain, d, counters);
-  }
 
   return 0;
 }
 
 static clib_error_t *
-show_map_fragments_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
+show_map_fragments_command_fn (vlib_main_t * vm, unformat_input_t * input,
+                              vlib_cli_command_t * cmd)
 {
   map_main_t *mm = &map_main;
   map_ip4_reass_t *f4;
   map_ip6_reass_t *f6;
 
+  /* *INDENT-OFF* */
   pool_foreach(f4, mm->ip4_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip4_reass, f4);}));
+  /* *INDENT-ON* */
+  /* *INDENT-OFF* */
   pool_foreach(f6, mm->ip6_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip6_reass, f6);}));
+  /* *INDENT-ON* */
   return (0);
 }
 
 u64
 map_error_counter_get (u32 node_index, map_error_t map_error)
 {
-  vlib_main_t *vm = vlib_get_main();
-  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, node_index);
+  vlib_main_t *vm = vlib_get_main ();
+  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
   vlib_error_main_t *em = &vm->error_main;
   vlib_error_t e = error_node->errors[map_error];
-  vlib_node_t *n = vlib_get_node(vm, node_index);
+  vlib_node_t *n = vlib_get_node (vm, node_index);
   u32 ci;
 
-  ci = vlib_error_get_code(e);
+  ci = vlib_error_get_code (e);
   ASSERT (ci < n->n_errors);
   ci += n->error_heap_index;
 
@@ -894,14 +993,16 @@ map_error_counter_get (u32 node_index, map_error_t map_error)
 }
 
 static clib_error_t *
-show_map_stats_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
+show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input,
+                          vlib_cli_command_t * cmd)
 {
   map_main_t *mm = &map_main;
   map_domain_t *d;
   int domains = 0, rules = 0, domaincount = 0, rulecount = 0;
   if (pool_elts (mm->domains) == 0)
-    vlib_cli_output(vm, "No MAP domains are configured...");
+    vlib_cli_output (vm, "No MAP domains are configured...");
 
+  /* *INDENT-OFF* */
   pool_foreach(d, mm->domains, ({
     if (d->rules) {
       rulecount+= 0x1 << d->psid_length;
@@ -910,30 +1011,38 @@ show_map_stats_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_co
     domains += sizeof(*d);
     domaincount++;
   }));
+  /* *INDENT-ON* */
 
-  vlib_cli_output(vm, "MAP domains structure: %d\n", sizeof (map_domain_t));
-  vlib_cli_output(vm, "MAP domains: %d (%d bytes)\n", domaincount, domains);
-  vlib_cli_output(vm, "MAP rules: %d (%d bytes)\n", rulecount, rules);
-  vlib_cli_output(vm, "Total: %d bytes)\n", rules + domains);
+  vlib_cli_output (vm, "MAP domains structure: %d\n", sizeof (map_domain_t));
+  vlib_cli_output (vm, "MAP domains: %d (%d bytes)\n", domaincount, domains);
+  vlib_cli_output (vm, "MAP rules: %d (%d bytes)\n", rulecount, rules);
+  vlib_cli_output (vm, "Total: %d bytes)\n", rules + domains);
 
 #if MAP_SKIP_IP6_LOOKUP
-  vlib_cli_output(vm, "MAP pre-resolve: IP6 next-hop: %U (%u), IP4 next-hop: %U (%u)\n",
-                 format_ip6_address, &mm->preresolve_ip6, mm->adj6_index,
-                 format_ip4_address, &mm->preresolve_ip4, mm->adj4_index);
+  vlib_cli_output (vm,
+                  "MAP pre-resolve: IP6 next-hop: %U (%u), IP4 next-hop: %U (%u)\n",
+                  format_ip6_address, &mm->preresolve_ip6, mm->adj6_index,
+                  format_ip4_address, &mm->preresolve_ip4, mm->adj4_index);
 #endif
 
   if (mm->tc_copy)
-    vlib_cli_output(vm, "MAP traffic-class: copy");
+    vlib_cli_output (vm, "MAP traffic-class: copy");
   else
-    vlib_cli_output(vm, "MAP traffic-class: %x", mm->tc);
-
-  vlib_cli_output(vm, "MAP IPv6 inbound security check: %s, fragmented packet security check: %s", mm->sec_check ? "enabled" : "disabled",
-                 mm->sec_check_frag ? "enabled" : "disabled");
-
-  vlib_cli_output(vm, "ICMP-relay IPv4 source address: %U\n", format_ip4_address, &mm->icmp4_src_address);
-  vlib_cli_output(vm, "ICMP6 unreachables sent for unmatched packets: %s\n", mm->icmp6_enabled ? "enabled" : "disabled");
-  vlib_cli_output(vm, "Inner fragmentation: %s\n", mm->frag_inner ? "enabled" : "disabled");
-  vlib_cli_output(vm, "Fragment packets regardless of DF flag: %s\n", mm->frag_ignore_df ? "enabled" : "disabled");
+    vlib_cli_output (vm, "MAP traffic-class: %x", mm->tc);
+
+  vlib_cli_output (vm,
+                  "MAP IPv6 inbound security check: %s, fragmented packet security check: %s",
+                  mm->sec_check ? "enabled" : "disabled",
+                  mm->sec_check_frag ? "enabled" : "disabled");
+
+  vlib_cli_output (vm, "ICMP-relay IPv4 source address: %U\n",
+                  format_ip4_address, &mm->icmp4_src_address);
+  vlib_cli_output (vm, "ICMP6 unreachables sent for unmatched packets: %s\n",
+                  mm->icmp6_enabled ? "enabled" : "disabled");
+  vlib_cli_output (vm, "Inner fragmentation: %s\n",
+                  mm->frag_inner ? "enabled" : "disabled");
+  vlib_cli_output (vm, "Fragment packets regardless of DF flag: %s\n",
+                  mm->frag_ignore_df ? "enabled" : "disabled");
 
   /*
    * Counters
@@ -948,147 +1057,203 @@ show_map_stats_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_co
   memset (total_bytes, 0, sizeof (total_bytes));
 
   map_domain_counter_lock (mm);
-  vec_foreach (cm, mm->domain_counters) {
+  vec_foreach (cm, mm->domain_counters)
+  {
     which = cm - mm->domain_counters;
 
-    for (i = 0; i < vec_len (cm->maxi); i++) {
-      vlib_get_combined_counter (cm, i, &v);
-      total_pkts[which] += v.packets;
-      total_bytes[which] += v.bytes;
-    }
+    for (i = 0; i < vec_len (cm->maxi); i++)
+      {
+       vlib_get_combined_counter (cm, i, &v);
+       total_pkts[which] += v.packets;
+       total_bytes[which] += v.bytes;
+      }
   }
   map_domain_counter_unlock (mm);
 
-  vlib_cli_output(vm, "Encapsulated packets: %lld bytes: %lld\n", total_pkts[MAP_DOMAIN_COUNTER_TX],
-                 total_bytes[MAP_DOMAIN_COUNTER_TX]);
-  vlib_cli_output(vm, "Decapsulated packets: %lld bytes: %lld\n", total_pkts[MAP_DOMAIN_COUNTER_RX],
-                 total_bytes[MAP_DOMAIN_COUNTER_RX]);
+  vlib_cli_output (vm, "Encapsulated packets: %lld bytes: %lld\n",
+                  total_pkts[MAP_DOMAIN_COUNTER_TX],
+                  total_bytes[MAP_DOMAIN_COUNTER_TX]);
+  vlib_cli_output (vm, "Decapsulated packets: %lld bytes: %lld\n",
+                  total_pkts[MAP_DOMAIN_COUNTER_RX],
+                  total_bytes[MAP_DOMAIN_COUNTER_RX]);
 
-  vlib_cli_output(vm, "ICMP relayed packets: %d\n", vlib_get_simple_counter(&mm->icmp_relayed, 0));
+  vlib_cli_output (vm, "ICMP relayed packets: %d\n",
+                  vlib_get_simple_counter (&mm->icmp_relayed, 0));
 
   return 0;
 }
 
 static clib_error_t *
-map_params_reass_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
+map_params_reass_command_fn (vlib_main_t * vm, unformat_input_t * input,
+                            vlib_cli_command_t * cmd)
 {
   unformat_input_t _line_input, *line_input = &_line_input;
   u32 lifetime = ~0;
-  f64 ht_ratio = (MAP_IP4_REASS_CONF_HT_RATIO_MAX+1);
+  f64 ht_ratio = (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1);
   u32 pool_size = ~0;
   u64 buffers = ~(0ull);
   u8 ip4 = 0, ip6 = 0;
 
-  if (!unformat_user(input, unformat_line_input, line_input))
-      return 0;
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
 
-  while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) {
-    if (!unformat(line_input, "lifetime %u", &lifetime) &&
-        !unformat(line_input, "ht-ratio %lf", &ht_ratio) &&
-        !unformat(line_input, "pool-size %u", &pool_size) &&
-        !unformat(line_input, "buffers %llu", &buffers) &&
-        !((unformat(line_input, "ip4")) && (ip4 = 1)) &&
-        !((unformat(line_input, "ip6")) && (ip6 = 1))) {
-      unformat_free(line_input);
-      return clib_error_return(0, "invalid input");
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (!unformat (line_input, "lifetime %u", &lifetime) &&
+         !unformat (line_input, "ht-ratio %lf", &ht_ratio) &&
+         !unformat (line_input, "pool-size %u", &pool_size) &&
+         !unformat (line_input, "buffers %llu", &buffers) &&
+         !((unformat (line_input, "ip4")) && (ip4 = 1)) &&
+         !((unformat (line_input, "ip6")) && (ip6 = 1)))
+       {
+         unformat_free (line_input);
+         return clib_error_return (0, "invalid input");
+       }
     }
-  }
-  unformat_free(line_input);
+  unformat_free (line_input);
 
   if (!ip4 && !ip6)
-    return clib_error_return(0, "must specify ip4 and/or ip6");
-
-  if (ip4) {
-    if (pool_size != ~0 && pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX)
-      return clib_error_return(0, "invalid ip4-reass pool-size ( > %d)", MAP_IP4_REASS_CONF_POOL_SIZE_MAX);
-    if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX+1) && ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX)
-      return clib_error_return(0, "invalid ip4-reass ht-ratio ( > %d)", MAP_IP4_REASS_CONF_HT_RATIO_MAX);
-    if (lifetime != ~0 && lifetime > MAP_IP4_REASS_CONF_LIFETIME_MAX)
-      return clib_error_return(0, "invalid ip4-reass lifetime ( > %d)", MAP_IP4_REASS_CONF_LIFETIME_MAX);
-    if (buffers != ~(0ull) && buffers > MAP_IP4_REASS_CONF_BUFFERS_MAX)
-      return clib_error_return(0, "invalid ip4-reass buffers ( > %ld)", MAP_IP4_REASS_CONF_BUFFERS_MAX);
-  }
-
-  if (ip6) {
-    if (pool_size != ~0 && pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX)
-      return clib_error_return(0, "invalid ip6-reass pool-size ( > %d)", MAP_IP6_REASS_CONF_POOL_SIZE_MAX);
-    if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX+1) && ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX)
-      return clib_error_return(0, "invalid ip6-reass ht-log2len ( > %d)", MAP_IP6_REASS_CONF_HT_RATIO_MAX);
-    if (lifetime != ~0 && lifetime > MAP_IP6_REASS_CONF_LIFETIME_MAX)
-      return clib_error_return(0, "invalid ip6-reass lifetime ( > %d)", MAP_IP6_REASS_CONF_LIFETIME_MAX);
-    if (buffers != ~(0ull) && buffers > MAP_IP6_REASS_CONF_BUFFERS_MAX)
-      return clib_error_return(0, "invalid ip6-reass buffers ( > %ld)", MAP_IP6_REASS_CONF_BUFFERS_MAX);
-  }
-
-  if (ip4) {
-    u32 reass = 0, packets = 0;
-    if (pool_size != ~0) {
-      if (map_ip4_reass_conf_pool_size(pool_size, &reass, &packets)) {
-        vlib_cli_output(vm, "Could not set ip4-reass pool-size");
-      } else {
-        vlib_cli_output(vm, "Setting ip4-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)", reass, packets);
-      }
-    }
-    if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX+1)) {
-      if (map_ip4_reass_conf_ht_ratio(ht_ratio, &reass, &packets)) {
-        vlib_cli_output(vm, "Could not set ip4-reass ht-log2len");
-      } else {
-        vlib_cli_output(vm, "Setting ip4-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)", reass, packets);
-      }
-    }
-    if (lifetime != ~0) {
-      if (map_ip4_reass_conf_lifetime(lifetime))
-        vlib_cli_output(vm, "Could not set ip4-reass lifetime");
-      else
-        vlib_cli_output(vm, "Setting ip4-reass lifetime");
-    }
-    if (buffers != ~(0ull)) {
-      if (map_ip4_reass_conf_buffers(buffers))
-        vlib_cli_output(vm, "Could not set ip4-reass buffers");
-      else
-        vlib_cli_output(vm, "Setting ip4-reass buffers");
+    return clib_error_return (0, "must specify ip4 and/or ip6");
+
+  if (ip4)
+    {
+      if (pool_size != ~0 && pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX)
+       return clib_error_return (0, "invalid ip4-reass pool-size ( > %d)",
+                                 MAP_IP4_REASS_CONF_POOL_SIZE_MAX);
+      if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)
+         && ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX)
+       return clib_error_return (0, "invalid ip4-reass ht-ratio ( > %d)",
+                                 MAP_IP4_REASS_CONF_HT_RATIO_MAX);
+      if (lifetime != ~0 && lifetime > MAP_IP4_REASS_CONF_LIFETIME_MAX)
+       return clib_error_return (0, "invalid ip4-reass lifetime ( > %d)",
+                                 MAP_IP4_REASS_CONF_LIFETIME_MAX);
+      if (buffers != ~(0ull) && buffers > MAP_IP4_REASS_CONF_BUFFERS_MAX)
+       return clib_error_return (0, "invalid ip4-reass buffers ( > %ld)",
+                                 MAP_IP4_REASS_CONF_BUFFERS_MAX);
     }
 
-    if (map_main.ip4_reass_conf_buffers >
-      map_main.ip4_reass_conf_pool_size * MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) {
-      vlib_cli_output(vm, "Note: 'ip4-reass buffers' > pool-size * max-fragments-per-reassembly.");
+  if (ip6)
+    {
+      if (pool_size != ~0 && pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX)
+       return clib_error_return (0, "invalid ip6-reass pool-size ( > %d)",
+                                 MAP_IP6_REASS_CONF_POOL_SIZE_MAX);
+      if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)
+         && ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX)
+       return clib_error_return (0, "invalid ip6-reass ht-log2len ( > %d)",
+                                 MAP_IP6_REASS_CONF_HT_RATIO_MAX);
+      if (lifetime != ~0 && lifetime > MAP_IP6_REASS_CONF_LIFETIME_MAX)
+       return clib_error_return (0, "invalid ip6-reass lifetime ( > %d)",
+                                 MAP_IP6_REASS_CONF_LIFETIME_MAX);
+      if (buffers != ~(0ull) && buffers > MAP_IP6_REASS_CONF_BUFFERS_MAX)
+       return clib_error_return (0, "invalid ip6-reass buffers ( > %ld)",
+                                 MAP_IP6_REASS_CONF_BUFFERS_MAX);
     }
-  }
 
-  if (ip6) {
-    u32 reass = 0, packets = 0;
-    if (pool_size != ~0) {
-      if (map_ip6_reass_conf_pool_size(pool_size, &reass, &packets)) {
-        vlib_cli_output(vm, "Could not set ip6-reass pool-size");
-      } else {
-        vlib_cli_output(vm, "Setting ip6-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)", reass, packets);
-      }
-    }
-    if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX+1)) {
-      if (map_ip6_reass_conf_ht_ratio(ht_ratio, &reass, &packets)) {
-        vlib_cli_output(vm, "Could not set ip6-reass ht-log2len");
-      } else {
-        vlib_cli_output(vm, "Setting ip6-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)", reass, packets);
-      }
-    }
-    if (lifetime != ~0) {
-      if (map_ip6_reass_conf_lifetime(lifetime))
-        vlib_cli_output(vm, "Could not set ip6-reass lifetime");
-      else
-        vlib_cli_output(vm, "Setting ip6-reass lifetime");
-    }
-    if (buffers != ~(0ull)) {
-      if (map_ip6_reass_conf_buffers(buffers))
-        vlib_cli_output(vm, "Could not set ip6-reass buffers");
-      else
-        vlib_cli_output(vm, "Setting ip6-reass buffers");
+  if (ip4)
+    {
+      u32 reass = 0, packets = 0;
+      if (pool_size != ~0)
+       {
+         if (map_ip4_reass_conf_pool_size (pool_size, &reass, &packets))
+           {
+             vlib_cli_output (vm, "Could not set ip4-reass pool-size");
+           }
+         else
+           {
+             vlib_cli_output (vm,
+                              "Setting ip4-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)",
+                              reass, packets);
+           }
+       }
+      if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1))
+       {
+         if (map_ip4_reass_conf_ht_ratio (ht_ratio, &reass, &packets))
+           {
+             vlib_cli_output (vm, "Could not set ip4-reass ht-log2len");
+           }
+         else
+           {
+             vlib_cli_output (vm,
+                              "Setting ip4-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)",
+                              reass, packets);
+           }
+       }
+      if (lifetime != ~0)
+       {
+         if (map_ip4_reass_conf_lifetime (lifetime))
+           vlib_cli_output (vm, "Could not set ip4-reass lifetime");
+         else
+           vlib_cli_output (vm, "Setting ip4-reass lifetime");
+       }
+      if (buffers != ~(0ull))
+       {
+         if (map_ip4_reass_conf_buffers (buffers))
+           vlib_cli_output (vm, "Could not set ip4-reass buffers");
+         else
+           vlib_cli_output (vm, "Setting ip4-reass buffers");
+       }
+
+      if (map_main.ip4_reass_conf_buffers >
+         map_main.ip4_reass_conf_pool_size *
+         MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY)
+       {
+         vlib_cli_output (vm,
+                          "Note: 'ip4-reass buffers' > pool-size * max-fragments-per-reassembly.");
+       }
     }
 
-    if (map_main.ip6_reass_conf_buffers >
-        map_main.ip6_reass_conf_pool_size * MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) {
-      vlib_cli_output(vm, "Note: 'ip6-reass buffers' > pool-size * max-fragments-per-reassembly.");
+  if (ip6)
+    {
+      u32 reass = 0, packets = 0;
+      if (pool_size != ~0)
+       {
+         if (map_ip6_reass_conf_pool_size (pool_size, &reass, &packets))
+           {
+             vlib_cli_output (vm, "Could not set ip6-reass pool-size");
+           }
+         else
+           {
+             vlib_cli_output (vm,
+                              "Setting ip6-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)",
+                              reass, packets);
+           }
+       }
+      if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1))
+       {
+         if (map_ip6_reass_conf_ht_ratio (ht_ratio, &reass, &packets))
+           {
+             vlib_cli_output (vm, "Could not set ip6-reass ht-log2len");
+           }
+         else
+           {
+             vlib_cli_output (vm,
+                              "Setting ip6-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)",
+                              reass, packets);
+           }
+       }
+      if (lifetime != ~0)
+       {
+         if (map_ip6_reass_conf_lifetime (lifetime))
+           vlib_cli_output (vm, "Could not set ip6-reass lifetime");
+         else
+           vlib_cli_output (vm, "Setting ip6-reass lifetime");
+       }
+      if (buffers != ~(0ull))
+       {
+         if (map_ip6_reass_conf_buffers (buffers))
+           vlib_cli_output (vm, "Could not set ip6-reass buffers");
+         else
+           vlib_cli_output (vm, "Setting ip6-reass buffers");
+       }
+
+      if (map_main.ip6_reass_conf_buffers >
+         map_main.ip6_reass_conf_pool_size *
+         MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY)
+       {
+         vlib_cli_output (vm,
+                          "Note: 'ip6-reass buffers' > pool-size * max-fragments-per-reassembly.");
+       }
     }
-  }
 
   return 0;
 }
@@ -1098,114 +1263,130 @@ map_params_reass_command_fn (vlib_main_t *vm, unformat_input_t *input, vlib_cli_
  * packet trace format function
  */
 u8 *
-format_map_trace (u8 *s, va_list *args)
+format_map_trace (u8 * s, va_list * args)
 {
-  CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   map_trace_t *t = va_arg (*args, map_trace_t *);
   u32 map_domain_index = t->map_domain_index;
   u16 port = t->port;
 
-  s = format(s, "MAP domain index: %d L4 port: %u", map_domain_index, clib_net_to_host_u16(port));
+  s =
+    format (s, "MAP domain index: %d L4 port: %u", map_domain_index,
+           clib_net_to_host_u16 (port));
 
   return s;
 }
 
 static_always_inline map_ip4_reass_t *
-map_ip4_reass_lookup(map_ip4_reass_key_t *k, u32 bucket, f64 now)
+map_ip4_reass_lookup (map_ip4_reass_key_t * k, u32 bucket, f64 now)
 {
   map_main_t *mm = &map_main;
   u32 ri = mm->ip4_reass_hash_table[bucket];
-  while(ri != MAP_REASS_INDEX_NONE) {
-    map_ip4_reass_t * r = pool_elt_at_index(mm->ip4_reass_pool, ri);
-    if (r->key.as_u64[0] == k->as_u64[0] &&
-        r->key.as_u64[1] == k->as_u64[1] &&
-        now < r->ts + (((f64)mm->ip4_reass_conf_lifetime_ms) / 1000)) {
-      return r;
-    }
-    ri = r->bucket_next;
-  }
+  while (ri != MAP_REASS_INDEX_NONE)
+    {
+      map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri);
+      if (r->key.as_u64[0] == k->as_u64[0] &&
+         r->key.as_u64[1] == k->as_u64[1] &&
+         now < r->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000))
+       {
+         return r;
+       }
+      ri = r->bucket_next;
+    }
   return NULL;
 }
 
 #define map_ip4_reass_pool_index(r) (r - map_main.ip4_reass_pool)
 
 void
-map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop)
+map_ip4_reass_free (map_ip4_reass_t * r, u32 ** pi_to_drop)
 {
   map_main_t *mm = &map_main;
-  map_ip4_reass_get_fragments(r, pi_to_drop);
+  map_ip4_reass_get_fragments (r, pi_to_drop);
 
   // Unlink in hash bucket
   map_ip4_reass_t *r2 = NULL;
   u32 r2i = mm->ip4_reass_hash_table[r->bucket];
-  while (r2i != map_ip4_reass_pool_index(r)) {
-    ASSERT(r2i != MAP_REASS_INDEX_NONE);
-    r2 = pool_elt_at_index(mm->ip4_reass_pool, r2i);
-    r2i = r2->bucket_next;
-  }
-  if (r2) {
-    r2->bucket_next = r->bucket_next;
-  } else {
-    mm->ip4_reass_hash_table[r->bucket] = r->bucket_next;
-  }
+  while (r2i != map_ip4_reass_pool_index (r))
+    {
+      ASSERT (r2i != MAP_REASS_INDEX_NONE);
+      r2 = pool_elt_at_index (mm->ip4_reass_pool, r2i);
+      r2i = r2->bucket_next;
+    }
+  if (r2)
+    {
+      r2->bucket_next = r->bucket_next;
+    }
+  else
+    {
+      mm->ip4_reass_hash_table[r->bucket] = r->bucket_next;
+    }
 
   // Unlink in list
-  if (r->fifo_next == map_ip4_reass_pool_index(r)) {
-    mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
-  } else {
-    if(mm->ip4_reass_fifo_last == map_ip4_reass_pool_index(r))
-      mm->ip4_reass_fifo_last = r->fifo_prev;
-    pool_elt_at_index(mm->ip4_reass_pool, r->fifo_prev)->fifo_next = r->fifo_next;
-    pool_elt_at_index(mm->ip4_reass_pool, r->fifo_next)->fifo_prev = r->fifo_prev;
-  }
+  if (r->fifo_next == map_ip4_reass_pool_index (r))
+    {
+      mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
+    }
+  else
+    {
+      if (mm->ip4_reass_fifo_last == map_ip4_reass_pool_index (r))
+       mm->ip4_reass_fifo_last = r->fifo_prev;
+      pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next =
+       r->fifo_next;
+      pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev =
+       r->fifo_prev;
+    }
 
-  pool_put(mm->ip4_reass_pool, r);
+  pool_put (mm->ip4_reass_pool, r);
   mm->ip4_reass_allocated--;
 }
 
 map_ip4_reass_t *
-map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
-                  u8 protocol, u32 **pi_to_drop)
+map_ip4_reass_get (u32 src, u32 dst, u16 fragment_id,
+                  u8 protocol, u32 ** pi_to_drop)
 {
-  map_ip4_reass_t * r;
+  map_ip4_reass_t *r;
   map_main_t *mm = &map_main;
   map_ip4_reass_key_t k = {.src.data_u32 = src,
-      .dst.data_u32 = dst,
-      .fragment_id = fragment_id,
-      .protocol = protocol };
+    .dst.data_u32 = dst,
+    .fragment_id = fragment_id,
+    .protocol = protocol
+  };
 
   u32 h = 0;
-  h = crc_u32(k.as_u32[0], h);
-  h = crc_u32(k.as_u32[1], h);
-  h = crc_u32(k.as_u32[2], h);
-  h = crc_u32(k.as_u32[3], h);
+  h = crc_u32 (k.as_u32[0], h);
+  h = crc_u32 (k.as_u32[1], h);
+  h = crc_u32 (k.as_u32[2], h);
+  h = crc_u32 (k.as_u32[3], h);
   h = h >> (32 - mm->ip4_reass_ht_log2len);
 
-  f64 now = vlib_time_now(mm->vlib_main);
+  f64 now = vlib_time_now (mm->vlib_main);
 
   //Cache garbage collection
-  while (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    map_ip4_reass_t *last = pool_elt_at_index(mm->ip4_reass_pool, mm->ip4_reass_fifo_last);
-    if (last->ts + (((f64)mm->ip4_reass_conf_lifetime_ms) / 1000) < now)
-      map_ip4_reass_free(last, pi_to_drop);
-    else
-      break;
-  }
+  while (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      map_ip4_reass_t *last =
+       pool_elt_at_index (mm->ip4_reass_pool, mm->ip4_reass_fifo_last);
+      if (last->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000) < now)
+       map_ip4_reass_free (last, pi_to_drop);
+      else
+       break;
+    }
 
-  if ((r = map_ip4_reass_lookup(&k, h, now)))
+  if ((r = map_ip4_reass_lookup (&k, h, now)))
     return r;
 
   if (mm->ip4_reass_allocated >= mm->ip4_reass_conf_pool_size)
     return NULL;
 
-  pool_get(mm->ip4_reass_pool, r);
+  pool_get (mm->ip4_reass_pool, r);
   mm->ip4_reass_allocated++;
   int i;
-  for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+  for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
     r->fragments[i] = ~0;
 
-  u32 ri = map_ip4_reass_pool_index(r);
+  u32 ri = map_ip4_reass_pool_index (r);
 
   //Link in new bucket
   r->bucket = h;
@@ -1213,15 +1394,20 @@ map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
   mm->ip4_reass_hash_table[h] = ri;
 
   //Link in fifo
-  if(mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    r->fifo_next = pool_elt_at_index(mm->ip4_reass_pool, mm->ip4_reass_fifo_last)->fifo_next;
-    r->fifo_prev = mm->ip4_reass_fifo_last;
-    pool_elt_at_index(mm->ip4_reass_pool, r->fifo_prev)->fifo_next = ri;
-    pool_elt_at_index(mm->ip4_reass_pool, r->fifo_next)->fifo_prev = ri;
-  } else {
-    r->fifo_next = r->fifo_prev = ri;
-    mm->ip4_reass_fifo_last = ri;
-  }
+  if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      r->fifo_next =
+       pool_elt_at_index (mm->ip4_reass_pool,
+                          mm->ip4_reass_fifo_last)->fifo_next;
+      r->fifo_prev = mm->ip4_reass_fifo_last;
+      pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next = ri;
+      pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev = ri;
+    }
+  else
+    {
+      r->fifo_next = r->fifo_prev = ri;
+      mm->ip4_reass_fifo_last = ri;
+    }
 
   //Set other fields
   r->ts = now;
@@ -1236,128 +1422,144 @@ map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
 }
 
 int
-map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi)
+map_ip4_reass_add_fragment (map_ip4_reass_t * r, u32 pi)
 {
   if (map_main.ip4_reass_buffered_counter >= map_main.ip4_reass_conf_buffers)
     return -1;
 
   int i;
-  for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
-    if(r->fragments[i] == ~0) {
-      r->fragments[i] = pi;
-      map_main.ip4_reass_buffered_counter++;
-      return 0;
-    }
+  for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+    if (r->fragments[i] == ~0)
+      {
+       r->fragments[i] = pi;
+       map_main.ip4_reass_buffered_counter++;
+       return 0;
+      }
   return -1;
 }
 
 static_always_inline map_ip6_reass_t *
-map_ip6_reass_lookup(map_ip6_reass_key_t *k, u32 bucket, f64 now)
+map_ip6_reass_lookup (map_ip6_reass_key_t * k, u32 bucket, f64 now)
 {
   map_main_t *mm = &map_main;
   u32 ri = mm->ip6_reass_hash_table[bucket];
-  while(ri != MAP_REASS_INDEX_NONE) {
-    map_ip6_reass_t * r = pool_elt_at_index(mm->ip6_reass_pool, ri);
-    if(now < r->ts + (((f64)mm->ip6_reass_conf_lifetime_ms) / 1000) &&
-        r->key.as_u64[0] == k->as_u64[0] &&
-        r->key.as_u64[1] == k->as_u64[1] &&
-        r->key.as_u64[2] == k->as_u64[2] &&
-        r->key.as_u64[3] == k->as_u64[3] &&
-        r->key.as_u64[4] == k->as_u64[4])
-      return r;
-    ri = r->bucket_next;
-  }
+  while (ri != MAP_REASS_INDEX_NONE)
+    {
+      map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri);
+      if (now < r->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) &&
+         r->key.as_u64[0] == k->as_u64[0] &&
+         r->key.as_u64[1] == k->as_u64[1] &&
+         r->key.as_u64[2] == k->as_u64[2] &&
+         r->key.as_u64[3] == k->as_u64[3] &&
+         r->key.as_u64[4] == k->as_u64[4])
+       return r;
+      ri = r->bucket_next;
+    }
   return NULL;
 }
 
 #define map_ip6_reass_pool_index(r) (r - map_main.ip6_reass_pool)
 
 void
-map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop)
+map_ip6_reass_free (map_ip6_reass_t * r, u32 ** pi_to_drop)
 {
   map_main_t *mm = &map_main;
   int i;
-  for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
-    if(r->fragments[i].pi != ~0) {
-      vec_add1(*pi_to_drop, r->fragments[i].pi);
-      r->fragments[i].pi = ~0;
-      map_main.ip6_reass_buffered_counter--;
-    }
+  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+    if (r->fragments[i].pi != ~0)
+      {
+       vec_add1 (*pi_to_drop, r->fragments[i].pi);
+       r->fragments[i].pi = ~0;
+       map_main.ip6_reass_buffered_counter--;
+      }
 
   // Unlink in hash bucket
   map_ip6_reass_t *r2 = NULL;
   u32 r2i = mm->ip6_reass_hash_table[r->bucket];
-  while (r2i != map_ip6_reass_pool_index(r)) {
-    ASSERT(r2i != MAP_REASS_INDEX_NONE);
-    r2 = pool_elt_at_index(mm->ip6_reass_pool, r2i);
-    r2i = r2->bucket_next;
-  }
-  if (r2) {
-    r2->bucket_next = r->bucket_next;
-  } else {
-    mm->ip6_reass_hash_table[r->bucket] = r->bucket_next;
-  }
+  while (r2i != map_ip6_reass_pool_index (r))
+    {
+      ASSERT (r2i != MAP_REASS_INDEX_NONE);
+      r2 = pool_elt_at_index (mm->ip6_reass_pool, r2i);
+      r2i = r2->bucket_next;
+    }
+  if (r2)
+    {
+      r2->bucket_next = r->bucket_next;
+    }
+  else
+    {
+      mm->ip6_reass_hash_table[r->bucket] = r->bucket_next;
+    }
 
   // Unlink in list
-  if (r->fifo_next == map_ip6_reass_pool_index(r)) {
-    //Single element in the list, list is now empty
-    mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
-  } else {
-    if (mm->ip6_reass_fifo_last == map_ip6_reass_pool_index(r)) //First element
-      mm->ip6_reass_fifo_last = r->fifo_prev;
-    pool_elt_at_index(mm->ip6_reass_pool, r->fifo_prev)->fifo_next = r->fifo_next;
-    pool_elt_at_index(mm->ip6_reass_pool, r->fifo_next)->fifo_prev = r->fifo_prev;
-  }
+  if (r->fifo_next == map_ip6_reass_pool_index (r))
+    {
+      //Single element in the list, list is now empty
+      mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
+    }
+  else
+    {
+      if (mm->ip6_reass_fifo_last == map_ip6_reass_pool_index (r))     //First element
+       mm->ip6_reass_fifo_last = r->fifo_prev;
+      pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next =
+       r->fifo_next;
+      pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev =
+       r->fifo_prev;
+    }
 
   // Free from pool if necessary
-  pool_put(mm->ip6_reass_pool, r);
+  pool_put (mm->ip6_reass_pool, r);
   mm->ip6_reass_allocated--;
 }
 
 map_ip6_reass_t *
-map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
-                  u8 protocol, u32 **pi_to_drop)
+map_ip6_reass_get (ip6_address_t * src, ip6_address_t * dst, u32 fragment_id,
+                  u8 protocol, u32 ** pi_to_drop)
 {
-  map_ip6_reass_t * r;
+  map_ip6_reass_t *r;
   map_main_t *mm = &map_main;
   map_ip6_reass_key_t k = {
-      .src = *src,
-      .dst = *dst,
-      .fragment_id = fragment_id,
-      .protocol = protocol };
+    .src = *src,
+    .dst = *dst,
+    .fragment_id = fragment_id,
+    .protocol = protocol
+  };
 
   u32 h = 0;
   int i;
-  for (i=0; i<10; i++)
-    h = crc_u32(k.as_u32[i], h);
+  for (i = 0; i < 10; i++)
+    h = crc_u32 (k.as_u32[i], h);
   h = h >> (32 - mm->ip6_reass_ht_log2len);
 
-  f64 now = vlib_time_now(mm->vlib_main);
+  f64 now = vlib_time_now (mm->vlib_main);
 
   //Cache garbage collection
-  while (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    map_ip6_reass_t *last = pool_elt_at_index(mm->ip6_reass_pool, mm->ip6_reass_fifo_last);
-    if (last->ts + (((f64)mm->ip6_reass_conf_lifetime_ms) / 1000) < now)
-      map_ip6_reass_free(last, pi_to_drop);
-    else
-      break;
-  }
+  while (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      map_ip6_reass_t *last =
+       pool_elt_at_index (mm->ip6_reass_pool, mm->ip6_reass_fifo_last);
+      if (last->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) < now)
+       map_ip6_reass_free (last, pi_to_drop);
+      else
+       break;
+    }
 
-  if ((r = map_ip6_reass_lookup(&k, h, now)))
+  if ((r = map_ip6_reass_lookup (&k, h, now)))
     return r;
 
   if (mm->ip6_reass_allocated >= mm->ip6_reass_conf_pool_size)
     return NULL;
 
-  pool_get(mm->ip6_reass_pool, r);
+  pool_get (mm->ip6_reass_pool, r);
   mm->ip6_reass_allocated++;
-  for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) {
-    r->fragments[i].pi = ~0;
-    r->fragments[i].next_data_len = 0;
-    r->fragments[i].next_data_offset = 0;
-  }
+  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+    {
+      r->fragments[i].pi = ~0;
+      r->fragments[i].next_data_len = 0;
+      r->fragments[i].next_data_offset = 0;
+    }
 
-  u32 ri = map_ip6_reass_pool_index(r);
+  u32 ri = map_ip6_reass_pool_index (r);
 
   //Link in new bucket
   r->bucket = h;
@@ -1365,15 +1567,20 @@ map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
   mm->ip6_reass_hash_table[h] = ri;
 
   //Link in fifo
-  if(mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    r->fifo_next = pool_elt_at_index(mm->ip6_reass_pool, mm->ip6_reass_fifo_last)->fifo_next;
-    r->fifo_prev = mm->ip6_reass_fifo_last;
-    pool_elt_at_index(mm->ip6_reass_pool, r->fifo_prev)->fifo_next = ri;
-    pool_elt_at_index(mm->ip6_reass_pool, r->fifo_next)->fifo_prev = ri;
-  } else {
-    r->fifo_next = r->fifo_prev = ri;
-    mm->ip6_reass_fifo_last = ri;
-  }
+  if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      r->fifo_next =
+       pool_elt_at_index (mm->ip6_reass_pool,
+                          mm->ip6_reass_fifo_last)->fifo_next;
+      r->fifo_prev = mm->ip6_reass_fifo_last;
+      pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next = ri;
+      pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev = ri;
+    }
+  else
+    {
+      r->fifo_next = r->fifo_prev = ri;
+      mm->ip6_reass_fifo_last = ri;
+    }
 
   //Set other fields
   r->ts = now;
@@ -1387,9 +1594,9 @@ map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
 }
 
 int
-map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
-                           u16 data_offset, u16 next_data_offset,
-                           u8 *data_start, u16 data_len)
+map_ip6_reass_add_fragment (map_ip6_reass_t * r, u32 pi,
+                           u16 data_offset, u16 next_data_offset,
+                           u8 * data_start, u16 data_len)
 {
   map_ip6_fragment_t *f = NULL, *prev_f = NULL;
   u16 copied_len = (data_len > 20) ? 20 : data_len;
@@ -1400,258 +1607,310 @@ map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
   //Lookup for fragments for the current buffer
   //and the one before that
   int i;
-  for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) {
-    if (data_offset && r->fragments[i].next_data_offset == data_offset) {
-      prev_f = &r->fragments[i]; // This is buffer for previous packet
-    } else if (r->fragments[i].next_data_offset == next_data_offset) {
-      f = &r->fragments[i]; // This is a buffer for the current packet
-    } else if (r->fragments[i].next_data_offset == 0) { //Available
-      if (f == NULL)
-        f = &r->fragments[i];
-      else if (prev_f == NULL)
-        prev_f = &r->fragments[i];
+  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+    {
+      if (data_offset && r->fragments[i].next_data_offset == data_offset)
+       {
+         prev_f = &r->fragments[i];    // This is buffer for previous packet
+       }
+      else if (r->fragments[i].next_data_offset == next_data_offset)
+       {
+         f = &r->fragments[i]; // This is a buffer for the current packet
+       }
+      else if (r->fragments[i].next_data_offset == 0)
+       {                       //Available
+         if (f == NULL)
+           f = &r->fragments[i];
+         else if (prev_f == NULL)
+           prev_f = &r->fragments[i];
+       }
     }
-  }
 
   if (!f || f->pi != ~0)
     return -1;
 
-  if (data_offset) {
-    if (!prev_f)
-      return -1;
+  if (data_offset)
+    {
+      if (!prev_f)
+       return -1;
 
-    clib_memcpy(prev_f->next_data, data_start, copied_len);
-    prev_f->next_data_len = copied_len;
-    prev_f->next_data_offset = data_offset;
-  } else {
-    if (((ip4_header_t *)data_start)->ip_version_and_header_length != 0x45)
-      return -1;
+      clib_memcpy (prev_f->next_data, data_start, copied_len);
+      prev_f->next_data_len = copied_len;
+      prev_f->next_data_offset = data_offset;
+    }
+  else
+    {
+      if (((ip4_header_t *) data_start)->ip_version_and_header_length != 0x45)
+       return -1;
 
-    if (r->ip4_header.ip_version_and_header_length == 0)
-      clib_memcpy(&r->ip4_header, data_start, sizeof(ip4_header_t));
-  }
+      if (r->ip4_header.ip_version_and_header_length == 0)
+       clib_memcpy (&r->ip4_header, data_start, sizeof (ip4_header_t));
+    }
 
-  if(data_len > 20) {
-    f->next_data_offset = next_data_offset;
-    f->pi = pi;
-    map_main.ip6_reass_buffered_counter++;
-  }
+  if (data_len > 20)
+    {
+      f->next_data_offset = next_data_offset;
+      f->pi = pi;
+      map_main.ip6_reass_buffered_counter++;
+    }
   return 0;
 }
 
-void map_ip4_reass_reinit(u32 *trashed_reass, u32 *dropped_packets)
+void
+map_ip4_reass_reinit (u32 * trashed_reass, u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
   int i;
 
-  if(dropped_packets)
+  if (dropped_packets)
     *dropped_packets = mm->ip4_reass_buffered_counter;
-  if(trashed_reass)
+  if (trashed_reass)
     *trashed_reass = mm->ip4_reass_allocated;
-  if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    u16 ri = mm->ip4_reass_fifo_last;
-    do {
-      map_ip4_reass_t *r = pool_elt_at_index(mm->ip4_reass_pool, ri);
-      for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
-        if (r->fragments[i] != ~0)
-          map_ip4_drop_pi(r->fragments[i]);
-
-      ri = r->fifo_next;
-      pool_put(mm->ip4_reass_pool, r);
-    } while (ri != mm->ip4_reass_fifo_last);
-  }
+  if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      u16 ri = mm->ip4_reass_fifo_last;
+      do
+       {
+         map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri);
+         for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+           if (r->fragments[i] != ~0)
+             map_ip4_drop_pi (r->fragments[i]);
+
+         ri = r->fifo_next;
+         pool_put (mm->ip4_reass_pool, r);
+       }
+      while (ri != mm->ip4_reass_fifo_last);
+    }
 
-  vec_free(mm->ip4_reass_hash_table);
-  vec_resize(mm->ip4_reass_hash_table, 1 << mm->ip4_reass_ht_log2len);
-  for (i=0; i<(1 << mm->ip4_reass_ht_log2len); i++)
+  vec_free (mm->ip4_reass_hash_table);
+  vec_resize (mm->ip4_reass_hash_table, 1 << mm->ip4_reass_ht_log2len);
+  for (i = 0; i < (1 << mm->ip4_reass_ht_log2len); i++)
     mm->ip4_reass_hash_table[i] = MAP_REASS_INDEX_NONE;
-  pool_free(mm->ip4_reass_pool);
-  pool_alloc(mm->ip4_reass_pool, mm->ip4_reass_conf_pool_size);
+  pool_free (mm->ip4_reass_pool);
+  pool_alloc (mm->ip4_reass_pool, mm->ip4_reass_conf_pool_size);
 
   mm->ip4_reass_allocated = 0;
   mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
   mm->ip4_reass_buffered_counter = 0;
 }
 
-u8 map_get_ht_log2len(f32 ht_ratio, u16 pool_size)
+u8
+map_get_ht_log2len (f32 ht_ratio, u16 pool_size)
 {
-  u32 desired_size = (u32)(pool_size * ht_ratio);
+  u32 desired_size = (u32) (pool_size * ht_ratio);
   u8 i;
-  for (i=1; i<31; i++)
+  for (i = 1; i < 31; i++)
     if ((1 << i) >= desired_size)
       return i;
   return 4;
 }
 
-int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets)
+int
+map_ip4_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass,
+                            u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
   if (ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX)
     return -1;
 
-  map_ip4_reass_lock();
+  map_ip4_reass_lock ();
   mm->ip4_reass_conf_ht_ratio = ht_ratio;
-  mm->ip4_reass_ht_log2len = map_get_ht_log2len(ht_ratio, mm->ip4_reass_conf_pool_size);
-  map_ip4_reass_reinit(trashed_reass, dropped_packets);
-  map_ip4_reass_unlock();
+  mm->ip4_reass_ht_log2len =
+    map_get_ht_log2len (ht_ratio, mm->ip4_reass_conf_pool_size);
+  map_ip4_reass_reinit (trashed_reass, dropped_packets);
+  map_ip4_reass_unlock ();
   return 0;
 }
 
-int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets)
+int
+map_ip4_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass,
+                             u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
   if (pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX)
     return -1;
 
-  map_ip4_reass_lock();
+  map_ip4_reass_lock ();
   mm->ip4_reass_conf_pool_size = pool_size;
-  map_ip4_reass_reinit(trashed_reass, dropped_packets);
-  map_ip4_reass_unlock();
+  map_ip4_reass_reinit (trashed_reass, dropped_packets);
+  map_ip4_reass_unlock ();
   return 0;
 }
 
-int map_ip4_reass_conf_lifetime(u16 lifetime_ms)
+int
+map_ip4_reass_conf_lifetime (u16 lifetime_ms)
 {
   map_main.ip4_reass_conf_lifetime_ms = lifetime_ms;
   return 0;
 }
 
-int map_ip4_reass_conf_buffers(u32 buffers)
+int
+map_ip4_reass_conf_buffers (u32 buffers)
 {
   map_main.ip4_reass_conf_buffers = buffers;
   return 0;
 }
 
-void map_ip6_reass_reinit(u32 *trashed_reass, u32 *dropped_packets)
+void
+map_ip6_reass_reinit (u32 * trashed_reass, u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
-  if(dropped_packets)
+  if (dropped_packets)
     *dropped_packets = mm->ip6_reass_buffered_counter;
-  if(trashed_reass)
+  if (trashed_reass)
     *trashed_reass = mm->ip6_reass_allocated;
   int i;
-  if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) {
-    u16 ri = mm->ip6_reass_fifo_last;
-    do {
-      map_ip6_reass_t *r = pool_elt_at_index(mm->ip6_reass_pool, ri);
-      for (i=0; i<MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
-        if (r->fragments[i].pi != ~0)
-          map_ip6_drop_pi(r->fragments[i].pi);
-
-      ri = r->fifo_next;
-      pool_put(mm->ip6_reass_pool, r);
-    } while (ri != mm->ip6_reass_fifo_last);
-    mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
-  }
+  if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE)
+    {
+      u16 ri = mm->ip6_reass_fifo_last;
+      do
+       {
+         map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri);
+         for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
+           if (r->fragments[i].pi != ~0)
+             map_ip6_drop_pi (r->fragments[i].pi);
+
+         ri = r->fifo_next;
+         pool_put (mm->ip6_reass_pool, r);
+       }
+      while (ri != mm->ip6_reass_fifo_last);
+      mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
+    }
 
-  vec_free(mm->ip6_reass_hash_table);
-  vec_resize(mm->ip6_reass_hash_table, 1 << mm->ip6_reass_ht_log2len);
-  for(i=0; i<(1 << mm->ip6_reass_ht_log2len); i++)
+  vec_free (mm->ip6_reass_hash_table);
+  vec_resize (mm->ip6_reass_hash_table, 1 << mm->ip6_reass_ht_log2len);
+  for (i = 0; i < (1 << mm->ip6_reass_ht_log2len); i++)
     mm->ip6_reass_hash_table[i] = MAP_REASS_INDEX_NONE;
-  pool_free(mm->ip6_reass_pool);
-  pool_alloc(mm->ip6_reass_pool, mm->ip4_reass_conf_pool_size);
+  pool_free (mm->ip6_reass_pool);
+  pool_alloc (mm->ip6_reass_pool, mm->ip4_reass_conf_pool_size);
 
   mm->ip6_reass_allocated = 0;
   mm->ip6_reass_buffered_counter = 0;
 }
 
-int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets)
+int
+map_ip6_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass,
+                            u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
   if (ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX)
     return -1;
 
-  map_ip6_reass_lock();
+  map_ip6_reass_lock ();
   mm->ip6_reass_conf_ht_ratio = ht_ratio;
-  mm->ip6_reass_ht_log2len = map_get_ht_log2len(ht_ratio, mm->ip6_reass_conf_pool_size);
-  map_ip6_reass_reinit(trashed_reass, dropped_packets);
-  map_ip6_reass_unlock();
+  mm->ip6_reass_ht_log2len =
+    map_get_ht_log2len (ht_ratio, mm->ip6_reass_conf_pool_size);
+  map_ip6_reass_reinit (trashed_reass, dropped_packets);
+  map_ip6_reass_unlock ();
   return 0;
 }
 
-int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets)
+int
+map_ip6_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass,
+                             u32 * dropped_packets)
 {
   map_main_t *mm = &map_main;
   if (pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX)
     return -1;
 
-  map_ip6_reass_lock();
+  map_ip6_reass_lock ();
   mm->ip6_reass_conf_pool_size = pool_size;
-  map_ip6_reass_reinit(trashed_reass, dropped_packets);
-  map_ip6_reass_unlock();
+  map_ip6_reass_reinit (trashed_reass, dropped_packets);
+  map_ip6_reass_unlock ();
   return 0;
 }
 
-int map_ip6_reass_conf_lifetime(u16 lifetime_ms)
+int
+map_ip6_reass_conf_lifetime (u16 lifetime_ms)
 {
   map_main.ip6_reass_conf_lifetime_ms = lifetime_ms;
   return 0;
 }
 
-int map_ip6_reass_conf_buffers(u32 buffers)
+int
+map_ip6_reass_conf_buffers (u32 buffers)
 {
   map_main.ip6_reass_conf_buffers = buffers;
   return 0;
 }
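The four setters above are the runtime knobs for IPv6 virtual reassembly: ht-ratio and pool-size force a full reinit (flushing in-flight reassemblies and reporting the counts through the two out-parameters), whereas lifetime and buffers simply record the new value. A minimal usage sketch from a hypothetical caller, not part of this patch:

    u32 trashed_reass = 0, dropped_packets = 0;

    /* Returns -1 if the requested size exceeds MAP_IP6_REASS_CONF_POOL_SIZE_MAX. */
    if (map_ip6_reass_conf_pool_size (1024, &trashed_reass, &dropped_packets))
      clib_warning ("map: ip6 reassembly pool size out of range");
    else
      clib_warning ("map: reinit flushed %u reassemblies, dropped %u buffered packets",
                    trashed_reass, dropped_packets);

    /* Lifetime and buffer limits take effect without a reinit. */
    map_ip6_reass_conf_lifetime (100);        /* ms */
    map_ip6_reass_conf_buffers (2048);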
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_ip4_reass_lifetime_command, static) = {
   .path = "map params reassembly",
   .short_help = "[ip4 | ip6] [lifetime <lifetime-ms>] [pool-size <pool-size>] [buffers <buffers>] [ht-ratio <ht-ratio>]",
   .function = map_params_reass_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_traffic_class_command, static) = {
   .path = "map params traffic-class",
   .short_help = 
   "traffic-class {0x0-0xff | copy}",
   .function = map_traffic_class_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_pre_resolve_command, static) = {
   .path = "map params pre-resolve",
   .short_help = 
   "pre-resolve {ip4-nh <address>} | {ip6-nh <address>}",
   .function = map_pre_resolve_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_security_check_command, static) = {
   .path = "map params security-check",
   .short_help = 
   "security-check on|off",
   .function = map_security_check_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_icmp_relay_source_address_command, static) = {
   .path = "map params icmp source-address",
    .short_help = "source-address <ip4-address>",
   .function = map_icmp_relay_source_address_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_icmp_unreachables_command, static) = {
   .path = "map params icmp6 unreachables",
   .short_help = "unreachables {on|off}",
   .function = map_icmp_unreachables_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_fragment_command, static) = {
   .path = "map params fragment",
   .short_help = "[inner|outer] [ignore-df [on|off]]",
   .function = map_fragment_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_fragment_df_command, static) = {
   .path = "map params fragment ignore-df",
   .short_help = "on|off",
   .function = map_fragment_df_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_security_check_frag_command, static) = {
   .path = "map params security-check fragments",
   .short_help = 
   "fragments on|off",
   .function = map_security_check_frag_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_add_domain_command, static) = {
   .path = "map add domain",
   .short_help = 
@@ -1659,48 +1918,60 @@ VLIB_CLI_COMMAND(map_add_domain_command, static) = {
       "ea-bits-len <n> psid-offset <n> psid-len <n> [map-t] [mtu <mtu>]",
   .function = map_add_domain_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_add_rule_command, static) = {
   .path = "map add rule",
   .short_help = 
   "map add rule index <domain> psid <psid> ip6-dst <ip6-addr>",
   .function = map_add_rule_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(map_del_command, static) = {
   .path = "map del domain",
   .short_help = 
   "map del domain index <domain>",
   .function = map_del_domain_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(show_map_domain_command, static) = {
   .path = "show map domain",
   .function = show_map_domain_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(show_map_stats_command, static) = {
   .path = "show map stats",
   .function = show_map_stats_command_fn,
 };
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND(show_map_fragments_command, static) = {
   .path = "show map fragments",
   .function = show_map_fragments_command_fn,
 };
+/* *INDENT-ON* */
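Every .function named in the registrations above follows the standard VLIB CLI handler shape. The skeleton below is illustrative only (the handler name is hypothetical; the real map handlers parse their specific keywords with unformat ()):

    static clib_error_t *
    example_map_command_fn (vlib_main_t * vm, unformat_input_t * input,
                            vlib_cli_command_t * cmd)
    {
      /* Parse arguments with unformat (input, ...), update map_main,
       * and return a clib_error_t * on failure or 0 on success. */
      return 0;
    }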
 
 /*
  * map_init
  */
-clib_error_t *map_init (vlib_main_t *vm)
+clib_error_t *
+map_init (vlib_main_t * vm)
 {
   map_main_t *mm = &map_main;
-  mm->vnet_main = vnet_get_main();
+  mm->vnet_main = vnet_get_main ();
   mm->vlib_main = vm;
 
-#ifdef MAP_SKIP_IP6_LOOKUP  
-  memset(&mm->preresolve_ip4, 0, sizeof(mm->preresolve_ip4));
-  memset(&mm->preresolve_ip6, 0, sizeof(mm->preresolve_ip6));
+#ifdef MAP_SKIP_IP6_LOOKUP
+  memset (&mm->preresolve_ip4, 0, sizeof (mm->preresolve_ip4));
+  memset (&mm->preresolve_ip6, 0, sizeof (mm->preresolve_ip6));
   mm->adj4_index = 0;
   mm->adj6_index = 0;
 #endif
@@ -1720,38 +1991,52 @@ clib_error_t *map_init (vlib_main_t *vm)
   mm->frag_inner = false;
   mm->frag_ignore_df = false;
 
-  vec_validate(mm->domain_counters, MAP_N_DOMAIN_COUNTER - 1);
+  vec_validate (mm->domain_counters, MAP_N_DOMAIN_COUNTER - 1);
   mm->domain_counters[MAP_DOMAIN_COUNTER_RX].name = "rx";
   mm->domain_counters[MAP_DOMAIN_COUNTER_TX].name = "tx";
 
-  vlib_validate_simple_counter(&mm->icmp_relayed, 0);
-  vlib_zero_simple_counter(&mm->icmp_relayed, 0);
+  vlib_validate_simple_counter (&mm->icmp_relayed, 0);
+  vlib_zero_simple_counter (&mm->icmp_relayed, 0);
 
   /* IP4 virtual reassembly */
   mm->ip4_reass_hash_table = 0;
   mm->ip4_reass_pool = 0;
-  mm->ip4_reass_lock = clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+  mm->ip4_reass_lock =
+    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
   mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT;
   mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT;
   mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT;
   mm->ip4_reass_conf_buffers = MAP_IP4_REASS_BUFFERS_DEFAULT;
-  mm->ip4_reass_ht_log2len = map_get_ht_log2len(mm->ip4_reass_conf_ht_ratio, mm->ip4_reass_conf_pool_size);
+  mm->ip4_reass_ht_log2len =
+    map_get_ht_log2len (mm->ip4_reass_conf_ht_ratio,
+                       mm->ip4_reass_conf_pool_size);
   mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE;
-  map_ip4_reass_reinit(NULL, NULL);
+  map_ip4_reass_reinit (NULL, NULL);
 
   /* IP6 virtual reassembly */
   mm->ip6_reass_hash_table = 0;
   mm->ip6_reass_pool = 0;
-  mm->ip6_reass_lock = clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+  mm->ip6_reass_lock =
+    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
   mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT;
   mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT;
   mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;
   mm->ip6_reass_conf_buffers = MAP_IP6_REASS_BUFFERS_DEFAULT;
-  mm->ip6_reass_ht_log2len = map_get_ht_log2len(mm->ip6_reass_conf_ht_ratio, mm->ip6_reass_conf_pool_size);
+  mm->ip6_reass_ht_log2len =
+    map_get_ht_log2len (mm->ip6_reass_conf_ht_ratio,
+                       mm->ip6_reass_conf_pool_size);
   mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE;
-  map_ip6_reass_reinit(NULL, NULL);
+  map_ip6_reass_reinit (NULL, NULL);
 
   return 0;
 }
 
-VLIB_INIT_FUNCTION(map_init);
+VLIB_INIT_FUNCTION (map_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index a79da2c..4b3df5f 100644 (file)
 
 #define MAP_SKIP_IP6_LOOKUP 1
 
-typedef enum {
+typedef enum
+{
   MAP_SENDER,
   MAP_RECEIVER
 } map_dir_e;
 
-int map_create_domain(ip4_address_t *ip4_prefix, u8 ip4_prefix_len,
-                     ip6_address_t *ip6_prefix, u8 ip6_prefix_len,
-                     ip6_address_t *ip6_src, u8 ip6_src_len,
-                     u8 ea_bits_len, u8 psid_offset, u8 psid_length,
-                     u32 *map_domain_index, u16 mtu, u8 flags);
-int map_delete_domain(u32 map_domain_index);
-int map_add_del_psid(u32 map_domain_index, u16 psid, ip6_address_t *tep, u8 is_add);
-u8 *format_map_trace(u8 *s, va_list *args);
-i32 ip4_get_port(ip4_header_t *ip, map_dir_e dir, u16 buffer_len);
-i32 ip6_get_port(ip6_header_t *ip6, map_dir_e dir, u16 buffer_len);
-u16 ip4_map_get_port (ip4_header_t *ip, map_dir_e dir);
-
-typedef enum __attribute__ ((__packed__)) {
-  MAP_DOMAIN_PREFIX        = 1 << 0,
-  MAP_DOMAIN_TRANSLATION   = 1 << 1, // The domain uses MAP-T
+int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
+                      ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
+                      ip6_address_t * ip6_src, u8 ip6_src_len,
+                      u8 ea_bits_len, u8 psid_offset, u8 psid_length,
+                      u32 * map_domain_index, u16 mtu, u8 flags);
+int map_delete_domain (u32 map_domain_index);
+int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
+                     u8 is_add);
+u8 *format_map_trace (u8 * s, va_list * args);
+i32 ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len);
+i32 ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len);
+u16 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir);
+
+typedef enum __attribute__ ((__packed__))
+{
+  MAP_DOMAIN_PREFIX = 1 << 0,
+  MAP_DOMAIN_TRANSLATION = 1 << 1,     // The domain uses MAP-T
 } map_domain_flags_e;
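These declarations are the control-plane entry points used by both the CLI and the binary API to provision a MAP domain. A hedged usage sketch with made-up prefix values (error handling abbreviated; flags = 0 selects encapsulation rather than MAP-T):

    u32 map_domain_index = ~0;
    ip4_address_t ip4_prefix;
    ip6_address_t ip6_prefix, ip6_src;

    memset (&ip6_prefix, 0, sizeof (ip6_prefix));
    memset (&ip6_src, 0, sizeof (ip6_src));
    ip4_prefix.as_u32 = clib_host_to_net_u32 (0xc0000200);  /* 192.0.2.0 */
    /* ... fill ip6_prefix / ip6_src with the BR prefixes ... */

    if (map_create_domain (&ip4_prefix, 24, &ip6_prefix, 48, &ip6_src, 64,
                           16 /* ea-bits-len */ , 6 /* psid-offset */ ,
                           8 /* psid-len */ , &map_domain_index,
                           0 /* mtu */ , 0 /* flags */ ))
      clib_warning ("map: domain creation failed");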
 
 /**
@@ -55,16 +57,16 @@ typedef enum __attribute__ ((__packed__)) {
  * In case no structure can be allocated, the fragment is dropped.
  */
 
-#define MAP_IP4_REASS_LIFETIME_DEFAULT (100) /* ms */
+#define MAP_IP4_REASS_LIFETIME_DEFAULT (100)   /* ms */
 #define MAP_IP4_REASS_HT_RATIO_DEFAULT (1.0)
-#define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
+#define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024   // Number of reassembly structures
 #define MAP_IP4_REASS_BUFFERS_DEFAULT 2048
 
-#define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5    // Number of fragment per reassembly
+#define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5   // Number of fragments per reassembly
 
-#define MAP_IP6_REASS_LIFETIME_DEFAULT (100) /* ms */
+#define MAP_IP6_REASS_LIFETIME_DEFAULT (100)   /* ms */
 #define MAP_IP6_REASS_HT_RATIO_DEFAULT (1.0)
-#define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
+#define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024   // Number of reassembly structures
 #define MAP_IP6_REASS_BUFFERS_DEFAULT 2048
 
 #define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5
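With these defaults (ht-ratio 1.0, pool-size 1024) the reassembly hash table ends up with 1 << ht_log2len = 1024 buckets, matching the vec_resize (..., 1 << mm->ip6_reass_ht_log2len) call in map.c above. A rough sketch of the derivation, assuming map_get_ht_log2len () returns the ceiling log2 of ht_ratio * pool_size (the real helper may differ):

    static inline u8
    example_ht_log2len (f32 ht_ratio, u16 pool_size)
    {
      u32 buckets = (u32) (ht_ratio * (f32) pool_size);
      u8 log2len = 0;
      while ((1u << log2len) < buckets)
        log2len++;
      return log2len;              /* 1.0 * 1024 -> 10 */
    }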
@@ -78,7 +80,8 @@ typedef enum __attribute__ ((__packed__)) {
  * This structure _MUST_ be no larger than a single cache line (64 bytes).
  * If more space is needed make a union of ip6_prefix and *rules, those are mutually exclusive.
  */
-typedef struct {
+typedef struct
+{
   ip6_address_t ip6_src;
   ip6_address_t ip6_prefix;
   ip6_address_t *rules;
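The single-cache-line requirement stated in the comment above can also be checked at compile time. The guard below is only a sketch, not part of the patch, and it assumes the typedef closing this struct is named map_domain_t (the name falls outside this hunk):

    /* Illustrative compile-time guard for the 64-byte constraint. */
    _Static_assert (sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES,
                    "map_domain_t must fit in a single cache line");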
@@ -107,6 +110,7 @@ typedef struct {
 /*
  * Hash key, padded out to 16 bytes for fast compare
  */
+/* *INDENT-OFF* */
 typedef union {
   CLIB_PACKED (struct {
     ip4_address_t src;
@@ -117,8 +121,10 @@ typedef union {
   u64 as_u64[2];
   u32 as_u32[4];
 } map_ip4_reass_key_t;
+/* *INDENT-ON* */
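Padding the key to exactly 16 bytes lets a lookup compare two u64 words instead of memcmp'ing the packed fields. A minimal comparison sketch (the helper name is hypothetical):

    static_always_inline int
    map_ip4_reass_key_equal (map_ip4_reass_key_t * a, map_ip4_reass_key_t * b)
    {
      return a->as_u64[0] == b->as_u64[0] && a->as_u64[1] == b->as_u64[1];
    }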
 
-typedef struct {
+typedef struct
+{
   map_ip4_reass_key_t key;
   f64 ts;
 #ifdef MAP_IP4_REASS_COUNT_BYTES
@@ -136,7 +142,8 @@ typedef struct {
 /*
  * MAP domain counters
  */
-typedef enum {
+typedef enum
+{
   /* Simple counters */
   MAP_DOMAIN_IPV4_FRAGMENT = 0,
   /* Combined counters */
@@ -148,6 +155,7 @@ typedef enum {
 /*
 * map_main_t
  */
+/* *INDENT-OFF* */
 typedef union {
   CLIB_PACKED (struct {
     ip6_address_t src;
@@ -158,6 +166,7 @@ typedef union {
   u64 as_u64[5];
   u32 as_u32[10];
 } map_ip6_reass_key_t;
+/* *INDENT-ON* */
 
 typedef struct {
   u32 pi; //Cached packet or ~0
@@ -565,3 +574,11 @@ map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector,
     vlib_put_next_frame(vm, node, next_index, n_left_to_next);
   }
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */