#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ip/ip6_to_ip4.h>
+#include <vnet/ip/reass/ip4_sv_reass.h>
enum ip6_map_next_e
{
#ifdef MAP_SKIP_IP6_LOOKUP
IP6_MAP_NEXT_IP4_REWRITE,
#endif
- IP6_MAP_NEXT_IP6_REASS,
IP6_MAP_NEXT_IP4_REASS,
IP6_MAP_NEXT_IP4_FRAGMENT,
IP6_MAP_NEXT_IP6_ICMP_RELAY,
IP6_MAP_IP6_REASS_N_NEXT,
};
-enum ip6_map_ip4_reass_next_e
+enum ip6_map_post_ip4_reass_next_e
{
- IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
- IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
- IP6_MAP_IP4_REASS_NEXT_DROP,
- IP6_MAP_IP4_REASS_N_NEXT,
+ IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP,
+ IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT,
+ IP6_MAP_POST_IP4_REASS_NEXT_DROP,
+ IP6_MAP_POST_IP4_REASS_N_NEXT,
};
enum ip6_icmp_relay_next_e
IP6_ICMP_RELAY_N_NEXT,
};
-vlib_node_registration_t ip6_map_ip4_reass_node;
+vlib_node_registration_t ip6_map_post_ip4_reass_node;
vlib_node_registration_t ip6_map_ip6_reass_node;
static vlib_node_registration_t ip6_map_icmp_relay_node;
} map_ip6_map_ip4_reass_trace_t;
/* Trace formatter for the ip6-map-post-ip4-reass node: prints the MAP
 * domain index, the L4 port (converted from network to host byte order
 * by the '+' side of this hunk), and whether the packet was cached or
 * forwarded by reassembly. */
u8 *
-format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
+format_ip6_map_post_ip4_reass_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  map_ip6_map_ip4_reass_trace_t *t =
    va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
-		 t->map_domain_index, t->port,
+		 t->map_domain_index, clib_net_to_host_u16 (t->port),
		 t->cached ? "cached" : "forwarded");
}
}
static_always_inline void
-ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
- ip6_header_t * ip6, u32 * next, u8 * error)
+ip6_map_security_check (map_domain_t * d, vlib_buffer_t * b0,
+ ip4_header_t * ip4, ip6_header_t * ip6, u32 * next,
+ u8 * error)
{
map_main_t *mm = &map_main;
if (d->ea_bits_len || d->rules)
}
else
{
- *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
+ if (mm->sec_check_frag)
+ {
+ vnet_buffer (b0)->ip.reass.next_index =
+ map_main.ip4_sv_reass_custom_next_index;
+ *next = IP6_MAP_NEXT_IP4_REASS;
+ }
}
}
}
}
-static_always_inline bool
-ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
-{
-#ifdef MAP_SKIP_IP6_LOOKUP
- if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
- {
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
- pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
- return (true);
- }
-#endif
- return (false);
-}
-
/*
* ip6_map
*/
vlib_node_get_runtime (vm, ip6_map_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
&& clib_net_to_host_u16 (ip60->payload_length) > 20))
{
d0 =
- ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
- (ip4_address_t *) & ip40->
+ ip4_map_get_domain ((ip4_address_t *) & ip40->
src_address.as_u32, &map_domain_index0,
&error0);
}
}
else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
{
- next0 = IP6_MAP_NEXT_IP6_REASS;
+ error0 = MAP_ERROR_FRAGMENTED;
}
else
{
&& clib_net_to_host_u16 (ip61->payload_length) > 20))
{
d1 =
- ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
- (ip4_address_t *) & ip41->
+ ip4_map_get_domain ((ip4_address_t *) & ip41->
src_address.as_u32, &map_domain_index1,
&error1);
}
}
else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
{
- next1 = IP6_MAP_NEXT_IP6_REASS;
+ error1 = MAP_ERROR_FRAGMENTED;
}
else
{
if (d0)
{
/* MAP inbound security check */
- ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+ ip6_map_security_check (d0, p0, ip40, ip60, &next0, &error0);
if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
next0 == IP6_MAP_NEXT_IP4_LOOKUP))
(d0->mtu
&& (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
{
- vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.flags = 0;
vnet_buffer (p0)->ip_frag.next_index =
- IP4_FRAG_NEXT_IP4_LOOKUP;
+ IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
if (d1)
{
/* MAP inbound security check */
- ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
+ ip6_map_security_check (d1, p1, ip41, ip61, &next1, &error1);
if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
next1 == IP6_MAP_NEXT_IP4_LOOKUP))
(d1->mtu
&& (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
{
- vnet_buffer (p1)->ip_frag.header_offset = 0;
vnet_buffer (p1)->ip_frag.flags = 0;
vnet_buffer (p1)->ip_frag.next_index =
- IP4_FRAG_NEXT_IP4_LOOKUP;
+ IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
{
- map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
- tr->map_domain_index = map_domain_index0;
- tr->port = port0;
+ map_add_trace (vm, node, p0, map_domain_index0, port0);
}
if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
{
- map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
- tr->map_domain_index = map_domain_index1;
- tr->port = port1;
+ map_add_trace (vm, node, p1, map_domain_index1, port1);
}
if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
&& clib_net_to_host_u16 (ip60->payload_length) > 20))
{
d0 =
- ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
- (ip4_address_t *) & ip40->
+ ip4_map_get_domain ((ip4_address_t *) & ip40->
src_address.as_u32, &map_domain_index0,
&error0);
}
(((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
IP_PROTOCOL_IP_IN_IP))
{
- next0 = IP6_MAP_NEXT_IP6_REASS;
+ error0 = MAP_ERROR_FRAGMENTED;
}
else
{
- error0 = MAP_ERROR_BAD_PROTOCOL;
+ /* XXX: Move get_domain to ip6_get_domain lookup on source */
+ //error0 = MAP_ERROR_BAD_PROTOCOL;
+ vlib_buffer_advance (p0, -sizeof (ip6_header_t));
+ vnet_feature_next (&next0, p0);
}
if (d0)
{
/* MAP inbound security check */
- ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
+ ip6_map_security_check (d0, p0, ip40, ip60, &next0, &error0);
if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
next0 == IP6_MAP_NEXT_IP4_LOOKUP))
(d0->mtu
&& (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
{
- vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.flags = 0;
vnet_buffer (p0)->ip_frag.next_index =
- IP4_FRAG_NEXT_IP4_LOOKUP;
+ IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
}
if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
{
- map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
- tr->map_domain_index = map_domain_index0;
- tr->port = (u16) port0;
+ map_add_trace (vm, node, p0, map_domain_index0, port0);
}
if (mm->icmp6_enabled &&
}
-static_always_inline void
-ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
- map_ip6_reass_t * r, u32 ** fragments_ready,
- u32 ** fragments_to_drop)
-{
- ip4_header_t *ip40;
- ip6_header_t *ip60;
- ip6_frag_hdr_t *frag0;
- vlib_buffer_t *p0;
-
- if (!r->ip4_header.ip_version_and_header_length)
- return;
-
- //The IP header is here, we need to check for packets
- //that can be forwarded
- int i;
- for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
- {
- if (r->fragments[i].pi == ~0 ||
- ((!r->fragments[i].next_data_len)
- && (r->fragments[i].next_data_offset != (0xffff))))
- continue;
-
- p0 = vlib_get_buffer (vm, r->fragments[i].pi);
- ip60 = vlib_buffer_get_current (p0);
- frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
- ip40 = (ip4_header_t *) (frag0 + 1);
-
- if (ip6_frag_hdr_offset (frag0))
- {
- //Not first fragment, add the IPv4 header
- clib_memcpy (ip40, &r->ip4_header, 20);
- }
-
-#ifdef MAP_IP6_REASS_COUNT_BYTES
- r->forwarded +=
- clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
-#endif
-
- if (ip6_frag_hdr_more (frag0))
- {
- //Not last fragment, we copy end of next
- clib_memcpy (u8_ptr_add (ip60, p0->current_length),
- r->fragments[i].next_data, 20);
- p0->current_length += 20;
- ip60->payload_length = u16_net_add (ip60->payload_length, 20);
- }
-
- if (!ip4_is_fragment (ip40))
- {
- ip40->fragment_id = frag_id_6to4 (frag0->identification);
- ip40->flags_and_fragment_offset =
- clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
- }
- else
- {
- ip40->flags_and_fragment_offset =
- clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
- ip6_frag_hdr_offset (frag0));
- }
-
- if (ip6_frag_hdr_more (frag0))
- ip40->flags_and_fragment_offset |=
- clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
-
- ip40->length =
- clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
- sizeof (*frag0));
- ip40->checksum = ip4_header_checksum (ip40);
-
- if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
- {
- map_ip6_map_ip6_reass_trace_t *tr =
- vlib_add_trace (vm, node, p0, sizeof (*tr));
- tr->offset = ip4_get_fragment_offset (ip40);
- tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
- tr->out = 1;
- }
-
- vec_add1 (*fragments_ready, r->fragments[i].pi);
- r->fragments[i].pi = ~0;
- r->fragments[i].next_data_len = 0;
- r->fragments[i].next_data_offset = 0;
- map_main.ip6_reass_buffered_counter--;
-
- //TODO: Best solution would be that ip6_map handles extension headers
- // and ignores atomic fragment. But in the meantime, let's just copy the header.
-
- u8 protocol = frag0->next_hdr;
- memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
- ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
- protocol;
- vlib_buffer_advance (p0, sizeof (*frag0));
- }
-}
-
void
map_ip6_drop_pi (u32 pi)
{
vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
}
-void
-map_ip4_drop_pi (u32 pi)
-{
- vlib_main_t *vm = vlib_get_main ();
- vlib_node_runtime_t *n =
- vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
- vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
-}
-
/*
- * ip6_reass
- * TODO: We should count the number of successfully
- * transmitted fragment bytes and compare that to the last fragment
- * offset such that we can free the reassembly structure when all fragments
- * have been forwarded.
+ * ip6_map_post_ip4_reass
*/
static uword
-ip6_map_ip6_reass (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+ip6_map_post_ip4_reass (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
vlib_node_runtime_t *error_node =
- vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
- u32 *fragments_to_drop = NULL;
- u32 *fragments_ready = NULL;
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
- while (n_left_from > 0)
- {
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- /* Single loop */
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 pi0;
- vlib_buffer_t *p0;
- u8 error0 = MAP_ERROR_NONE;
- ip6_header_t *ip60;
- ip6_frag_hdr_t *frag0;
- u16 offset;
- u16 next_offset;
- u16 frag_len;
-
- pi0 = to_next[0] = from[0];
- from += 1;
- n_left_from -= 1;
- to_next += 1;
- n_left_to_next -= 1;
-
- p0 = vlib_get_buffer (vm, pi0);
- ip60 = vlib_buffer_get_current (p0);
- frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
- offset =
- clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
- frag_len =
- clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
- next_offset =
- ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
-
- //FIXME: Support other extension headers, maybe
-
- if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
- {
- map_ip6_map_ip6_reass_trace_t *tr =
- vlib_add_trace (vm, node, p0, sizeof (*tr));
- tr->offset = offset;
- tr->frag_len = frag_len;
- tr->out = 0;
- }
-
- map_ip6_reass_lock ();
- map_ip6_reass_t *r =
- map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
- frag0->identification, frag0->next_hdr,
- &fragments_to_drop);
- //FIXME: Use better error codes
- if (PREDICT_FALSE (!r))
- {
- // Could not create a caching entry
- error0 = MAP_ERROR_FRAGMENT_MEMORY;
- }
- else if (PREDICT_FALSE ((frag_len <= 20 &&
- (ip6_frag_hdr_more (frag0) || (!offset)))))
- {
- //Very small fragment are restricted to the last one and
- //can't be the first one
- error0 = MAP_ERROR_FRAGMENT_MALFORMED;
- }
- else
- if (map_ip6_reass_add_fragment
- (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
- {
- map_ip6_reass_free (r, &fragments_to_drop);
- error0 = MAP_ERROR_FRAGMENT_MEMORY;
- }
- else
- {
-#ifdef MAP_IP6_REASS_COUNT_BYTES
- if (!ip6_frag_hdr_more (frag0))
- r->expected_total = offset + frag_len;
-#endif
- ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
- &fragments_to_drop);
-#ifdef MAP_IP6_REASS_COUNT_BYTES
- if (r->forwarded >= r->expected_total)
- map_ip6_reass_free (r, &fragments_to_drop);
-#endif
- }
- map_ip6_reass_unlock ();
-
- if (error0 == MAP_ERROR_NONE)
- {
- if (frag_len > 20)
- {
- //Dequeue the packet
- n_left_to_next++;
- to_next--;
- }
- else
- {
- //All data from that packet was copied no need to keep it, but this is not an error
- p0->error = error_node->errors[MAP_ERROR_NONE];
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- pi0,
- IP6_MAP_IP6_REASS_NEXT_DROP);
- }
- }
- else
- {
- p0->error = error_node->errors[error0];
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, pi0,
- IP6_MAP_IP6_REASS_NEXT_DROP);
- }
- }
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- map_send_all_to_node (vm, fragments_ready, node,
- &error_node->errors[MAP_ERROR_NONE],
- IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
- map_send_all_to_node (vm, fragments_to_drop, node,
- &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
- IP6_MAP_IP6_REASS_NEXT_DROP);
-
- vec_free (fragments_to_drop);
- vec_free (fragments_ready);
- return frame->n_vectors;
-}
-
-/*
- * ip6_ip4_virt_reass
- */
-static uword
-ip6_map_ip4_reass (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
- u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
- vlib_node_runtime_t *error_node =
- vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
+ vlib_node_get_runtime (vm, ip6_map_post_ip4_reass_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vlib_get_thread_index ();
- u32 *fragments_to_drop = NULL;
- u32 *fragments_to_loopback = NULL;
+ u32 thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
ip6_header_t *ip60;
i32 port0 = 0;
u32 map_domain_index0 = ~0;
- u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
- u8 cached = 0;
+ u32 next0 = IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP;
pi0 = to_next[0] = from[0];
from += 1;
ip60 = ((ip6_header_t *) ip40) - 1;
d0 =
- ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
- (ip4_address_t *) & ip40->src_address.as_u32,
+ ip4_map_get_domain ((ip4_address_t *) & ip40->src_address.as_u32,
&map_domain_index0, &error0);
- map_ip4_reass_lock ();
- //This node only deals with fragmented ip4
- map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
- ip40->dst_address.as_u32,
- ip40->fragment_id,
- ip40->protocol,
- &fragments_to_drop);
- if (PREDICT_FALSE (!r))
- {
- // Could not create a caching entry
- error0 = MAP_ERROR_FRAGMENT_MEMORY;
- }
- else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
- {
- // This is a fragment
- if (r->port >= 0)
- {
- // We know the port already
- port0 = r->port;
- }
- else if (map_ip4_reass_add_fragment (r, pi0))
- {
- // Not enough space for caching
- error0 = MAP_ERROR_FRAGMENT_MEMORY;
- map_ip4_reass_free (r, &fragments_to_drop);
- }
- else
- {
- cached = 1;
- }
- }
- else if ((port0 = ip4_get_port (ip40, 1)) == 0)
- {
- // Could not find port from first fragment. Stop reassembling.
- error0 = MAP_ERROR_BAD_PROTOCOL;
- port0 = 0;
- map_ip4_reass_free (r, &fragments_to_drop);
- }
- else
- {
- // Found port. Remember it and loopback saved fragments
- r->port = port0;
- map_ip4_reass_get_fragments (r, &fragments_to_loopback);
- }
-
-#ifdef MAP_IP4_REASS_COUNT_BYTES
- if (!cached && r)
- {
- r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
- if (!ip4_get_fragment_more (ip40))
- r->expected_total =
- ip4_get_fragment_offset (ip40) * 8 +
- clib_host_to_net_u16 (ip40->length) - 20;
- if (r->forwarded >= r->expected_total)
- map_ip4_reass_free (r, &fragments_to_drop);
- }
-#endif
-
- map_ip4_reass_unlock ();
+ port0 = vnet_buffer (p0)->ip.reass.l4_src_port;
if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
error0 =
MAP_ERROR_DECAP_SEC_CHECK;
if (PREDICT_FALSE
- (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
- && error0 == MAP_ERROR_NONE && !cached))
+ (error0 == MAP_ERROR_NONE &&
+ d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
{
- vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.flags = 0;
- vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
+ vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP4_LOOKUP;
vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
- next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
+ next0 = IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT;
}
if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
vlib_add_trace (vm, node, p0, sizeof (*tr));
tr->map_domain_index = map_domain_index0;
tr->port = port0;
- tr->cached = cached;
}
- if (cached)
- {
- //Dequeue the packet
- n_left_to_next++;
- to_next--;
- }
- else
- {
- if (error0 == MAP_ERROR_NONE)
- vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
- thread_index,
- map_domain_index0, 1,
- clib_net_to_host_u16
- (ip40->length));
- next0 =
- (error0 ==
- MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
- p0->error = error_node->errors[error0];
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, pi0, next0);
- }
+ if (error0 == MAP_ERROR_NONE)
+ vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
+ thread_index,
+ map_domain_index0, 1,
+ clib_net_to_host_u16
+ (ip40->length));
+ next0 =
+ (error0 ==
+ MAP_ERROR_NONE) ? next0 : IP6_MAP_POST_IP4_REASS_NEXT_DROP;
+ p0->error = error_node->errors[error0];
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next0);
- //Loopback when we reach the end of the inpu vector
- if (n_left_from == 0 && vec_len (fragments_to_loopback))
- {
- from = vlib_frame_vector_args (frame);
- u32 len = vec_len (fragments_to_loopback);
- if (len <= VLIB_FRAME_SIZE)
- {
- clib_memcpy (from, fragments_to_loopback,
- sizeof (u32) * len);
- n_left_from = len;
- vec_reset_length (fragments_to_loopback);
- }
- else
- {
- clib_memcpy (from,
- fragments_to_loopback + (len -
- VLIB_FRAME_SIZE),
- sizeof (u32) * VLIB_FRAME_SIZE);
- n_left_from = VLIB_FRAME_SIZE;
- _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
- }
- }
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- map_send_all_to_node (vm, fragments_to_drop, node,
- &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
- IP6_MAP_IP4_REASS_NEXT_DROP);
-
- vec_free (fragments_to_drop);
- vec_free (fragments_to_loopback);
return frame->n_vectors;
}
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
map_main_t *mm = &map_main;
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
u16 *fragment_ids, *fid;
from = vlib_frame_vector_args (frame);
};
/* *INDENT-OFF* */
+/* Register "ip6-map" as a feature on the ip6-unicast arc so MAP decap can
+ * run as an input feature. It is ordered after ip6-full-reassembly-feature
+ * (fragments must be reassembled first) and before ip6-flow-classify. */
+VNET_FEATURE_INIT (ip6_map_feature, static) =
+{
+  .arc_name = "ip6-unicast",
+  .node_name = "ip6-map",
+  .runs_before = VNET_FEATURES ("ip6-flow-classify"),
+  .runs_after = VNET_FEATURES ("ip6-full-reassembly-feature"),
+};
+
VLIB_REGISTER_NODE(ip6_map_node) = {
.function = ip6_map,
.name = "ip6-map",
#ifdef MAP_SKIP_IP6_LOOKUP
[IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
#endif
- [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
- [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
+ [IP6_MAP_NEXT_IP4_REASS] = "ip4-sv-reassembly-custom-next",
[IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
[IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
[IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
/* *INDENT-ON* */
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
- .function = ip6_map_ip6_reass,
- .name = "ip6-map-ip6-reass",
- .vector_size = sizeof(u32),
- .format_trace = format_ip6_map_ip6_reass_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = MAP_N_ERROR,
- .error_strings = map_error_strings,
- .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
- .next_nodes = {
- [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
- [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
- },
-};
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
- .function = ip6_map_ip4_reass,
- .name = "ip6-map-ip4-reass",
+VLIB_REGISTER_NODE(ip6_map_post_ip4_reass_node) = {
+ .function = ip6_map_post_ip4_reass,
+ .name = "ip6-map-post-ip4-reass",
.vector_size = sizeof(u32),
- .format_trace = format_ip6_map_ip4_reass_trace,
+ .format_trace = format_ip6_map_post_ip4_reass_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = MAP_N_ERROR,
.error_strings = map_error_strings,
- .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
+ .n_next_nodes = IP6_MAP_POST_IP4_REASS_N_NEXT,
.next_nodes = {
- [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
- [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
- [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
+ [IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ [IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
+ [IP6_MAP_POST_IP4_REASS_NEXT_DROP] = "error-drop",
},
};
/* *INDENT-ON* */
};
/* *INDENT-ON* */
+/* Plugin init: register ip6-map-post-ip4-reass as a custom next node of the
+ * ip4 shallow-virtual reassembly feature (see ip4_sv_reass.h included above)
+ * and remember the returned next-index in map_main; ip6_map_security_check
+ * stores that index in vnet_buffer()->ip.reass.next_index before sending
+ * fragments to IP6_MAP_NEXT_IP4_REASS. Always succeeds (returns 0).
+ * NOTE(review): 'vm' is unused here; kept for the vlib init signature. */
+clib_error_t *
+ip6_map_init (vlib_main_t * vm)
+{
+  map_main.ip4_sv_reass_custom_next_index =
+    ip4_sv_reass_custom_register_next_node
+    (ip6_map_post_ip4_reass_node.index);
+  return 0;
+}
+
+/* Must run after map_init so map_main is set up before we write into it. */
+VLIB_INIT_FUNCTION (ip6_map_init) =
+{
+.runs_after = VLIB_INITS ("map_init"),};
+
+
/*
* fd.io coding-style-patch-verification: ON
*