#include <vnet/fib/ip6_fib.h>
#include <vnet/adj/adj.h>
#include <vnet/map/map_dpo.h>
+#include <vppinfra/crc32.h>
#include "map.h"
-#ifdef __SSE4_2__
-static inline u32
-crc_u32 (u32 data, u32 value)
-{
- __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data]
- "rm" (data));
- return value;
-}
-#else
-#include <vppinfra/xxhash.h>
-
-static inline u32
-crc_u32 (u32 data, u32 value)
-{
- u64 tmp = ((u64) data << 32) | (u64) value;
- return (u32) clib_xxhash (tmp);
-}
-#endif
-
-
/*
* This code supports the following MAP modes:
*
*/
-i32
-ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len)
-{
- //TODO: use buffer length
- if (ip->ip_version_and_header_length != 0x45 ||
- ip4_get_fragment_offset (ip))
- return -1;
-
- if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
- (ip->protocol == IP_PROTOCOL_UDP)))
- {
- udp_header_t *udp = (void *) (ip + 1);
- return (dir == MAP_SENDER) ? udp->src_port : udp->dst_port;
- }
- else if (ip->protocol == IP_PROTOCOL_ICMP)
- {
- icmp46_header_t *icmp = (void *) (ip + 1);
- if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
- {
- return *((u16 *) (icmp + 1));
- }
- else if (clib_net_to_host_u16 (ip->length) >= 64)
- {
- ip = (ip4_header_t *) (icmp + 2);
- if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
- (ip->protocol == IP_PROTOCOL_UDP)))
- {
- udp_header_t *udp = (void *) (ip + 1);
- return (dir == MAP_SENDER) ? udp->dst_port : udp->src_port;
- }
- else if (ip->protocol == IP_PROTOCOL_ICMP)
- {
- icmp46_header_t *icmp = (void *) (ip + 1);
- if (icmp->type == ICMP4_echo_request ||
- icmp->type == ICMP4_echo_reply)
- {
- return *((u16 *) (icmp + 1));
- }
- }
- }
- }
- return -1;
-}
-
-i32
-ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len)
-{
- u8 l4_protocol;
- u16 l4_offset;
- u16 frag_offset;
- u8 *l4;
-
- if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset))
- return -1;
-
- //TODO: Use buffer length
-
- if (frag_offset &&
- ip6_frag_hdr_offset (((ip6_frag_hdr_t *)
- u8_ptr_add (ip6, frag_offset))))
- return -1; //Can't deal with non-first fragment for now
-
- l4 = u8_ptr_add (ip6, l4_offset);
- if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP)
- {
- return (dir ==
- MAP_SENDER) ? ((udp_header_t *) (l4))->src_port : ((udp_header_t
- *)
- (l4))->dst_port;
- }
- else if (l4_protocol == IP_PROTOCOL_ICMP6)
- {
- icmp46_header_t *icmp = (icmp46_header_t *) (l4);
- if (icmp->type == ICMP6_echo_request)
- {
- return (dir == MAP_SENDER) ? ((u16 *) (icmp))[2] : -1;
- }
- else if (icmp->type == ICMP6_echo_reply)
- {
- return (dir == MAP_SENDER) ? -1 : ((u16 *) (icmp))[2];
- }
- }
- return -1;
-}
-
int
map_create_domain (ip4_address_t * ip4_prefix,
map_main_t *mm = &map_main;
dpo_id_t dpo_v4 = DPO_INVALID;
dpo_id_t dpo_v6 = DPO_INVALID;
- fib_node_index_t fei;
map_domain_t *d;
/* Sanity check on the src prefix length */
dpo_reset (&dpo_v4);
/*
- * Multiple MAP domains may share same source IPv6 TEP.
- * In this case the route will exist and be MAP sourced.
- * Find the adj (if any) already contributed and modify it
+ * construct a DPO to use the v6 domain
*/
- fib_prefix_t pfx6 = {
- .fp_proto = FIB_PROTOCOL_IP6,
- .fp_len = d->ip6_src_len,
- .fp_addr = {
- .ip6 = d->ip6_src,
- }
- ,
- };
- fei = fib_table_lookup_exact_match (0, &pfx6);
-
- if (FIB_NODE_INDEX_INVALID != fei)
- {
- dpo_id_t dpo = DPO_INVALID;
-
- if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_MAP, &dpo))
- {
- /*
- * modify the existing MAP to indicate it's shared
- * skip to route add.
- */
- const dpo_id_t *md_dpo;
- map_dpo_t *md;
-
- ASSERT (DPO_LOAD_BALANCE == dpo.dpoi_type);
-
- md_dpo = load_balance_get_bucket (dpo.dpoi_index, 0);
- md = map_dpo_get (md_dpo->dpoi_index);
-
- md->md_domain = ~0;
- dpo_copy (&dpo_v6, md_dpo);
- dpo_reset (&dpo);
-
- goto route_add;
- }
- }
-
if (d->flags & MAP_DOMAIN_TRANSLATION)
map_t_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6);
else
map_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6);
-route_add:
/*
+ * Multiple MAP domains may share same source IPv6 TEP. Which is just dandy.
+ * We are not tracking the sharing. So a v4 lookup to find the correct
+ * domain post decap/translate is always done
+ *
* Create ip6 route. This is a reference counted add. If the prefix
* already exists and is MAP sourced, it is now MAP source n+1 times
* and will need to be removed n+1 times.
*/
+ fib_prefix_t pfx6 = {
+ .fp_proto = FIB_PROTOCOL_IP6,
+ .fp_len = d->ip6_src_len,
+ .fp_addr.ip6 = d->ip6_src,
+ };
+
fib_table_entry_special_dpo_add (0, &pfx6,
FIB_SOURCE_MAP,
FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v6);
}
static u8 *
-format_map_pre_resolve (u8 * s, va_list ap)
+format_map_pre_resolve (u8 * s, va_list * ap)
{
- map_main_pre_resolved_t *pr = va_arg (ap, map_main_pre_resolved_t *);
+ map_main_pre_resolved_t *pr = va_arg (*ap, map_main_pre_resolved_t *);
if (FIB_NODE_INDEX_INVALID != pr->fei)
{
pr->fei = fib_table_entry_special_add (0, // default fib
&pfx,
- FIB_SOURCE_RR,
- FIB_ENTRY_FLAG_NONE,
- ADJ_INDEX_INVALID);
+ FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE);
pr->sibling = fib_entry_child_add (pr->fei, FIB_NODE_TYPE_MAP_E, proto);
map_stack (pr);
}
static void
-map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6)
+map_fib_unresolve (map_main_pre_resolved_t * pr,
+ fib_protocol_t proto, u8 len, const ip46_address_t * addr)
+{
+ fib_prefix_t pfx = {
+ .fp_proto = proto,
+ .fp_len = len,
+ .fp_addr = *addr,
+ };
+
+ fib_entry_child_remove (pr->fei, pr->sibling);
+
+ fib_table_entry_special_remove (0, // default fib
+ &pfx, FIB_SOURCE_RR);
+ dpo_reset (&pr->dpo);
+
+ pr->fei = FIB_NODE_INDEX_INVALID;
+ pr->sibling = FIB_NODE_INDEX_INVALID;
+}
+
+static void
+map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, int is_del)
{
if (ip6 && (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0))
{
ip46_address_t addr = {
.ip6 = *ip6,
};
- map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6],
- FIB_PROTOCOL_IP6, 128, &addr);
+ if (is_del)
+ map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP6],
+ FIB_PROTOCOL_IP6, 128, &addr);
+ else
+ map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6],
+ FIB_PROTOCOL_IP6, 128, &addr);
}
if (ip4 && (ip4->as_u32 != 0))
{
ip46_address_t addr = {
.ip4 = *ip4,
};
- map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4],
- FIB_PROTOCOL_IP4, 32, &addr);
+ if (is_del)
+ map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP4],
+ FIB_PROTOCOL_IP4, 32, &addr);
+ else
+ map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4],
+ FIB_PROTOCOL_IP4, 32, &addr);
}
}
#endif
ip4_address_t ip4nh, *p_v4 = NULL;
ip6_address_t ip6nh, *p_v6 = NULL;
clib_error_t *error = NULL;
+ int is_del = 0;
memset (&ip4nh, 0, sizeof (ip4nh));
memset (&ip6nh, 0, sizeof (ip6nh));
else
if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh))
p_v6 = &ip6nh;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
else
{
error = clib_error_return (0, "unknown input `%U'",
}
}
- map_pre_resolve (p_v4, p_v6);
+ map_pre_resolve (p_v4, p_v6, is_del);
done:
unformat_free (line_input);
map_domain_t *d;
int domains = 0, rules = 0, domaincount = 0, rulecount = 0;
if (pool_elts (mm->domains) == 0)
- vlib_cli_output (vm, "No MAP domains are configured...");
+ {
+ vlib_cli_output (vm, "No MAP domains are configured...");
+ return 0;
+ }
/* *INDENT-OFF* */
pool_foreach(d, mm->domains, ({
{
which = cm - mm->domain_counters;
- for (i = 0; i < vec_len (cm->maxi); i++)
+ for (i = 0; i < vlib_combined_counter_n_counters (cm); i++)
{
vlib_get_combined_counter (cm, i, &v);
total_pkts[which] += v.packets;
};
u32 h = 0;
- h = crc_u32 (k.as_u32[0], h);
- h = crc_u32 (k.as_u32[1], h);
- h = crc_u32 (k.as_u32[2], h);
- h = crc_u32 (k.as_u32[3], h);
+#ifdef clib_crc32c_uses_intrinsics
+ h = clib_crc32c ((u8 *) k.as_u32, 16);
+#else
+ u64 tmp = k.as_u32[0] ^ k.as_u32[1] ^ k.as_u32[2] ^ k.as_u32[3];
+ h = clib_xxhash (tmp);
+#endif
h = h >> (32 - mm->ip4_reass_ht_log2len);
f64 now = vlib_time_now (mm->vlib_main);
u32 h = 0;
int i;
- for (i = 0; i < 10; i++)
- h = crc_u32 (k.as_u32[i], h);
+
+#ifdef clib_crc32c_uses_intrinsics
+ h = clib_crc32c ((u8 *) k.as_u32, 40);
+#else
+ u64 tmp =
+ k.as_u64[0] ^ k.as_u64[1] ^ k.as_u64[2] ^ k.as_u64[3] ^ k.as_u64[4];
+ h = clib_xxhash (tmp);
+#endif
+
h = h >> (32 - mm->ip6_reass_ht_log2len);
f64 now = vlib_time_now (mm->vlib_main);
mm->ip4_reass_pool = 0;
mm->ip4_reass_lock =
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ *mm->ip4_reass_lock = 0;
mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT;
mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT;
mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT;
mm->ip6_reass_pool = 0;
mm->ip6_reass_lock =
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ *mm->ip6_reass_lock = 0;
mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT;
mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT;
mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;