#define MAP_SKIP_IP6_LOOKUP 1
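+/* Return codes shared by the MAP configuration/parameter functions declared below */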
+#define MAP_ERR_GOOD 0
+#define MAP_ERR_BAD_POOL_SIZE -1
+#define MAP_ERR_BAD_HT_RATIO -2
+#define MAP_ERR_BAD_LIFETIME -3
+#define MAP_ERR_BAD_BUFFERS -4
+#define MAP_ERR_BAD_BUFFERS_TOO_LARGE -5
+
int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
ip6_address_t * ip6_src, u8 ip6_src_len,
int map_delete_domain (u32 map_domain_index);
int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
u8 is_add);
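+/* Enable or disable MAP-E (or MAP-T when is_translation is set) on an interface */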
+int map_if_enable_disable (bool is_enable, u32 sw_if_index,
+ bool is_translation);
u8 *format_map_trace (u8 * s, va_list * args);
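+/* Setters for global MAP runtime parameters (fragmentation, ICMP relay,
+ * pre-resolve, reassembly, security check, traffic class, TCP MSS clamping) */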
+int map_param_set_fragmentation (bool inner, bool ignore_df);
+int map_param_set_icmp (ip4_address_t * ip4_err_relay_src);
+int map_param_set_icmp6 (u8 enable_unreachable);
+void map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, int is_del);
+int map_param_set_reassembly (bool is_ipv6, u16 lifetime_ms,
+ u16 pool_size, u32 buffers, f64 ht_ratio,
+ u32 * reass, u32 * packets);
+int map_param_set_security_check (bool enable, bool fragments);
+int map_param_set_traffic_class (bool copy, u8 tc);
+int map_param_set_tcp (u16 tcp_mss);
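+/* Illustrative sketch only, not part of this header: a CLI or API handler
+ * would typically check the MAP_ERR_* result of these setters, e.g.
+ *   if (map_param_set_tcp (1440) != MAP_ERR_GOOD)
+ *     return -1;
+ */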
+
typedef enum
{
MAP_DOMAIN_PREFIX = 1 << 0,
bool sec_check_frag; /* Inbound security check for (subsequent) fragments */
bool icmp6_enabled; /* Send destination unreachable for security check failure */
- bool is_ce; /* If this MAP node is a Customer Edge router*/
+ u16 tcp_mss; /* TCP MSS clamp value */
/* ICMPv6 -> ICMPv4 relay parameters */
ip4_address_t icmp4_src_address;
void
map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);
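+/* Test-and-set spinlock protecting the IPv4 reassembly structures */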
-#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
+#define map_ip4_reass_lock() while (clib_atomic_test_and_set (map_main.ip4_reass_lock)) {}
#define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)
static_always_inline void
void
map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
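+/* Same test-and-set spinlock pattern for the IPv6 reassembly structures */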
-#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
+#define map_ip6_reass_lock() while (clib_atomic_test_and_set (map_main.ip6_reass_lock)) {}
#define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)
int
static_always_inline void
ip4_map_t_embedded_address (map_domain_t *d,
- ip6_address_t *ip6, const ip4_address_t *ip4)
+ ip6_address_t *ip6, const ip4_address_t *ip4)
{
- ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
+ ASSERT(d->ip6_src_len == 96 || d->ip6_src_len == 64); /* Only /64 and /96 source prefixes are supported for now */
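+ /* RFC 6052 embedding: the IPv4 address sits at byte 9 for a /64 prefix (byte 8 is the reserved u-octet) and at byte 12 for /96 */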
+ u8 offset = d->ip6_src_len == 64 ? 9 : 12;
ip6->as_u64[0] = d->ip6_src.as_u64[0];
- ip6->as_u32[2] = d->ip6_src.as_u32[2];
- ip6->as_u32[3] = ip4->as_u32;
+ ip6->as_u64[1] = d->ip6_src.as_u64[1];
+ clib_memcpy_fast(&ip6->as_u8[offset], ip4, 4);
}
static_always_inline u32
ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr)
{
- ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
- return addr->as_u32[3];
+ ASSERT(d->ip6_src_len == 64 || d->ip6_src_len == 96);
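+ /* Extract the embedded IPv4 address; use a memcpy since the /64 offset (byte 9) is not 32-bit aligned */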
+ u32 x;
+ u8 offset = d->ip6_src_len == 64 ? 9 : 12;
+ clib_memcpy_fast(&x, &addr->as_u8[offset], 4);
+ return x;
}
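+/* Spinlock (taken only if allocated) serializing updates to the per-domain counters */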
static inline void
map_domain_counter_lock (map_main_t *mm)
{
if (mm->counter_lock)
- while (__sync_lock_test_and_set(mm->counter_lock, 1))
+ while (clib_atomic_test_and_set (mm->counter_lock))
/* zzzz */ ;
}
static inline void
map_domain_counter_unlock (map_main_t *mm)
{
if (mm->counter_lock)
- *mm->counter_lock = 0;
+ clib_atomic_release (mm->counter_lock);
}