#include <vnet/fib/fib_types.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/adj/adj.h>
-#include <map/map_dpo.h>
#include <vnet/dpo/load_balance.h>
+#include "lpm.h"
+#include <vppinfra/lock.h>
#define MAP_SKIP_IP6_LOOKUP 1
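+/* Return codes for the map_param_* configuration API below:
+ * MAP_ERR_GOOD on success, a negative MAP_ERR_* value on failure. */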
+#define MAP_ERR_GOOD 0
+#define MAP_ERR_BAD_POOL_SIZE -1
+#define MAP_ERR_BAD_HT_RATIO -2
+#define MAP_ERR_BAD_LIFETIME -3
+#define MAP_ERR_BAD_BUFFERS -4
+#define MAP_ERR_BAD_BUFFERS_TOO_LARGE -5
+#define MAP_ERR_UNSUPPORTED -6
+
int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len,
ip6_address_t * ip6_prefix, u8 ip6_prefix_len,
ip6_address_t * ip6_src, u8 ip6_src_len,
u8 ea_bits_len, u8 psid_offset, u8 psid_length,
- u32 * map_domain_index, u16 mtu, u8 flags);
+ u32 * map_domain_index, u16 mtu, u8 flags, u8 * tag);
int map_delete_domain (u32 map_domain_index);
int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep,
- u8 is_add);
+ bool is_add);
+int map_if_enable_disable (bool is_enable, u32 sw_if_index,
+ bool is_translation);
u8 *format_map_trace (u8 * s, va_list * args);
+int map_param_set_fragmentation (bool inner, bool ignore_df);
+int map_param_set_icmp (ip4_address_t * ip4_err_relay_src);
+int map_param_set_icmp6 (u8 enable_unreachable);
+void map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, bool is_del);
+int map_param_set_security_check (bool enable, bool fragments);
+int map_param_set_traffic_class (bool copy, u8 tc);
+int map_param_set_tcp (u16 tcp_mss);
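+
+/*
+ * Example (sketch): clamp the TCP MSS of translated flows to 1300
+ * bytes; a value of 0 is assumed to disable clamping:
+ *
+ *   map_param_set_tcp (1300);
+ */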
+
+
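+/*
+ * MAP_DOMAIN_PREFIX: the domain's embedded IPv4 address is a prefix
+ * shorter than /32; MAP_DOMAIN_RFC6052: RFC 6052 address translation.
+ */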
typedef enum
{
MAP_DOMAIN_PREFIX = 1 << 0,
MAP_DOMAIN_RFC6052 = 1 << 2,
} __attribute__ ((__packed__)) map_domain_flags_e;
-/**
- * IP4 reassembly logic:
- * One virtually reassembled flow requires a map_ip4_reass_t structure in order
- * to keep the first-fragment port number and, optionally, cache out of sequence
- * packets.
- * There are up to MAP_IP4_REASS_MAX_REASSEMBLY such structures.
- * When in use, those structures are stored in a hash table of MAP_IP4_REASS_BUCKETS buckets.
- * When a new structure needs to be used, it is allocated from available ones.
- * If there is no structure available, the oldest in use is selected and used if and
- * only if it was first allocated more than MAP_IP4_REASS_LIFETIME seconds ago.
- * In case no structure can be allocated, the fragment is dropped.
- */
-
-#define MAP_IP4_REASS_LIFETIME_DEFAULT (100) /* ms */
-#define MAP_IP4_REASS_HT_RATIO_DEFAULT (1.0)
-#define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
-#define MAP_IP4_REASS_BUFFERS_DEFAULT 2048
-
-#define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 // Number of fragment per reassembly
-
-#define MAP_IP6_REASS_LIFETIME_DEFAULT (100) /* ms */
-#define MAP_IP6_REASS_HT_RATIO_DEFAULT (1.0)
-#define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures
-#define MAP_IP6_REASS_BUFFERS_DEFAULT 2048
-
-#define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5
-
-#define MAP_IP6_REASS_COUNT_BYTES
-#define MAP_IP4_REASS_COUNT_BYTES
-
//#define IP6_MAP_T_OVERRIDE_TOS 0
/*
* This structure _MUST_ be no larger than a single cache line (64 bytes).
- * If more space is needed make a union of ip6_prefix and *rules, those are mutually exclusive.
+ * If more space is needed make a union of ip6_prefix and *rules, as
+ * those are mutually exclusive.
*/
typedef struct
{
STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES),
"MAP domain fits in one cacheline");
-#define MAP_REASS_INDEX_NONE ((u16)0xffff)
-
/*
- * Hash key, padded out to 16 bytes for fast compare
+ * Extra, non-performance-critical data about a domain. These
+ * structures are kept in a vector parallel to the main map_domain_t
+ * pool and indexed by the same map-domain-index values.
*/
-/* *INDENT-OFF* */
-typedef union {
- CLIB_PACKED (struct {
- ip4_address_t src;
- ip4_address_t dst;
- u16 fragment_id;
- u8 protocol;
- });
- u64 as_u64[2];
- u32 as_u32[4];
-} map_ip4_reass_key_t;
-/* *INDENT-ON* */
-
typedef struct
{
- map_ip4_reass_key_t key;
- f64 ts;
-#ifdef MAP_IP4_REASS_COUNT_BYTES
- u16 expected_total;
- u16 forwarded;
-#endif
- i32 port;
- u16 bucket;
- u16 bucket_next;
- u16 fifo_prev;
- u16 fifo_next;
- u32 fragments[MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY];
-} map_ip4_reass_t;
+ u8 *tag; /* User-assigned tag, typically a domain name. */
+} map_domain_extra_t;
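+
+/*
+ * Example (sketch): look up a domain's tag through the parallel vector:
+ *   map_domain_extra_t *de =
+ *     vec_elt_at_index (map_main.domain_extras, map_domain_index);
+ */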
+
+#define MAP_REASS_INDEX_NONE ((u16)0xffff)
/*
* MAP domain counters
MAP_N_DOMAIN_COUNTER
} map_domain_counter_t;
-/*
- * main_main_t
- */
-/* *INDENT-OFF* */
-typedef union {
- CLIB_PACKED (struct {
- ip6_address_t src;
- ip6_address_t dst;
- u32 fragment_id;
- u8 protocol;
- });
- u64 as_u64[5];
- u32 as_u32[10];
-} map_ip6_reass_key_t;
-/* *INDENT-OFF* */
-
-typedef struct {
- u32 pi; //Cached packet or ~0
- u16 next_data_offset; //The data offset of the additional 20 bytes or ~0
- u8 next_data_len; //Number of bytes ready to be copied (20 if not last fragment)
- u8 next_data[20]; //The 20 additional bytes
-} map_ip6_fragment_t;
-
-typedef struct {
- map_ip6_reass_key_t key;
- f64 ts;
-#ifdef MAP_IP6_REASS_COUNT_BYTES
- u16 expected_total;
- u16 forwarded;
-#endif
- u16 bucket; //What hash bucket this element is linked in
- u16 bucket_next;
- u16 fifo_prev;
- u16 fifo_next;
- ip4_header_t ip4_header;
- map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY];
-} map_ip6_reass_t;
-
#ifdef MAP_SKIP_IP6_LOOKUP
/**
* A pre-resolved next-hop
extern map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX];
#endif
-typedef struct {
+typedef struct
+{
/* pool of MAP domains */
map_domain_t *domains;
+ map_domain_extra_t *domain_extras;
/* MAP Domain packet/byte counters indexed by map domain index */
vlib_simple_counter_main_t *simple_domain_counters;
bool sec_check_frag; /* Inbound security check for (subsequent) fragments */
bool icmp6_enabled; /* Send destination unreachable for security check failure */
- bool is_ce; /* If this MAP node is a Customer Edge router*/
+ u16 tcp_mss; /* TCP MSS clamp value */
/* ICMPv6 -> ICMPv4 relay parameters */
ip4_address_t icmp4_src_address;
vlib_main_t *vlib_main;
vnet_main_t *vnet_main;
- /*
- * IPv4 encap and decap reassembly
- */
- /* Configuration */
- f32 ip4_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
- u16 ip4_reass_conf_pool_size; //Max number of allocated reass structures
- u16 ip4_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
- u32 ip4_reass_conf_buffers; //Maximum number of buffers used by ip4 reassembly
-
- /* Runtime */
- map_ip4_reass_t *ip4_reass_pool;
- u8 ip4_reass_ht_log2len; //Hash table size is 2^log2len
- u16 ip4_reass_allocated;
- u16 *ip4_reass_hash_table;
- u16 ip4_reass_fifo_last;
- volatile u32 *ip4_reass_lock;
-
- /* Counters */
- u32 ip4_reass_buffered_counter;
-
bool frag_inner; /* Inner or outer fragmentation */
bool frag_ignore_df; /* Fragment (outer) packet even if DF is set */
- /*
- * IPv6 decap reassembly
- */
- /* Configuration */
- f32 ip6_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
- u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures
- u16 ip6_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
- u32 ip6_reass_conf_buffers; //Maximum number of buffers used by ip6 reassembly
-
- /* Runtime */
- map_ip6_reass_t *ip6_reass_pool;
- u8 ip6_reass_ht_log2len; //Hash table size is 2^log2len
- u16 ip6_reass_allocated;
- u16 *ip6_reass_hash_table;
- u16 ip6_reass_fifo_last;
- volatile u32 *ip6_reass_lock;
-
- /* Counters */
- u32 ip6_reass_buffered_counter;
+ /* Graph node state */
+ uword *bm_trans_enabled_by_sw_if;
+ uword *bm_encap_enabled_by_sw_if;
+
+ /* Lookup tables */
+ lpm_t *ip4_prefix_tbl;
+ lpm_t *ip6_prefix_tbl;
+ lpm_t *ip6_src_prefix_tbl;
+ uword ip4_sv_reass_custom_next_index;
} map_main_t;
/*
_(FRAGMENT_MALFORMED, "fragment has unexpected format")\
_(FRAGMENT_DROPPED, "dropped cached fragment") \
_(MALFORMED, "malformed packet") \
- _(DF_SET, "can't fragment, DF set")
+ _(DF_SET, "can't fragment, DF set") \
+ _(TIME_EXCEEDED, "time exceeded")
-typedef enum {
+typedef enum
+{
#define _(sym,str) MAP_ERROR_##sym,
- foreach_map_error
+ foreach_map_error
#undef _
- MAP_N_ERROR,
- } map_error_t;
+ MAP_N_ERROR,
+} map_error_t;
-u64 map_error_counter_get(u32 node_index, map_error_t map_error);
+u64 map_error_counter_get (u32 node_index, map_error_t map_error);
-typedef struct {
+typedef struct
+{
u32 map_domain_index;
u16 port;
} map_trace_t;
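+
+/* Add a MAP trace record (domain index and port) to a traced buffer. */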
+always_inline void
+map_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_buffer_t * b, u32 map_domain_index, u16 port)
+{
+ map_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->map_domain_index = map_domain_index;
+ tr->port = port;
+}
+
extern map_main_t map_main;
extern vlib_node_registration_t ip4_map_node;
* map_get_pfx
*/
static_always_inline u64
-map_get_pfx (map_domain_t *d, u32 addr, u16 port)
+map_get_pfx (map_domain_t * d, u32 addr, u16 port)
{
u16 psid = (port >> d->psid_shift) & d->psid_mask;
if (d->ea_bits_len == 0 && d->rules)
- return clib_net_to_host_u64(d->rules[psid].as_u64[0]);
+ return clib_net_to_host_u64 (d->rules[psid].as_u64[0]);
u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask;
- u64 ea = d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;
+ u64 ea =
+ d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;
- return clib_net_to_host_u64(d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
+ return clib_net_to_host_u64 (d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
}
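+
+/*
+ * Worked example (cf. RFC 7597 Appendix A): with ip6_prefix
+ * 2001:db8::/40, a /24 IPv4 rule prefix, ea_bits_len 16, psid_offset 6
+ * and psid_length 8, address 192.0.2.18 with port 1232 gives suffix
+ * 0x12 and PSID 0x34, i.e. EA bits 0x1234 and prefix 2001:db8:12:3400::.
+ */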
static_always_inline u64
-map_get_pfx_net (map_domain_t *d, u32 addr, u16 port)
+map_get_pfx_net (map_domain_t * d, u32 addr, u16 port)
{
- return clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr),
- clib_net_to_host_u16(port)));
+ return clib_host_to_net_u64 (map_get_pfx (d, clib_net_to_host_u32 (addr),
+ clib_net_to_host_u16 (port)));
}
/*
* map_get_sfx
*/
static_always_inline u64
-map_get_sfx (map_domain_t *d, u32 addr, u16 port)
+map_get_sfx (map_domain_t * d, u32 addr, u16 port)
{
u16 psid = (port >> d->psid_shift) & d->psid_mask;
/* Shared 1:1 mode. */
if (d->ea_bits_len == 0 && d->rules)
- return clib_net_to_host_u64(d->rules[psid].as_u64[1]);
+ return clib_net_to_host_u64 (d->rules[psid].as_u64[1]);
if (d->ip6_prefix_len == 128)
- return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]);
+ return clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]);
- if (d->flags & MAP_DOMAIN_RFC6052)
- return (clib_net_to_host_u64(d->ip6_prefix.as_u64[1]) | addr);
+ if (d->ip6_src_len == 96)
+ return (clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]) | addr);
/* IPv4 prefix */
if (d->flags & MAP_DOMAIN_PREFIX)
}
static_always_inline u64
-map_get_sfx_net (map_domain_t *d, u32 addr, u16 port)
+map_get_sfx_net (map_domain_t * d, u32 addr, u16 port)
{
- return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr),
- clib_net_to_host_u16(port)));
+ return clib_host_to_net_u64 (map_get_sfx (d, clib_net_to_host_u32 (addr),
+ clib_net_to_host_u16 (port)));
}
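+
+/*
+ * Extract the embedded IPv4 address from an IPv6 address.
+ * With a 96-bit prefix the IPv4 address occupies the last four bytes
+ * (RFC 6052 style); with a 64-bit prefix it is taken from the MAP
+ * interface identifier, bits 80-111 (RFC 7597, section 6).
+ */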
static_always_inline u32
-map_get_ip4 (ip6_address_t *addr, map_domain_flags_e flags)
+map_get_ip4 (ip6_address_t * addr, u16 prefix_len)
{
- if (flags & MAP_DOMAIN_RFC6052)
- return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]));
+ ASSERT (prefix_len == 64 || prefix_len == 96);
+ if (prefix_len == 96)
+ return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]));
else
- return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16);
+ return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]) >>
+ 16);
}
-/*
- * Get the MAP domain from an IPv4 lookup adjacency.
- */
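+/*
+ * Get the MAP domain matching an IPv4 address by longest-prefix match
+ * in the IPv4 LPM table. Returns NULL and sets *error to
+ * MAP_ERROR_NO_DOMAIN when no domain covers the address.
+ */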
static_always_inline map_domain_t *
-ip4_map_get_domain (u32 mdi)
+ip4_map_get_domain (ip4_address_t * addr, u32 * map_domain_index, u8 * error)
{
map_main_t *mm = &map_main;
- return pool_elt_at_index(mm->domains, mdi);
+ u32 mdi = mm->ip4_prefix_tbl->lookup (mm->ip4_prefix_tbl, addr, 32);
+ if (mdi == ~0)
+ {
+ *error = MAP_ERROR_NO_DOMAIN;
+ return 0;
+ }
+ *map_domain_index = mdi;
+ return pool_elt_at_index (mm->domains, mdi);
}
/*
- * Get the MAP domain from an IPv6 lookup adjacency.
- * If the IPv6 address or prefix is not shared, no lookup is required.
- * The IPv4 address is used otherwise.
+ * Get the MAP domain from an IPv6 address.
+ * If the IPv6 address or prefix is shared, the IPv4 address must be used.
*/
static_always_inline map_domain_t *
-ip6_map_get_domain (u32 mdi,
- ip4_address_t *addr,
- u32 *map_domain_index,
- u8 *error)
+ip6_map_get_domain (ip6_address_t * addr, u32 * map_domain_index, u8 * error)
{
map_main_t *mm = &map_main;
-
-#ifdef TODO
- /*
- * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA.
- * (That's done implicitly when MAP domain is looked up in the IPv4 FIB)
- */
- //#ifdef MAP_NONSHARED_DOMAIN_ENABLED
- //#error "How can you be sure this domain is not shared?"
-#endif
-
- *map_domain_index = mdi;
- return pool_elt_at_index(mm->domains, mdi);
-
-#ifdef TODO
- u32 lbi = ip4_fib_forwarding_lookup(0, addr);
- const dpo_id_t *dpo = load_balance_get_bucket(lbi, 0);
- if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type ||
- dpo->dpoi_type == map_t_dpo_type))
+ u32 mdi =
+ mm->ip6_src_prefix_tbl->lookup (mm->ip6_src_prefix_tbl, addr, 128);
+ if (mdi == ~0)
{
- *map_domain_index = dpo->dpoi_index;
- return pool_elt_at_index(mm->domains, *map_domain_index);
+ *error = MAP_ERROR_NO_DOMAIN;
+ return 0;
}
- *error = MAP_ERROR_NO_DOMAIN;
- return NULL;
-#endif
-}
-map_ip4_reass_t *
-map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
- u8 protocol, u32 **pi_to_drop);
-void
-map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);
-
-#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
-#define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)
-
-static_always_inline void
-map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi)
-{
- int i;
- for (i=0; i<MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
- if(r->fragments[i] != ~0) {
- vec_add1(*pi, r->fragments[i]);
- r->fragments[i] = ~0;
- map_main.ip4_reass_buffered_counter--;
- }
+ *map_domain_index = mdi;
+ return pool_elt_at_index (mm->domains, mdi);
}
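+
+/*
+ * Usage sketch:
+ *   u8 error0 = 0;
+ *   u32 map_domain_index0 = ~0;
+ *   map_domain_t *d0 = ip6_map_get_domain (addr, &map_domain_index0,
+ *                                          &error0);
+ *   if (!d0)
+ *     ... count error0 and drop the packet ...
+ */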
-clib_error_t * map_plugin_api_hookup (vlib_main_t * vm);
-
-int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi);
-
-map_ip6_reass_t *
-map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
- u8 protocol, u32 **pi_to_drop);
-void
-map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
+clib_error_t *map_plugin_api_hookup (vlib_main_t * vm);
-#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
-#define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)
-
-int
-map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
- u16 data_offset, u16 next_data_offset,
- u8 *data_start, u16 data_len);
-
-void map_ip4_drop_pi(u32 pi);
-
-int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
-#define MAP_IP4_REASS_CONF_HT_RATIO_MAX 100
-int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
-#define MAP_IP4_REASS_CONF_POOL_SIZE_MAX (0xfeff)
-int map_ip4_reass_conf_lifetime(u16 lifetime_ms);
-#define MAP_IP4_REASS_CONF_LIFETIME_MAX 0xffff
-int map_ip4_reass_conf_buffers(u32 buffers);
-#define MAP_IP4_REASS_CONF_BUFFERS_MAX (0xffffffff)
-
-void map_ip6_drop_pi(u32 pi);
-
-
-int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
-#define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100
-int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
-#define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff)
-int map_ip6_reass_conf_lifetime(u16 lifetime_ms);
-#define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff
-int map_ip6_reass_conf_buffers(u32 buffers);
-#define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff)
+void map_ip6_drop_pi (u32 pi);
+/*
+ * Write the RFC 6052 IPv6 source address embedding an IPv4 address.
+ * Supports a source prefix length of 96 or 64; with /64 the u-octet
+ * at byte 8 is preserved and the IPv4 address starts at byte 9.
+ */
static_always_inline void
-ip4_map_t_embedded_address (map_domain_t *d,
- ip6_address_t *ip6, const ip4_address_t *ip4)
+ip4_map_t_embedded_address (map_domain_t * d,
+ ip6_address_t * ip6, const ip4_address_t * ip4)
{
- ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
+ ASSERT (d->ip6_src_len == 96 || d->ip6_src_len == 64); //No support for other lengths for now
+ u8 offset = d->ip6_src_len == 64 ? 9 : 12;
ip6->as_u64[0] = d->ip6_src.as_u64[0];
- ip6->as_u32[2] = d->ip6_src.as_u32[2];
- ip6->as_u32[3] = ip4->as_u32;
+ ip6->as_u64[1] = d->ip6_src.as_u64[1];
+ clib_memcpy_fast (&ip6->as_u8[offset], ip4, 4);
}
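+
+/*
+ * For example, with ip6_src 2001:db8::/96 and IPv4 address 192.0.2.1
+ * the resulting source address is 2001:db8::c000:201.
+ */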
static_always_inline u32
-ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr)
+ip6_map_t_embedded_address (map_domain_t * d, ip6_address_t * addr)
{
- ASSERT(d->ip6_src_len == 96); //No support for other lengths for now
- return addr->as_u32[3];
+ ASSERT (d->ip6_src_len == 64 || d->ip6_src_len == 96);
+ u32 x;
+ u8 offset = d->ip6_src_len == 64 ? 9 : 12;
+ clib_memcpy (&x, &addr->as_u8[offset], 4);
+ return x;
}
static inline void
-map_domain_counter_lock (map_main_t *mm)
+map_domain_counter_lock (map_main_t * mm)
{
if (mm->counter_lock)
- while (__sync_lock_test_and_set(mm->counter_lock, 1))
+ while (clib_atomic_test_and_set (mm->counter_lock))
/* zzzz */ ;
}
+
static inline void
-map_domain_counter_unlock (map_main_t *mm)
+map_domain_counter_unlock (map_main_t * mm)
{
if (mm->counter_lock)
- *mm->counter_lock = 0;
+ clib_atomic_release (mm->counter_lock);
}
static_always_inline void
-map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector,
- vlib_node_runtime_t *node, vlib_error_t *error,
- u32 next)
+map_send_all_to_node (vlib_main_t * vm, u32 * pi_vector,
+ vlib_node_runtime_t * node, vlib_error_t * error,
+ u32 next)
{
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
//Deal with fragments that are ready
from = pi_vector;
- n_left_from = vec_len(pi_vector);
+ n_left_from = vec_len (pi_vector);
next_index = node->cached_next_index;
- while (n_left_from > 0) {
- vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
- while (n_left_from > 0 && n_left_to_next > 0) {
- u32 pi0 = to_next[0] = from[0];
- from += 1;
- n_left_from -= 1;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0);
- p0->error = *error;
- vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next);
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 pi0 = to_next[0] = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ vlib_buffer_t *p0 = vlib_get_buffer (vm, pi0);
+ p0->error = *error;
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, pi0, next);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_put_next_frame(vm, node, next_index, n_left_to_next);
- }
+}
+
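+/*
+ * Clamp the TCP MSS option of a SYN segment to mss_clamping, folding
+ * the change into the checksum *sum. Walks the TCP options, skipping
+ * NOOP and stopping at EOL or a malformed option length.
+ */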
+static_always_inline void
+map_mss_clamping (tcp_header_t * tcp, ip_csum_t * sum, u16 mss_clamping)
+{
+ u8 *data;
+ u8 opt_len, opts_len, kind;
+ u16 mss;
+ u16 mss_value_net = clib_host_to_net_u16 (mss_clamping);
+
+ if (!tcp_syn (tcp))
+ return;
+
+ opts_len = (tcp_doff (tcp) << 2) - sizeof (tcp_header_t);
+ data = (u8 *) (tcp + 1);
+ for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
+ {
+ kind = data[0];
+
+ if (kind == TCP_OPTION_EOL)
+ break;
+ else if (kind == TCP_OPTION_NOOP)
+ {
+ opt_len = 1;
+ continue;
+ }
+ else
+ {
+ if (opts_len < 2)
+ return;
+ opt_len = data[1];
+
+ if (opt_len < 2 || opt_len > opts_len)
+ return;
+ }
+
+ if (kind == TCP_OPTION_MSS)
+ {
+ mss = *(u16 *) (data + 2);
+ if (clib_net_to_host_u16 (mss) > mss_clamping)
+ {
+ *sum =
+ ip_csum_update (*sum, mss, mss_value_net, ip4_header_t,
+ length);
+ clib_memcpy (data + 2, &mss_value_net, 2);
+ }
+ return;
+ }
+ }
+}
+
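+/*
+ * If a pre-resolved next hop was configured with map_pre_resolve(),
+ * store its DPO index in the buffer so the node can skip the FIB lookup.
+ */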
+static_always_inline bool
+ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
+{
+#ifdef MAP_SKIP_IP6_LOOKUP
+ if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei)
+ {
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
+ pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index;
+ return (true);
+ }
+#endif
+ return (false);
+}
+
+static_always_inline bool
+ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
+{
+#ifdef MAP_SKIP_IP6_LOOKUP
+ if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
+ {
+ vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
+ pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
+ return (true);
+ }
+#endif
+ return (false);
}
/*