X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fnat%2Fin2out.c;h=0fe363383976004316b9716327f2af707fd75c62;hb=ea5b5be4;hp=dfe103033ae231ab73c5735b371516a2003f96d0;hpb=ed3c160983d302909dee5223675a2b356d306c81;p=vpp.git diff --git a/src/plugins/nat/in2out.c b/src/plugins/nat/in2out.c index dfe103033ae..0fe36338397 100755 --- a/src/plugins/nat/in2out.c +++ b/src/plugins/nat/in2out.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include @@ -83,6 +85,25 @@ static u8 * format_snat_in2out_worker_handoff_trace (u8 * s, va_list * args) return s; } +typedef struct { + u32 sw_if_index; + u32 next_index; + u8 cached; +} nat44_in2out_reass_trace_t; + +static u8 * format_nat44_in2out_reass_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + nat44_in2out_reass_trace_t * t = va_arg (*args, nat44_in2out_reass_trace_t *); + + s = format (s, "NAT44_IN2OUT_REASS: sw_if_index %d, next index %d, status %s", + t->sw_if_index, t->next_index, + t->cached ? "cached" : "translated"); + + return s; +} + vlib_node_registration_t snat_in2out_node; vlib_node_registration_t snat_in2out_slowpath_node; vlib_node_registration_t snat_in2out_fast_node; @@ -93,7 +114,15 @@ vlib_node_registration_t snat_in2out_output_slowpath_node; vlib_node_registration_t snat_in2out_output_worker_handoff_node; vlib_node_registration_t snat_hairpin_dst_node; vlib_node_registration_t snat_hairpin_src_node; - +vlib_node_registration_t nat44_hairpinning_node; +vlib_node_registration_t nat44_in2out_reass_node; +vlib_node_registration_t nat44_ed_in2out_node; +vlib_node_registration_t nat44_ed_in2out_slowpath_node; +vlib_node_registration_t nat44_ed_in2out_output_node; +vlib_node_registration_t nat44_ed_in2out_output_slowpath_node; +vlib_node_registration_t nat44_ed_hairpin_dst_node; +vlib_node_registration_t nat44_ed_hairpin_src_node; +vlib_node_registration_t nat44_ed_hairpinning_node; #define foreach_snat_in2out_error \ _(UNSUPPORTED_PROTOCOL, "Unsupported protocol") \ @@ -101,7 +130,12 @@ _(IN2OUT_PACKETS, "Good in2out packets processed") \ _(OUT_OF_PORTS, "Out of ports") \ _(BAD_OUTSIDE_FIB, "Outside VRF ID not found") \ _(BAD_ICMP_TYPE, "unsupported ICMP type") \ -_(NO_TRANSLATION, "No translation") +_(NO_TRANSLATION, "No translation") \ +_(MAX_SESSIONS_EXCEEDED, "Maximum sessions exceeded") \ +_(DROP_FRAGMENT, "Drop fragment") \ +_(MAX_REASS, "Maximum reassemblies exceeded") \ +_(MAX_FRAG, "Maximum fragments per reassembly exceeded")\ +_(FQ_CONGESTED, "Handoff frame queue congested") typedef enum { #define _(sym,str) SNAT_IN2OUT_ERROR_##sym, @@ -121,6 +155,7 @@ typedef enum { SNAT_IN2OUT_NEXT_DROP, SNAT_IN2OUT_NEXT_ICMP_ERROR, SNAT_IN2OUT_NEXT_SLOW_PATH, + SNAT_IN2OUT_NEXT_REASS, SNAT_IN2OUT_N_NEXT, } snat_in2out_next_t; @@ -135,7 +170,7 @@ typedef enum { /** * @brief Check if packet should be translated * - * Packets aimed at outside interface and external addresss with active session + * Packets aimed at outside interface and external address with active session * should be translated. 
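+ * The check resolves the destination through the outside FIB(s) and
+ * translates only when it reaches an interface configured as NAT outside.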
* * @param sm NAT main @@ -152,7 +187,11 @@ snat_not_translate_fast (snat_main_t * sm, vlib_node_runtime_t *node, u32 sw_if_index0, ip4_header_t * ip0, u32 proto0, u32 rx_fib_index0) { + if (sm->out2in_dpo) + return 0; + fib_node_index_t fei = FIB_NODE_INDEX_INVALID; + nat_outside_fib_t *outside_fib; fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP4, .fp_len = 32, @@ -172,15 +211,25 @@ snat_not_translate_fast (snat_main_t * sm, vlib_node_runtime_t *node, u32 sw_if_index = fib_entry_get_resolving_interface (fei); if (sw_if_index == ~0) { - fei = fib_table_lookup (sm->outside_fib_index, &pfx); - if (FIB_NODE_INDEX_INVALID != fei) - sw_if_index = fib_entry_get_resolving_interface (fei); + vec_foreach (outside_fib, sm->outside_fibs) + { + fei = fib_table_lookup (outside_fib->fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + { + sw_if_index = fib_entry_get_resolving_interface (fei); + if (sw_if_index != ~0) + break; + } + } } + if (sw_if_index == ~0) + return 1; + snat_interface_t *i; pool_foreach (i, sm->interfaces, ({ /* NAT packet aimed at outside interface */ - if ((i->is_inside == 0) && (sw_if_index == i->sw_if_index)) + if ((nat_interface_is_outside(i)) && (sw_if_index == i->sw_if_index)) return 0; })); } @@ -209,16 +258,96 @@ snat_not_translate (snat_main_t * sm, vlib_node_runtime_t *node, &value0)) { /* or is static mappings */ - if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0)) + if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0, 0, 0, 0)) return 0; } else return 0; + if (sm->forwarding_enabled) + return 1; + return snat_not_translate_fast(sm, node, sw_if_index0, ip0, proto0, rx_fib_index0); } +static inline int +nat_not_translate_output_feature (snat_main_t * sm, ip4_header_t * ip0, + u32 proto0, u16 src_port, u16 dst_port, + u32 thread_index, u32 sw_if_index) +{ + snat_session_key_t key0; + clib_bihash_kv_8_8_t kv0, value0; + snat_interface_t *i; + + /* src NAT check */ + key0.addr = ip0->src_address; + key0.port = src_port; + key0.protocol = proto0; + key0.fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index); + kv0.key = key0.as_u64; + + if (!clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].out2in, &kv0, + &value0)) + return 1; + + /* dst NAT check */ + key0.addr = ip0->dst_address; + key0.port = dst_port; + key0.protocol = proto0; + kv0.key = key0.as_u64; + if (!clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].in2out, &kv0, + &value0)) + { + /* hairpinning */ + pool_foreach (i, sm->output_feature_interfaces, + ({ + if ((nat_interface_is_inside(i)) && (sw_if_index == i->sw_if_index)) + return 0; + })); + return 1; + } + + return 0; +} + +int +nat44_i2o_is_idle_session_cb (clib_bihash_kv_8_8_t * kv, void * arg) +{ + snat_main_t *sm = &snat_main; + nat44_is_idle_session_ctx_t *ctx = arg; + snat_session_t *s; + u64 sess_timeout_time; + snat_main_per_thread_data_t *tsm = vec_elt_at_index (sm->per_thread_data, + ctx->thread_index); + clib_bihash_kv_8_8_t s_kv; + + s = pool_elt_at_index (tsm->sessions, kv->value); + sess_timeout_time = s->last_heard + (f64)nat44_session_get_timeout(sm, s); + if (ctx->now >= sess_timeout_time) + { + s_kv.key = s->out2in.as_u64; + if (clib_bihash_add_del_8_8 (&tsm->out2in, &s_kv, 0)) + nat_log_warn ("out2in key del failed"); + + snat_ipfix_logging_nat44_ses_delete(s->in2out.addr.as_u32, + s->out2in.addr.as_u32, + s->in2out.protocol, + s->in2out.port, + s->out2in.port, + s->in2out.fib_index); + + if (!snat_is_session_static (s)) + snat_free_outside_address_and_port (sm->addresses, ctx->thread_index, + 
&s->out2in); + + nat44_delete_session (sm, s, ctx->thread_index); + return 1; + } + + return 0; +} + static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, ip4_header_t * ip0, u32 rx_fib_index0, @@ -226,225 +355,122 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, snat_session_t ** sessionp, vlib_node_runtime_t * node, u32 next0, - u32 thread_index) + u32 thread_index, + f64 now) { snat_user_t *u; - snat_user_key_t user_key; snat_session_t *s; - clib_bihash_kv_8_8_t kv0, value0; - u32 oldest_per_user_translation_list_index; - dlist_elt_t * oldest_per_user_translation_list_elt; - dlist_elt_t * per_user_translation_list_elt; - dlist_elt_t * per_user_list_head_elt; - u32 session_index; + clib_bihash_kv_8_8_t kv0; snat_session_key_t key1; u32 address_index = ~0; - u32 outside_fib_index; - uword * p; + udp_header_t * udp0 = ip4_next_header (ip0); + u8 is_sm = 0; + nat_outside_fib_t *outside_fib; + fib_node_index_t fei = FIB_NODE_INDEX_INVALID; + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP4, + .fp_len = 32, + .fp_addr = { + .ip4.as_u32 = ip0->dst_address.as_u32, + }, + }; + nat44_is_idle_session_ctx_t ctx0; - p = hash_get (sm->ip4_main->fib_index_by_table_id, sm->outside_vrf_id); - if (! p) + if (PREDICT_FALSE (maximum_sessions_exceeded(sm, thread_index))) { - b0->error = node->errors[SNAT_IN2OUT_ERROR_BAD_OUTSIDE_FIB]; + b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED]; + nat_ipfix_logging_max_sessions(sm->max_translations); + nat_log_notice ("maximum sessions exceeded"); return SNAT_IN2OUT_NEXT_DROP; } - outside_fib_index = p[0]; key1.protocol = key0->protocol; - user_key.addr = ip0->src_address; - user_key.fib_index = rx_fib_index0; - kv0.key = user_key.as_u64; - - /* Ever heard of the "user" = src ip4 address before? */ - if (clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].user_hash, - &kv0, &value0)) - { - /* no, make a new one */ - pool_get (sm->per_thread_data[thread_index].users, u); - memset (u, 0, sizeof (*u)); - u->addr = ip0->src_address; - u->fib_index = rx_fib_index0; - - pool_get (sm->per_thread_data[thread_index].list_pool, per_user_list_head_elt); - - u->sessions_per_user_list_head_index = per_user_list_head_elt - - sm->per_thread_data[thread_index].list_pool; - - clib_dlist_init (sm->per_thread_data[thread_index].list_pool, - u->sessions_per_user_list_head_index); - - kv0.value = u - sm->per_thread_data[thread_index].users; - /* add user */ - clib_bihash_add_del_8_8 (&sm->per_thread_data[thread_index].user_hash, - &kv0, 1 /* is_add */); - } - else - { - u = pool_elt_at_index (sm->per_thread_data[thread_index].users, - value0.value); - } - - /* Over quota? 
Recycle the least recently used dynamic translation */ - if (u->nsessions >= sm->max_translations_per_user) + /* First try to match static mapping by local address and port */ + if (snat_static_mapping_match (sm, *key0, &key1, 0, 0, 0, 0, 0)) { - /* Remove the oldest dynamic translation */ - do { - oldest_per_user_translation_list_index = - clib_dlist_remove_head (sm->per_thread_data[thread_index].list_pool, - u->sessions_per_user_list_head_index); - - ASSERT (oldest_per_user_translation_list_index != ~0); - - /* add it back to the end of the LRU list */ - clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, - u->sessions_per_user_list_head_index, - oldest_per_user_translation_list_index); - /* Get the list element */ - oldest_per_user_translation_list_elt = - pool_elt_at_index (sm->per_thread_data[thread_index].list_pool, - oldest_per_user_translation_list_index); - - /* Get the session index from the list element */ - session_index = oldest_per_user_translation_list_elt->value; - - /* Get the session */ - s = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, - session_index); - } while (snat_is_session_static (s)); - - if (snat_is_unk_proto_session (s)) - { - clib_bihash_kv_16_8_t up_kv; - nat_ed_ses_key_t key; - - /* Remove from lookup tables */ - key.l_addr = s->in2out.addr; - key.r_addr = s->ext_host_addr; - key.fib_index = s->in2out.fib_index; - key.proto = s->in2out.port; - key.rsvd = 0; - key.l_port = 0; - up_kv.key[0] = key.as_u64[0]; - up_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->in2out_ed, &up_kv, 0)) - clib_warning ("in2out key del failed"); - - key.l_addr = s->out2in.addr; - key.fib_index = s->out2in.fib_index; - up_kv.key[0] = key.as_u64[0]; - up_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->out2in_ed, &up_kv, 0)) - clib_warning ("out2in key del failed"); - } - else - { - /* Remove in2out, out2in keys */ - kv0.key = s->in2out.as_u64; - if (clib_bihash_add_del_8_8 (&sm->per_thread_data[thread_index].in2out, - &kv0, 0 /* is_add */)) - clib_warning ("in2out key delete failed"); - kv0.key = s->out2in.as_u64; - if (clib_bihash_add_del_8_8 (&sm->per_thread_data[thread_index].out2in, - &kv0, 0 /* is_add */)) - clib_warning ("out2in key delete failed"); - - /* log NAT event */ - snat_ipfix_logging_nat44_ses_delete(s->in2out.addr.as_u32, - s->out2in.addr.as_u32, - s->in2out.protocol, - s->in2out.port, - s->out2in.port, - s->in2out.fib_index); - - snat_free_outside_address_and_port - (sm, thread_index, &s->out2in, s->outside_address_index); - } - s->outside_address_index = ~0; - - if (snat_alloc_outside_address_and_port (sm, rx_fib_index0, thread_index, - &key1, &address_index)) + /* Try to create dynamic translation */ + if (snat_alloc_outside_address_and_port (sm->addresses, rx_fib_index0, + thread_index, &key1, + &address_index, + sm->port_per_thread, + sm->per_thread_data[thread_index].snat_thread_index)) { - ASSERT(0); - b0->error = node->errors[SNAT_IN2OUT_ERROR_OUT_OF_PORTS]; return SNAT_IN2OUT_NEXT_DROP; } - s->outside_address_index = address_index; } else - { - u8 static_mapping = 1; - - /* First try to match static mapping by local address and port */ - if (snat_static_mapping_match (sm, *key0, &key1, 0, 0)) - { - static_mapping = 0; - /* Try to create dynamic translation */ - if (snat_alloc_outside_address_and_port (sm, rx_fib_index0, - thread_index, &key1, - &address_index)) - { - b0->error = node->errors[SNAT_IN2OUT_ERROR_OUT_OF_PORTS]; - return SNAT_IN2OUT_NEXT_DROP; - } - } - - /* Create a new session */ - 
pool_get (sm->per_thread_data[thread_index].sessions, s); - memset (s, 0, sizeof (*s)); - - s->outside_address_index = address_index; + is_sm = 1; - if (static_mapping) - { - u->nstaticsessions++; - s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; - } - else - { - u->nsessions++; - } + u = nat_user_get_or_create (sm, &ip0->src_address, rx_fib_index0, + thread_index); + if (!u) + { + nat_log_warn ("create NAT user failed"); + return SNAT_IN2OUT_NEXT_DROP; + } - /* Create list elts */ - pool_get (sm->per_thread_data[thread_index].list_pool, - per_user_translation_list_elt); - clib_dlist_init (sm->per_thread_data[thread_index].list_pool, - per_user_translation_list_elt - - sm->per_thread_data[thread_index].list_pool); - - per_user_translation_list_elt->value = - s - sm->per_thread_data[thread_index].sessions; - s->per_user_index = per_user_translation_list_elt - - sm->per_thread_data[thread_index].list_pool; - s->per_user_list_head_index = u->sessions_per_user_list_head_index; - - clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, - s->per_user_list_head_index, - per_user_translation_list_elt - - sm->per_thread_data[thread_index].list_pool); - } + s = nat_session_alloc_or_recycle (sm, u, thread_index); + if (!s) + { + nat44_delete_user_with_no_session (sm, u, thread_index); + nat_log_warn ("create NAT session failed"); + return SNAT_IN2OUT_NEXT_DROP; + } + if (is_sm) + s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; + user_session_increment (sm, u, is_sm); + s->outside_address_index = address_index; s->in2out = *key0; s->out2in = key1; s->out2in.protocol = key0->protocol; - s->out2in.fib_index = outside_fib_index; + s->out2in.fib_index = sm->outside_fib_index; + switch (vec_len (sm->outside_fibs)) + { + case 0: + s->out2in.fib_index = sm->outside_fib_index; + break; + case 1: + s->out2in.fib_index = sm->outside_fibs[0].fib_index; + break; + default: + vec_foreach (outside_fib, sm->outside_fibs) + { + fei = fib_table_lookup (outside_fib->fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + { + if (fib_entry_get_resolving_interface (fei) != ~0) + { + s->out2in.fib_index = outside_fib->fib_index; + break; + } + } + } + break; + } s->ext_host_addr.as_u32 = ip0->dst_address.as_u32; + s->ext_host_port = udp0->dst_port; *sessionp = s; /* Add to translation hashes */ + ctx0.now = now; + ctx0.thread_index = thread_index; kv0.key = s->in2out.as_u64; kv0.value = s - sm->per_thread_data[thread_index].sessions; - if (clib_bihash_add_del_8_8 (&sm->per_thread_data[thread_index].in2out, &kv0, - 1 /* is_add */)) - clib_warning ("in2out key add failed"); + if (clib_bihash_add_or_overwrite_stale_8_8 ( + &sm->per_thread_data[thread_index].in2out, &kv0, + nat44_i2o_is_idle_session_cb, &ctx0)) + nat_log_notice ("in2out key add failed"); kv0.key = s->out2in.as_u64; kv0.value = s - sm->per_thread_data[thread_index].sessions; - if (clib_bihash_add_del_8_8 (&sm->per_thread_data[thread_index].out2in, &kv0, - 1 /* is_add */)) - clib_warning ("out2in key add failed"); + if (clib_bihash_add_or_overwrite_stale_8_8 ( + &sm->per_thread_data[thread_index].out2in, &kv0, + nat44_o2i_is_idle_session_cb, &ctx0)) + nat_log_notice ("out2in key add failed"); /* log NAT event */ snat_ipfix_logging_nat44_ses_create(s->in2out.addr.as_u32, @@ -516,11 +542,11 @@ snat_in2out_error_t icmp_get_key(ip4_header_t *ip0, * @param e optional parameter */ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 thread_index, vlib_buffer_t *b0, u8 *p_proto, + u32 thread_index, vlib_buffer_t *b0, + ip4_header_t *ip0, 
u8 *p_proto, snat_session_key_t *p_value, u8 *p_dont_translate, void *d, void *e) { - ip4_header_t *ip0; icmp46_header_t *icmp0; u32 sw_if_index0; u32 rx_fib_index0; @@ -530,13 +556,7 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, clib_bihash_kv_8_8_t kv0, value0; u32 next0 = ~0; int err; - u32 iph_offset0 = 0; - if (PREDICT_FALSE(vnet_buffer(b0)->sw_if_index[VLIB_TX] != ~0)) - { - iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; - } - ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0); icmp0 = (icmp46_header_t *) ip4_next_header (ip0); sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0); @@ -555,12 +575,23 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, if (clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].in2out, &kv0, &value0)) { - if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, ip0, - IP_PROTOCOL_ICMP, rx_fib_index0, thread_index) && - vnet_buffer(b0)->sw_if_index[VLIB_TX] == ~0)) + if (vnet_buffer(b0)->sw_if_index[VLIB_TX] != ~0) { - dont_translate = 1; - goto out; + if (PREDICT_FALSE(nat_not_translate_output_feature(sm, ip0, + key0.protocol, key0.port, key0.port, thread_index, sw_if_index0))) + { + dont_translate = 1; + goto out; + } + } + else + { + if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, + ip0, SNAT_PROTOCOL_ICMP, rx_fib_index0, thread_index))) + { + dont_translate = 1; + goto out; + } } if (PREDICT_FALSE(icmp_is_error_message (icmp0))) @@ -570,8 +601,8 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, goto out; } - next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, thread_index); + next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, &s0, node, next0, + thread_index, vlib_time_now (sm->vlib_main)); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto out; @@ -615,11 +646,11 @@ out: * @param e optional parameter */ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 thread_index, vlib_buffer_t *b0, u8 *p_proto, + u32 thread_index, vlib_buffer_t *b0, + ip4_header_t *ip0, u8 *p_proto, snat_session_key_t *p_value, u8 *p_dont_translate, void *d, void *e) { - ip4_header_t *ip0; icmp46_header_t *icmp0; u32 sw_if_index0; u32 rx_fib_index0; @@ -630,7 +661,6 @@ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, u32 next0 = ~0; int err; - ip0 = vlib_buffer_get_current (b0); icmp0 = (icmp46_header_t *) ip4_next_header (ip0); sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0); @@ -644,7 +674,7 @@ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, } key0.fib_index = rx_fib_index0; - if (snat_static_mapping_match(sm, key0, &sm0, 0, &is_addr_only)) + if (snat_static_mapping_match(sm, key0, &sm0, 0, &is_addr_only, 0, 0, 0)) { if (PREDICT_FALSE(snat_not_translate_fast(sm, node, sw_if_index0, ip0, IP_PROTOCOL_ICMP, rx_fib_index0))) @@ -708,7 +738,7 @@ static inline u32 icmp_in2out (snat_main_t *sm, echo0 = (icmp_echo_header_t *)(icmp0+1); - next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0, + next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0, ip0, &protocol, &sm0, &dont_translate, d, e); if (next0_tmp != ~0) next0 = next0_tmp; @@ -734,6 +764,9 @@ static inline u32 icmp_in2out (snat_main_t *sm, src_address /* changed member */); ip0->checksum = ip_csum_fold (sum0); + if (icmp0->checksum == 
0) + icmp0->checksum = 0xffff; + if (!icmp_is_error_message (icmp0)) { new_id0 = sm0.port; @@ -818,13 +851,14 @@ out: * @param tcp0 TCP header. * @param proto0 NAT protocol. */ -static inline void +static inline int snat_hairpinning (snat_main_t *sm, vlib_buffer_t * b0, ip4_header_t * ip0, udp_header_t * udp0, tcp_header_t * tcp0, - u32 proto0) + u32 proto0, + int is_ed) { snat_session_key_t key0, sm0; snat_session_t * s0; @@ -832,6 +866,7 @@ snat_hairpinning (snat_main_t *sm, ip_csum_t sum0; u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si; u16 new_dst_port0, old_dst_port0; + int rv; key0.addr = ip0->dst_address; key0.port = udp0->dst_port; @@ -840,7 +875,7 @@ snat_hairpinning (snat_main_t *sm, kv0.key = key0.as_u64; /* Check if destination is static mappings */ - if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0)) + if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0, 0, 0, 0)) { new_dst_addr0 = sm0.addr.as_u32; new_dst_port0 = sm0.port; @@ -854,15 +889,29 @@ snat_hairpinning (snat_main_t *sm, else ti = sm->num_workers; - if (!clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, &value0)) + if (is_ed) + { + clib_bihash_kv_16_8_t ed_kv, ed_value; + make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address, + ip0->protocol, sm->outside_fib_index, udp0->dst_port, + udp0->src_port); + rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed, + &ed_kv, &ed_value); + si = ed_value.value; + } + else { + rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, + &value0); si = value0.value; - - s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si); - new_dst_addr0 = s0->in2out.addr.as_u32; - new_dst_port0 = s0->in2out.port; - vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; } + if (rv) + return 0; + + s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si); + new_dst_addr0 = s0->in2out.addr.as_u32; + new_dst_port0 = s0->in2out.port; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; } /* Destination is behind the same NAT, use internal address and port */ @@ -904,20 +953,24 @@ snat_hairpinning (snat_main_t *sm, tcp0->checksum = ip_csum_fold(sum0); } } + return 1; } + return 0; } static inline void snat_icmp_hairpinning (snat_main_t *sm, vlib_buffer_t * b0, ip4_header_t * ip0, - icmp46_header_t * icmp0) + icmp46_header_t * icmp0, + int is_ed) { snat_session_key_t key0, sm0; clib_bihash_kv_8_8_t kv0, value0; u32 new_dst_addr0 = 0, old_dst_addr0, si, ti = 0; ip_csum_t sum0; snat_session_t *s0; + int rv; if (!icmp_is_error_message (icmp0)) { @@ -935,11 +988,25 @@ snat_icmp_hairpinning (snat_main_t *sm, ti = sm->num_workers; /* Check if destination is in active sessions */ - if (clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, - &value0)) + if (is_ed) + { + clib_bihash_kv_16_8_t ed_kv, ed_value; + make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address, + IP_PROTOCOL_ICMP, sm->outside_fib_index, icmp_id0, 0); + rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed, + &ed_kv, &ed_value); + si = ed_value.value; + } + else + { + rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, + &value0); + si = value0.value; + } + if (rv) { /* or static mappings */ - if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0)) + if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0, 0, 0, 0)) { new_dst_addr0 = sm0.addr.as_u32; vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index; @@ -947,8 +1014,6 @@ snat_icmp_hairpinning (snat_main_t *sm, } else { - si = value0.value; - s0 = pool_elt_at_index 
(sm->per_thread_data[ti].sessions, si); new_dst_addr0 = s0->in2out.addr.as_u32; vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; @@ -992,533 +1057,108 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm, { /* Hairpinning */ if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == 0) - snat_icmp_hairpinning(sm, b0, ip0, icmp0); + snat_icmp_hairpinning(sm, b0, ip0, icmp0, sm->endpoint_dependent); /* Accounting */ - s0->last_heard = now; - s0->total_pkts++; - s0->total_bytes += vlib_buffer_length_in_chain (sm->vlib_main, b0); - /* Per-user LRU list maintenance for dynamic translations */ - if (!snat_is_session_static (s0)) - { - clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, - s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, - s0->per_user_list_head_index, - s0->per_user_index); - } + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (sm->vlib_main, b0)); + /* Per-user LRU list maintenance */ + nat44_session_update_lru (sm, s0, thread_index); } return next0; } + static inline void -snat_hairpinning_unknown_proto (snat_main_t *sm, - vlib_buffer_t * b, - ip4_header_t * ip) +nat_hairpinning_sm_unknown_proto (snat_main_t * sm, + vlib_buffer_t * b, + ip4_header_t * ip) { - u32 old_addr, new_addr = 0, ti = 0; clib_bihash_kv_8_8_t kv, value; - clib_bihash_kv_16_8_t s_kv, s_value; - nat_ed_ses_key_t key; - snat_session_key_t m_key; snat_static_mapping_t *m; + u32 old_addr, new_addr; ip_csum_t sum; - snat_session_t *s; - old_addr = ip->dst_address.as_u32; - key.l_addr.as_u32 = ip->dst_address.as_u32; - key.r_addr.as_u32 = ip->src_address.as_u32; - key.fib_index = sm->outside_fib_index; - key.proto = ip->protocol; - key.rsvd = 0; - key.l_port = 0; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_search_16_8 (&sm->out2in_ed, &s_kv, &s_value)) - { - m_key.addr = ip->dst_address; - m_key.fib_index = sm->outside_fib_index; - m_key.port = 0; - m_key.protocol = 0; - kv.key = m_key.as_u64; - if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value)) - return; + make_sm_kv (&kv, &ip->dst_address, 0, 0, 0); + if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value)) + return; - m = pool_elt_at_index (sm->static_mappings, value.value); - if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) - vnet_buffer(b)->sw_if_index[VLIB_TX] = m->fib_index; - new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32; - } - else - { - if (sm->num_workers > 1) - ti = sm->worker_out2in_cb (ip, sm->outside_fib_index); - else - ti = sm->num_workers; + m = pool_elt_at_index (sm->static_mappings, value.value); - s = pool_elt_at_index (sm->per_thread_data[ti].sessions, s_value.value); - if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) - vnet_buffer(b)->sw_if_index[VLIB_TX] = s->in2out.fib_index; - new_addr = ip->dst_address.as_u32 = s->in2out.addr.as_u32; - } + old_addr = ip->dst_address.as_u32; + new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32; sum = ip->checksum; sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address); ip->checksum = ip_csum_fold (sum); + + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) + vnet_buffer(b)->sw_if_index[VLIB_TX] = m->fib_index; } -static void -snat_in2out_unknown_proto (snat_main_t *sm, - vlib_buffer_t * b, - ip4_header_t * ip, - u32 rx_fib_index, - u32 thread_index, - f64 now, - vlib_main_t * vm) +static int +nat_in2out_sm_unknown_proto (snat_main_t *sm, + vlib_buffer_t * b, + ip4_header_t * ip, + u32 rx_fib_index) { 
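+  /* Unknown/unsupported L4 protocol: rewrite the source address from a
+     static 1:1 mapping only; return nonzero when no mapping exists so the
+     caller can drop the packet as unsupported. */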
clib_bihash_kv_8_8_t kv, value; - clib_bihash_kv_16_8_t s_kv, s_value; snat_static_mapping_t *m; snat_session_key_t m_key; - u32 old_addr, new_addr = 0; + u32 old_addr, new_addr; ip_csum_t sum; - snat_user_key_t u_key; - snat_user_t *u; - dlist_elt_t *head, *elt, *oldest; - snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; - u32 elt_index, head_index, ses_index, oldest_index; - snat_session_t * s; - nat_ed_ses_key_t key; - u32 address_index = ~0; - int i; - u8 is_sm = 0; + + m_key.addr = ip->src_address; + m_key.port = 0; + m_key.protocol = 0; + m_key.fib_index = rx_fib_index; + kv.key = m_key.as_u64; + if (clib_bihash_search_8_8 (&sm->static_mapping_by_local, &kv, &value)) + return 1; + + m = pool_elt_at_index (sm->static_mappings, value.value); old_addr = ip->src_address.as_u32; + new_addr = ip->src_address.as_u32 = m->external_addr.as_u32; + sum = ip->checksum; + sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, src_address); + ip->checksum = ip_csum_fold (sum); - key.l_addr = ip->src_address; - key.r_addr = ip->dst_address; - key.fib_index = rx_fib_index; - key.proto = ip->protocol; - key.rsvd = 0; - key.l_port = 0; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (!clib_bihash_search_16_8 (&sm->in2out_ed, &s_kv, &s_value)) + /* Hairpinning */ + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) { - s = pool_elt_at_index (tsm->sessions, s_value.value); - new_addr = ip->src_address.as_u32 = s->out2in.addr.as_u32; + vnet_buffer(b)->sw_if_index[VLIB_TX] = m->fib_index; + nat_hairpinning_sm_unknown_proto (sm, b, ip); } - else - { - u_key.addr = ip->src_address; - u_key.fib_index = rx_fib_index; - kv.key = u_key.as_u64; - /* Ever heard of the "user" = src ip4 address before? */ - if (clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value)) - { - /* no, make a new one */ - pool_get (tsm->users, u); - memset (u, 0, sizeof (*u)); - u->addr = ip->src_address; - u->fib_index = rx_fib_index; + return 0; +} - pool_get (tsm->list_pool, head); - u->sessions_per_user_list_head_index = head - tsm->list_pool; +static inline uword +snat_in2out_node_fn_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, int is_slow_path, + int is_output_feature) +{ + u32 n_left_from, * from, * to_next; + snat_in2out_next_t next_index; + u32 pkts_processed = 0; + snat_main_t * sm = &snat_main; + f64 now = vlib_time_now (vm); + u32 stats_node_index; + u32 thread_index = vm->thread_index; - clib_dlist_init (tsm->list_pool, - u->sessions_per_user_list_head_index); + stats_node_index = is_slow_path ? 
snat_in2out_slowpath_node.index : + snat_in2out_node.index; - kv.value = u - tsm->users; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; - /* add user */ - clib_bihash_add_del_8_8 (&tsm->user_hash, &kv, 1); - } - else - { - u = pool_elt_at_index (tsm->users, value.value); - } + while (n_left_from > 0) + { + u32 n_left_to_next; - m_key.addr = ip->src_address; - m_key.port = 0; - m_key.protocol = 0; - m_key.fib_index = rx_fib_index; - kv.key = m_key.as_u64; - - /* Try to find static mapping first */ - if (!clib_bihash_search_8_8 (&sm->static_mapping_by_local, &kv, &value)) - { - m = pool_elt_at_index (sm->static_mappings, value.value); - new_addr = ip->src_address.as_u32 = m->external_addr.as_u32; - is_sm = 1; - goto create_ses; - } - /* Fallback to 3-tuple key */ - else - { - /* Choose same out address as for TCP/UDP session to same destination */ - if (!clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value)) - { - head_index = u->sessions_per_user_list_head_index; - head = pool_elt_at_index (tsm->list_pool, head_index); - elt_index = head->next; - elt = pool_elt_at_index (tsm->list_pool, elt_index); - ses_index = elt->value; - while (ses_index != ~0) - { - s = pool_elt_at_index (tsm->sessions, ses_index); - elt_index = elt->next; - elt = pool_elt_at_index (tsm->list_pool, elt_index); - ses_index = elt->value; - - if (s->ext_host_addr.as_u32 == ip->dst_address.as_u32) - { - new_addr = ip->src_address.as_u32 = s->out2in.addr.as_u32; - address_index = s->outside_address_index; - - key.fib_index = sm->outside_fib_index; - key.l_addr.as_u32 = new_addr; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_search_16_8 (&sm->out2in_ed, &s_kv, &s_value)) - break; - - goto create_ses; - } - } - } - key.fib_index = sm->outside_fib_index; - for (i = 0; i < vec_len (sm->addresses); i++) - { - key.l_addr.as_u32 = sm->addresses[i].addr.as_u32; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_search_16_8 (&sm->out2in_ed, &s_kv, &s_value)) - { - new_addr = ip->src_address.as_u32 = key.l_addr.as_u32; - address_index = i; - goto create_ses; - } - } - return; - } - -create_ses: - /* Over quota? 
Recycle the least recently used dynamic translation */ - if (u->nsessions >= sm->max_translations_per_user && !is_sm) - { - /* Remove the oldest dynamic translation */ - do { - oldest_index = clib_dlist_remove_head ( - tsm->list_pool, u->sessions_per_user_list_head_index); - - ASSERT (oldest_index != ~0); - - /* add it back to the end of the LRU list */ - clib_dlist_addtail (tsm->list_pool, - u->sessions_per_user_list_head_index, - oldest_index); - /* Get the list element */ - oldest = pool_elt_at_index (tsm->list_pool, oldest_index); - - /* Get the session index from the list element */ - ses_index = oldest->value; - - /* Get the session */ - s = pool_elt_at_index (tsm->sessions, ses_index); - } while (snat_is_session_static (s)); - - if (snat_is_unk_proto_session (s)) - { - /* Remove from lookup tables */ - key.l_addr = s->in2out.addr; - key.r_addr = s->ext_host_addr; - key.fib_index = s->in2out.fib_index; - key.proto = s->in2out.port; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->in2out_ed, &s_kv, 0)) - clib_warning ("in2out key del failed"); - - key.l_addr = s->out2in.addr; - key.fib_index = s->out2in.fib_index; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->out2in_ed, &s_kv, 0)) - clib_warning ("out2in key del failed"); - } - else - { - /* log NAT event */ - snat_ipfix_logging_nat44_ses_delete(s->in2out.addr.as_u32, - s->out2in.addr.as_u32, - s->in2out.protocol, - s->in2out.port, - s->out2in.port, - s->in2out.fib_index); - - snat_free_outside_address_and_port (sm, thread_index, &s->out2in, - s->outside_address_index); - - /* Remove in2out, out2in keys */ - kv.key = s->in2out.as_u64; - if (clib_bihash_add_del_8_8 ( - &sm->per_thread_data[thread_index].in2out, &kv, 0)) - clib_warning ("in2out key del failed"); - kv.key = s->out2in.as_u64; - if (clib_bihash_add_del_8_8 ( - &sm->per_thread_data[thread_index].out2in, &kv, 0)) - clib_warning ("out2in key del failed"); - } - } - else - { - /* Create a new session */ - pool_get (tsm->sessions, s); - memset (s, 0, sizeof (*s)); - - /* Create list elts */ - pool_get (tsm->list_pool, elt); - clib_dlist_init (tsm->list_pool, elt - tsm->list_pool); - elt->value = s - tsm->sessions; - s->per_user_index = elt - tsm->list_pool; - s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (tsm->list_pool, s->per_user_list_head_index, - s->per_user_index); - } - - s->ext_host_addr.as_u32 = ip->dst_address.as_u32; - s->flags |= SNAT_SESSION_FLAG_UNKNOWN_PROTO; - s->outside_address_index = address_index; - s->out2in.addr.as_u32 = new_addr; - s->out2in.fib_index = sm->outside_fib_index; - s->in2out.addr.as_u32 = old_addr; - s->in2out.fib_index = rx_fib_index; - s->in2out.port = s->out2in.port = ip->protocol; - if (is_sm) - { - u->nstaticsessions++; - s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; - } - else - { - u->nsessions++; - } - - /* Add to lookup tables */ - key.l_addr.as_u32 = old_addr; - key.r_addr = ip->dst_address; - key.proto = ip->protocol; - key.fib_index = rx_fib_index; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - s_kv.value = s - tsm->sessions; - if (clib_bihash_add_del_16_8 (&sm->in2out_ed, &s_kv, 1)) - clib_warning ("in2out key add failed"); - - key.l_addr.as_u32 = new_addr; - key.fib_index = sm->outside_fib_index; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->out2in_ed, &s_kv, 1)) - clib_warning ("out2in key add failed"); - } - - /* 
Update IP checksum */ - sum = ip->checksum; - sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, src_address); - ip->checksum = ip_csum_fold (sum); - - /* Accounting */ - s->last_heard = now; - s->total_pkts++; - s->total_bytes += vlib_buffer_length_in_chain (vm, b); - /* Per-user LRU list maintenance */ - clib_dlist_remove (tsm->list_pool, s->per_user_index); - clib_dlist_addtail (tsm->list_pool, s->per_user_list_head_index, - s->per_user_index); - - /* Hairpinning */ - if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) - snat_hairpinning_unknown_proto(sm, b, ip); - - if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) - vnet_buffer(b)->sw_if_index[VLIB_TX] = sm->outside_fib_index; -} - -static snat_session_t * -snat_in2out_lb (snat_main_t *sm, - vlib_buffer_t * b, - ip4_header_t * ip, - u32 rx_fib_index, - u32 thread_index, - f64 now, - vlib_main_t * vm) -{ - nat_ed_ses_key_t key; - clib_bihash_kv_16_8_t s_kv, s_value; - udp_header_t *udp = ip4_next_header (ip); - tcp_header_t *tcp = (tcp_header_t *) udp; - snat_session_t *s = 0; - snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; - u32 old_addr, new_addr; - u16 new_port, old_port; - ip_csum_t sum; - u32 proto = ip_proto_to_snat_proto (ip->protocol); - snat_session_key_t e_key, l_key; - clib_bihash_kv_8_8_t kv, value; - snat_user_key_t u_key; - snat_user_t *u; - dlist_elt_t *head, *elt; - - old_addr = ip->src_address.as_u32; - - key.l_addr = ip->src_address; - key.r_addr = ip->dst_address; - key.fib_index = rx_fib_index; - key.proto = ip->protocol; - key.rsvd = 0; - key.l_port = udp->src_port; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - - if (!clib_bihash_search_16_8 (&sm->in2out_ed, &s_kv, &s_value)) - { - s = pool_elt_at_index (tsm->sessions, s_value.value); - } - else - { - l_key.addr = ip->src_address; - l_key.port = udp->src_port; - l_key.protocol = proto; - l_key.fib_index = rx_fib_index; - if (snat_static_mapping_match(sm, l_key, &e_key, 0, 0)) - return 0; - - u_key.addr = ip->src_address; - u_key.fib_index = rx_fib_index; - kv.key = u_key.as_u64; - - /* Ever heard of the "user" = src ip4 address before? 
*/ - if (clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value)) - { - /* no, make a new one */ - pool_get (tsm->users, u); - memset (u, 0, sizeof (*u)); - u->addr = ip->src_address; - u->fib_index = rx_fib_index; - - pool_get (tsm->list_pool, head); - u->sessions_per_user_list_head_index = head - tsm->list_pool; - - clib_dlist_init (tsm->list_pool, - u->sessions_per_user_list_head_index); - - kv.value = u - tsm->users; - - /* add user */ - if (clib_bihash_add_del_8_8 (&tsm->user_hash, &kv, 1)) - clib_warning ("user key add failed"); - } - else - { - u = pool_elt_at_index (tsm->users, value.value); - } - - /* Create a new session */ - pool_get (tsm->sessions, s); - memset (s, 0, sizeof (*s)); - - s->ext_host_addr.as_u32 = ip->dst_address.as_u32; - s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; - s->flags |= SNAT_SESSION_FLAG_LOAD_BALANCING; - s->outside_address_index = ~0; - s->in2out = l_key; - s->out2in = e_key; - u->nstaticsessions++; - - /* Create list elts */ - pool_get (tsm->list_pool, elt); - clib_dlist_init (tsm->list_pool, elt - tsm->list_pool); - elt->value = s - tsm->sessions; - s->per_user_index = elt - tsm->list_pool; - s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (tsm->list_pool, s->per_user_list_head_index, - s->per_user_index); - - /* Add to lookup tables */ - s_kv.value = s - tsm->sessions; - if (clib_bihash_add_del_16_8 (&sm->in2out_ed, &s_kv, 1)) - clib_warning ("in2out-ed key add failed"); - - key.l_addr = e_key.addr; - key.fib_index = e_key.fib_index; - key.l_port = e_key.port; - s_kv.key[0] = key.as_u64[0]; - s_kv.key[1] = key.as_u64[1]; - if (clib_bihash_add_del_16_8 (&sm->out2in_ed, &s_kv, 1)) - clib_warning ("out2in-ed key add failed"); - } - - new_addr = ip->src_address.as_u32 = s->out2in.addr.as_u32; - - /* Update IP checksum */ - sum = ip->checksum; - sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, src_address); - ip->checksum = ip_csum_fold (sum); - - if (PREDICT_TRUE(proto == SNAT_PROTOCOL_TCP)) - { - old_port = tcp->src_port; - tcp->src_port = s->out2in.port; - new_port = tcp->src_port; - - sum = tcp->checksum; - sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, src_address); - sum = ip_csum_update (sum, old_port, new_port, ip4_header_t, length); - tcp->checksum = ip_csum_fold(sum); - } - else - { - udp->src_port = s->out2in.port; - udp->checksum = 0; - } - - if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) - vnet_buffer(b)->sw_if_index[VLIB_TX] = sm->outside_fib_index; - - /* Accounting */ - s->last_heard = now; - s->total_pkts++; - s->total_bytes += vlib_buffer_length_in_chain (vm, b); - return s; -} - -static inline uword -snat_in2out_node_fn_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, int is_slow_path, - int is_output_feature) -{ - u32 n_left_from, * from, * to_next; - snat_in2out_next_t next_index; - u32 pkts_processed = 0; - snat_main_t * sm = &snat_main; - f64 now = vlib_time_now (vm); - u32 stats_node_index; - u32 thread_index = vlib_get_thread_index (); - - stats_node_index = is_slow_path ? 
snat_in2out_slowpath_node.index : - snat_in2out_node.index; - - from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next_index = node->cached_next_index; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, - to_next, n_left_to_next); + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); while (n_left_from >= 4 && n_left_to_next >= 2) { @@ -1598,8 +1238,11 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { if (PREDICT_FALSE (proto0 == ~0)) { - snat_in2out_unknown_proto (sm, b0, ip0, rx_fib_index0, - thread_index, now, vm); + if (nat_in2out_sm_unknown_proto (sm, b0, ip0, rx_fib_index0)) + { + next0 = SNAT_IN2OUT_NEXT_DROP; + b0->error = node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL]; + } goto trace00; } @@ -1618,6 +1261,12 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; goto trace00; } + + if (ip4_is_fragment (ip0)) + { + next0 = SNAT_IN2OUT_NEXT_REASS; + goto trace00; + } } key0.addr = ip0->src_address; @@ -1632,12 +1281,21 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { if (is_slow_path) { - if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, - ip0, proto0, rx_fib_index0, thread_index)) && !is_output_feature) - goto trace00; + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature(sm, + ip0, proto0, udp0->src_port, udp0->dst_port, thread_index, sw_if_index0))) + goto trace00; + } + else + { + if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, + ip0, proto0, rx_fib_index0, thread_index))) + goto trace00; + } next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, thread_index); + &s0, node, next0, thread_index, now); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace00; } @@ -1648,28 +1306,10 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - { - if (PREDICT_FALSE (value0.value == ~0ULL)) - { - if (is_slow_path) - { - s0 = snat_in2out_lb(sm, b0, ip0, rx_fib_index0, thread_index, - now, vm); - goto trace00; - } - else - { - next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; - goto trace00; - } - } - else - { - s0 = pool_elt_at_index ( - sm->per_thread_data[thread_index].sessions, - value0.value); - } - } + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, + value0.value); + + b0->flags |= VNET_BUFFER_F_IS_NATED; old_addr0 = ip0->src_address.as_u32; ip0->src_address = s0->out2in.addr; @@ -1705,23 +1345,11 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, udp0->checksum = 0; } - /* Hairpinning */ - if (!is_output_feature) - snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0); - /* Accounting */ - s0->last_heard = now; - s0->total_pkts++; - s0->total_bytes += vlib_buffer_length_in_chain (vm, b0); - /* Per-user LRU list maintenance for dynamic translation */ - if (!snat_is_session_static (s0)) - { - clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, - s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, - s0->per_user_list_head_index, - s0->per_user_index); - } + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (vm, b0)); + /* Per-user LRU list maintenance */ + nat44_session_update_lru (sm, s0, thread_index); trace00: if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) @@ -1770,8 +1398,11 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { if (PREDICT_FALSE (proto1 == ~0)) { - snat_in2out_unknown_proto (sm, b1, ip1, rx_fib_index1, - thread_index, now, vm); + if (nat_in2out_sm_unknown_proto (sm, b1, ip1, 
rx_fib_index1)) + { + next1 = SNAT_IN2OUT_NEXT_DROP; + b1->error = node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL]; + } goto trace01; } @@ -1790,6 +1421,12 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, next1 = SNAT_IN2OUT_NEXT_SLOW_PATH; goto trace01; } + + if (ip4_is_fragment (ip1)) + { + next1 = SNAT_IN2OUT_NEXT_REASS; + goto trace01; + } } key1.addr = ip1->src_address; @@ -1804,12 +1441,21 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { if (is_slow_path) { - if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index1, - ip1, proto1, rx_fib_index1, thread_index)) && !is_output_feature) - goto trace01; + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature(sm, + ip1, proto1, udp1->src_port, udp1->dst_port, thread_index, sw_if_index1))) + goto trace01; + } + else + { + if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index1, + ip1, proto1, rx_fib_index1, thread_index))) + goto trace01; + } next1 = slow_path (sm, b1, ip1, rx_fib_index1, &key1, - &s1, node, next1, thread_index); + &s1, node, next1, thread_index, now); if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP)) goto trace01; } @@ -1820,92 +1466,2109 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else + s1 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, + value1.value); + + b1->flags |= VNET_BUFFER_F_IS_NATED; + + old_addr1 = ip1->src_address.as_u32; + ip1->src_address = s1->out2in.addr; + new_addr1 = ip1->src_address.as_u32; + if (!is_output_feature) + vnet_buffer(b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index; + + sum1 = ip1->checksum; + sum1 = ip_csum_update (sum1, old_addr1, new_addr1, + ip4_header_t, + src_address /* changed member */); + ip1->checksum = ip_csum_fold (sum1); + + if (PREDICT_TRUE(proto1 == SNAT_PROTOCOL_TCP)) + { + old_port1 = tcp1->src_port; + tcp1->src_port = s1->out2in.port; + new_port1 = tcp1->src_port; + + sum1 = tcp1->checksum; + sum1 = ip_csum_update (sum1, old_addr1, new_addr1, + ip4_header_t, + dst_address /* changed member */); + sum1 = ip_csum_update (sum1, old_port1, new_port1, + ip4_header_t /* cheat */, + length /* changed member */); + tcp1->checksum = ip_csum_fold(sum1); + } + else + { + old_port1 = udp1->src_port; + udp1->src_port = s1->out2in.port; + udp1->checksum = 0; + } + + /* Accounting */ + nat44_session_update_counters (s1, now, + vlib_buffer_length_in_chain (vm, b1)); + /* Per-user LRU list maintenance */ + nat44_session_update_lru (sm, s1, thread_index); + trace01: + + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && (b1->flags & VLIB_BUFFER_IS_TRACED))) + { + snat_in2out_trace_t *t = + vlib_add_trace (vm, node, b1, sizeof (*t)); + t->sw_if_index = sw_if_index1; + t->next_index = next1; + t->session_index = ~0; + if (s1) + t->session_index = s1 - sm->per_thread_data[thread_index].sessions; + } + + pkts_processed += next1 != SNAT_IN2OUT_NEXT_DROP; + + /* verify speculative enqueues, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0; + u32 sw_if_index0; + ip4_header_t * ip0; + ip_csum_t sum0; + u32 new_addr0, old_addr0; + u16 old_port0, new_port0; + udp_header_t * udp0; + tcp_header_t * tcp0; + icmp46_header_t * icmp0; + snat_session_key_t key0; + u32 rx_fib_index0; + u32 proto0; + snat_session_t * s0 = 0; + clib_bihash_kv_8_8_t kv0, value0; + u32 iph_offset0 = 0; + + /* speculatively enqueue b0 to the current next 
frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + next0 = SNAT_IN2OUT_NEXT_LOOKUP; + + if (is_output_feature) + iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; + + ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + + iph_offset0); + + udp0 = ip4_next_header (ip0); + tcp0 = (tcp_header_t *) udp0; + icmp0 = (icmp46_header_t *) udp0; + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index, + sw_if_index0); + + if (PREDICT_FALSE(ip0->ttl == 1)) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded, + ICMP4_time_exceeded_ttl_exceeded_in_transit, + 0); + next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR; + goto trace0; + } + + proto0 = ip_proto_to_snat_proto (ip0->protocol); + + /* Next configured feature, probably ip4-lookup */ + if (is_slow_path) + { + if (PREDICT_FALSE (proto0 == ~0)) + { + if (nat_in2out_sm_unknown_proto (sm, b0, ip0, rx_fib_index0)) + { + next0 = SNAT_IN2OUT_NEXT_DROP; + b0->error = node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL]; + } + goto trace0; + } + + if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = icmp_in2out_slow_path + (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, + next0, now, thread_index, &s0); + goto trace0; + } + } + else + { + if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace0; + } + + if (ip4_is_fragment (ip0)) + { + next0 = SNAT_IN2OUT_NEXT_REASS; + goto trace0; + } + } + + key0.addr = ip0->src_address; + key0.port = udp0->src_port; + key0.protocol = proto0; + key0.fib_index = rx_fib_index0; + + kv0.key = key0.as_u64; + + if (clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].in2out, + &kv0, &value0)) + { + if (is_slow_path) + { + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature(sm, + ip0, proto0, udp0->src_port, udp0->dst_port, thread_index, sw_if_index0))) + goto trace0; + } + else + { + if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, + ip0, proto0, rx_fib_index0, thread_index))) + goto trace0; + } + + next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, + &s0, node, next0, thread_index, now); + + if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) + goto trace0; + } + else + { + next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace0; + } + } + else + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, + value0.value); + + b0->flags |= VNET_BUFFER_F_IS_NATED; + + old_addr0 = ip0->src_address.as_u32; + ip0->src_address = s0->out2in.addr; + new_addr0 = ip0->src_address.as_u32; + if (!is_output_feature) + vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index; + + sum0 = ip0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, + src_address /* changed member */); + ip0->checksum = ip_csum_fold (sum0); + + if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) + { + old_port0 = tcp0->src_port; + tcp0->src_port = s0->out2in.port; + new_port0 = tcp0->src_port; + + sum0 = tcp0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, + dst_address /* changed member */); + sum0 = ip_csum_update (sum0, old_port0, new_port0, + ip4_header_t /* cheat */, + length /* changed member */); + tcp0->checksum = ip_csum_fold(sum0); + } + else + { + old_port0 = udp0->src_port; + udp0->src_port = s0->out2in.port; + udp0->checksum = 0; 
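+                  /* a zero UDP checksum is valid for IPv4, so clear it
+                     rather than update it incrementally */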
+ } + + /* Accounting */ + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (vm, b0)); + /* Per-user LRU list maintenance */ + nat44_session_update_lru (sm, s0, thread_index); + + trace0: + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + snat_in2out_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->is_slow_path = is_slow_path; + t->sw_if_index = sw_if_index0; + t->next_index = next0; + t->session_index = ~0; + if (s0) + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; + } + + pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, stats_node_index, + SNAT_IN2OUT_ERROR_IN2OUT_PACKETS, + pkts_processed); + return frame->n_vectors; +} + +static uword +snat_in2out_fast_path_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return snat_in2out_node_fn_inline (vm, node, frame, 0 /* is_slow_path */, 0); +} + +VLIB_REGISTER_NODE (snat_in2out_node) = { + .function = snat_in2out_fast_path_fn, + .name = "nat44-in2out", + .vector_size = sizeof (u32), + .format_trace = format_snat_in2out_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + + .runtime_data_bytes = sizeof (snat_runtime_t), + + .n_next_nodes = SNAT_IN2OUT_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup", + [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath", + [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_node, snat_in2out_fast_path_fn); + +static uword +snat_in2out_output_fast_path_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return snat_in2out_node_fn_inline (vm, node, frame, 0 /* is_slow_path */, 1); +} + +VLIB_REGISTER_NODE (snat_in2out_output_node) = { + .function = snat_in2out_output_fast_path_fn, + .name = "nat44-in2out-output", + .vector_size = sizeof (u32), + .format_trace = format_snat_in2out_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + + .runtime_data_bytes = sizeof (snat_runtime_t), + + .n_next_nodes = SNAT_IN2OUT_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output", + [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-output-slowpath", + [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_output_node, + snat_in2out_output_fast_path_fn); + +static uword +snat_in2out_slow_path_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return snat_in2out_node_fn_inline (vm, node, frame, 1 /* is_slow_path */, 0); +} + +VLIB_REGISTER_NODE (snat_in2out_slowpath_node) = { + .function = snat_in2out_slow_path_fn, + .name = "nat44-in2out-slowpath", + .vector_size = sizeof (u32), + .format_trace = format_snat_in2out_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = 
ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + + .runtime_data_bytes = sizeof (snat_runtime_t), + + .n_next_nodes = SNAT_IN2OUT_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup", + [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath", + [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_slowpath_node, + snat_in2out_slow_path_fn); + +static uword +snat_in2out_output_slow_path_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return snat_in2out_node_fn_inline (vm, node, frame, 1 /* is_slow_path */, 1); +} + +VLIB_REGISTER_NODE (snat_in2out_output_slowpath_node) = { + .function = snat_in2out_output_slow_path_fn, + .name = "nat44-in2out-output-slowpath", + .vector_size = sizeof (u32), + .format_trace = format_snat_in2out_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + + .runtime_data_bytes = sizeof (snat_runtime_t), + + .n_next_nodes = SNAT_IN2OUT_N_NEXT, + + /* edit / add dispositions here */ + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output", + [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-output-slowpath", + [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_output_slowpath_node, + snat_in2out_output_slow_path_fn); + +extern vnet_feature_arc_registration_t vnet_feat_arc_ip4_local; + +static inline uword +nat44_hairpinning_fn_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int is_ed) +{ + u32 n_left_from, * from, * to_next, stats_node_index; + snat_in2out_next_t next_index; + u32 pkts_processed = 0; + snat_main_t * sm = &snat_main; + vnet_feature_main_t *fm = &feature_main; + u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index; + vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index]; + + stats_node_index = is_ed ? 
nat44_ed_hairpinning_node.index : + nat44_hairpinning_node.index; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, + to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t * b0; + u32 next0; + ip4_header_t * ip0; + u32 proto0; + udp_header_t * udp0; + tcp_header_t * tcp0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + udp0 = ip4_next_header (ip0); + tcp0 = (tcp_header_t *) udp0; + + proto0 = ip_proto_to_snat_proto (ip0->protocol); + + vnet_get_config_data (&cm->config_main, &b0->current_config_index, + &next0, 0); + + if (snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, is_ed)) + next0 = SNAT_IN2OUT_NEXT_LOOKUP; + + pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, stats_node_index, + SNAT_IN2OUT_ERROR_IN2OUT_PACKETS, + pkts_processed); + return frame->n_vectors; +} + +static uword +nat44_hairpinning_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return nat44_hairpinning_fn_inline (vm, node, frame, 0); +} + +VLIB_REGISTER_NODE (nat44_hairpinning_node) = { + .function = nat44_hairpinning_fn, + .name = "nat44-hairpinning", + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + .n_next_nodes = 2, + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (nat44_hairpinning_node, + nat44_hairpinning_fn); + +static uword +nat44_ed_hairpinning_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return nat44_hairpinning_fn_inline (vm, node, frame, 1); +} + +VLIB_REGISTER_NODE (nat44_ed_hairpinning_node) = { + .function = nat44_ed_hairpinning_fn, + .name = "nat44-ed-hairpinning", + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + .n_next_nodes = 2, + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_hairpinning_node, + nat44_ed_hairpinning_fn); + +static inline void +nat44_reass_hairpinning (snat_main_t *sm, + vlib_buffer_t * b0, + ip4_header_t * ip0, + u16 sport, + u16 dport, + u32 proto0) +{ + snat_session_key_t key0, sm0; + snat_session_t * s0; + clib_bihash_kv_8_8_t kv0, value0; + ip_csum_t sum0; + u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si; + u16 new_dst_port0, old_dst_port0; + udp_header_t * udp0; + tcp_header_t * tcp0; + + key0.addr = ip0->dst_address; + key0.port = dport; + key0.protocol = proto0; + key0.fib_index = sm->outside_fib_index; + kv0.key = key0.as_u64; + + udp0 = ip4_next_header (ip0); + + /* Check if destination is static mappings */ + if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0, 0, 0, 0)) + { + new_dst_addr0 = 
sm0.addr.as_u32; + new_dst_port0 = sm0.port; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index; + } + /* or active sessions */ + else + { + if (sm->num_workers > 1) + ti = (clib_net_to_host_u16 (udp0->dst_port) - 1024) / sm->port_per_thread; + else + ti = sm->num_workers; + + if (!clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, &value0)) + { + si = value0.value; + s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si); + new_dst_addr0 = s0->in2out.addr.as_u32; + new_dst_port0 = s0->in2out.port; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index; + } + } + + /* Destination is behind the same NAT, use internal address and port */ + if (new_dst_addr0) + { + old_dst_addr0 = ip0->dst_address.as_u32; + ip0->dst_address.as_u32 = new_dst_addr0; + sum0 = ip0->checksum; + sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, + ip4_header_t, dst_address); + ip0->checksum = ip_csum_fold (sum0); + + old_dst_port0 = dport; + if (PREDICT_TRUE(new_dst_port0 != old_dst_port0 && + ip4_is_first_fragment (ip0))) + { + if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) + { + tcp0 = ip4_next_header (ip0); + tcp0->dst = new_dst_port0; + sum0 = tcp0->checksum; + sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, + ip4_header_t, dst_address); + sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0, + ip4_header_t /* cheat */, length); + tcp0->checksum = ip_csum_fold(sum0); + } + else + { + udp0->dst_port = new_dst_port0; + udp0->checksum = 0; + } + } + else + { + if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) + { + tcp0 = ip4_next_header (ip0); + sum0 = tcp0->checksum; + sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0, + ip4_header_t, dst_address); + tcp0->checksum = ip_csum_fold(sum0); + } + } + } +} + +static uword +nat44_in2out_reass_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 n_left_from, *from, *to_next; + snat_in2out_next_t next_index; + u32 pkts_processed = 0; + snat_main_t *sm = &snat_main; + f64 now = vlib_time_now (vm); + u32 thread_index = vm->thread_index; + snat_main_per_thread_data_t *per_thread_data = + &sm->per_thread_data[thread_index]; + u32 *fragments_to_drop = 0; + u32 *fragments_to_loopback = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, sw_if_index0, proto0, rx_fib_index0, new_addr0, old_addr0; + vlib_buffer_t *b0; + u32 next0; + u8 cached0 = 0; + ip4_header_t *ip0; + nat_reass_ip4_t *reass0; + udp_header_t * udp0; + tcp_header_t * tcp0; + snat_session_key_t key0; + clib_bihash_kv_8_8_t kv0, value0; + snat_session_t * s0 = 0; + u16 old_port0, new_port0; + ip_csum_t sum0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + next0 = SNAT_IN2OUT_NEXT_LOOKUP; + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, + sw_if_index0); + + if (PREDICT_FALSE (nat_reass_is_drop_frag(0))) + { + next0 = SNAT_IN2OUT_NEXT_DROP; + b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT]; + goto trace0; + } + + ip0 = (ip4_header_t *) vlib_buffer_get_current (b0); + udp0 = ip4_next_header (ip0); + tcp0 = 
(tcp_header_t *) udp0; + proto0 = ip_proto_to_snat_proto (ip0->protocol); + + reass0 = nat_ip4_reass_find_or_create (ip0->src_address, + ip0->dst_address, + ip0->fragment_id, + ip0->protocol, + 1, + &fragments_to_drop); + + if (PREDICT_FALSE (!reass0)) + { + next0 = SNAT_IN2OUT_NEXT_DROP; + b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_REASS]; + nat_log_notice ("maximum reassemblies exceeded"); + goto trace0; + } + + if (PREDICT_FALSE (ip4_is_first_fragment (ip0))) + { + key0.addr = ip0->src_address; + key0.port = udp0->src_port; + key0.protocol = proto0; + key0.fib_index = rx_fib_index0; + kv0.key = key0.as_u64; + + if (clib_bihash_search_8_8 (&per_thread_data->in2out, &kv0, &value0)) + { + if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, + ip0, proto0, rx_fib_index0, thread_index))) + goto trace0; + + next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, + &s0, node, next0, thread_index, now); + + if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) + goto trace0; + + reass0->sess_index = s0 - per_thread_data->sessions; + } + else + { + s0 = pool_elt_at_index (per_thread_data->sessions, + value0.value); + reass0->sess_index = value0.value; + } + nat_ip4_reass_get_frags (reass0, &fragments_to_loopback); + } + else + { + if (PREDICT_FALSE (reass0->sess_index == (u32) ~0)) + { + if (nat_ip4_reass_add_fragment (reass0, bi0)) + { + b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_FRAG]; + nat_log_notice ("maximum fragments per reassembly exceeded"); + next0 = SNAT_IN2OUT_NEXT_DROP; + goto trace0; + } + cached0 = 1; + goto trace0; + } + s0 = pool_elt_at_index (per_thread_data->sessions, + reass0->sess_index); + } + + old_addr0 = ip0->src_address.as_u32; + ip0->src_address = s0->out2in.addr; + new_addr0 = ip0->src_address.as_u32; + vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index; + + sum0 = ip0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, + src_address /* changed member */); + ip0->checksum = ip_csum_fold (sum0); + + if (PREDICT_FALSE (ip4_is_first_fragment (ip0))) + { + if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) + { + old_port0 = tcp0->src_port; + tcp0->src_port = s0->out2in.port; + new_port0 = tcp0->src_port; + + sum0 = tcp0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, + dst_address /* changed member */); + sum0 = ip_csum_update (sum0, old_port0, new_port0, + ip4_header_t /* cheat */, + length /* changed member */); + tcp0->checksum = ip_csum_fold(sum0); + } + else + { + old_port0 = udp0->src_port; + udp0->src_port = s0->out2in.port; + udp0->checksum = 0; + } + } + + /* Hairpinning */ + nat44_reass_hairpinning (sm, b0, ip0, s0->out2in.port, + s0->ext_host_port, proto0); + + /* Accounting */ + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (vm, b0)); + /* Per-user LRU list maintenance */ + nat44_session_update_lru (sm, s0, thread_index); + + trace0: + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + nat44_in2out_reass_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->cached = cached0; + t->sw_if_index = sw_if_index0; + t->next_index = next0; + } + + if (cached0) + { + n_left_to_next++; + to_next--; + } + else + { + pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + if (n_left_from == 0 && vec_len (fragments_to_loopback)) + { + from = 
vlib_frame_vector_args (frame); + u32 len = vec_len (fragments_to_loopback); + if (len <= VLIB_FRAME_SIZE) + { + clib_memcpy (from, fragments_to_loopback, sizeof (u32) * len); + n_left_from = len; + vec_reset_length (fragments_to_loopback); + } + else + { + clib_memcpy (from, + fragments_to_loopback + (len - VLIB_FRAME_SIZE), + sizeof (u32) * VLIB_FRAME_SIZE); + n_left_from = VLIB_FRAME_SIZE; + _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE; + } + } + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, nat44_in2out_reass_node.index, + SNAT_IN2OUT_ERROR_IN2OUT_PACKETS, + pkts_processed); + + nat_send_all_to_node (vm, fragments_to_drop, node, + &node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT], + SNAT_IN2OUT_NEXT_DROP); + + vec_free (fragments_to_drop); + vec_free (fragments_to_loopback); + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (nat44_in2out_reass_node) = { + .function = nat44_in2out_reass_node_fn, + .name = "nat44-in2out-reass", + .vector_size = sizeof (u32), + .format_trace = format_nat44_in2out_reass_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(snat_in2out_error_strings), + .error_strings = snat_in2out_error_strings, + + .n_next_nodes = SNAT_IN2OUT_N_NEXT, + .next_nodes = { + [SNAT_IN2OUT_NEXT_DROP] = "error-drop", + [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup", + [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath", + [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (nat44_in2out_reass_node, + nat44_in2out_reass_node_fn); + +/*******************************/ +/*** endpoint-dependent mode ***/ +/*******************************/ + +static_always_inline int +icmp_get_ed_key(ip4_header_t *ip0, nat_ed_ses_key_t *p_key0) +{ + icmp46_header_t *icmp0; + nat_ed_ses_key_t key0; + icmp_echo_header_t *echo0, *inner_echo0 = 0; + ip4_header_t *inner_ip0 = 0; + void *l4_header = 0; + icmp46_header_t *inner_icmp0; + + icmp0 = (icmp46_header_t *) ip4_next_header (ip0); + echo0 = (icmp_echo_header_t *)(icmp0+1); + + if (!icmp_is_error_message (icmp0)) + { + key0.proto = IP_PROTOCOL_ICMP; + key0.l_addr = ip0->src_address; + key0.r_addr = ip0->dst_address; + key0.l_port = echo0->identifier; + key0.r_port = 0; + } + else + { + inner_ip0 = (ip4_header_t *)(echo0+1); + l4_header = ip4_next_header (inner_ip0); + key0.proto = inner_ip0->protocol; + key0.r_addr = inner_ip0->src_address; + key0.l_addr = inner_ip0->dst_address; + switch (ip_proto_to_snat_proto (inner_ip0->protocol)) + { + case SNAT_PROTOCOL_ICMP: + inner_icmp0 = (icmp46_header_t*)l4_header; + inner_echo0 = (icmp_echo_header_t *)(inner_icmp0+1); + key0.r_port = 0; + key0.l_port = inner_echo0->identifier; + break; + case SNAT_PROTOCOL_UDP: + case SNAT_PROTOCOL_TCP: + key0.l_port = ((tcp_udp_header_t*)l4_header)->dst_port; + key0.r_port = ((tcp_udp_header_t*)l4_header)->src_port; + break; + default: + return SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL; + } + } + *p_key0 = key0; + return 0; +} + +int +nat44_i2o_ed_is_idle_session_cb (clib_bihash_kv_16_8_t * kv, void * arg) +{ + snat_main_t *sm = &snat_main; + nat44_is_idle_session_ctx_t *ctx = arg; + snat_session_t *s; + u64 sess_timeout_time; + nat_ed_ses_key_t ed_key; + clib_bihash_kv_16_8_t ed_kv; + int i; + snat_address_t *a; + snat_session_key_t key; + snat_main_per_thread_data_t *tsm = vec_elt_at_index (sm->per_thread_data, + ctx->thread_index); + + s = pool_elt_at_index (tsm->sessions, kv->value); + 
sess_timeout_time = s->last_heard + (f64)nat44_session_get_timeout(sm, s); + if (ctx->now >= sess_timeout_time) + { + if (is_fwd_bypass_session (s)) + goto delete; + + ed_key.l_addr = s->out2in.addr; + ed_key.r_addr = s->ext_host_addr; + ed_key.fib_index = s->out2in.fib_index; + if (snat_is_unk_proto_session (s)) + { + ed_key.proto = s->in2out.port; + ed_key.r_port = 0; + ed_key.l_port = 0; + } + else + { + ed_key.proto = snat_proto_to_ip_proto (s->in2out.protocol); + ed_key.l_port = s->out2in.port; + ed_key.r_port = s->ext_host_port; + } + ed_kv.key[0] = ed_key.as_u64[0]; + ed_kv.key[1] = ed_key.as_u64[1]; + if (clib_bihash_add_del_16_8 (&tsm->out2in_ed, &ed_kv, 0)) + nat_log_warn ("out2in_ed key del failed"); + + if (snat_is_unk_proto_session (s)) + goto delete; + + snat_ipfix_logging_nat44_ses_delete(s->in2out.addr.as_u32, + s->out2in.addr.as_u32, + s->in2out.protocol, + s->in2out.port, + s->out2in.port, + s->in2out.fib_index); + + if (is_twice_nat_session (s)) + { + for (i = 0; i < vec_len (sm->twice_nat_addresses); i++) + { + key.protocol = s->in2out.protocol; + key.port = s->ext_host_nat_port; + a = sm->twice_nat_addresses + i; + if (a->addr.as_u32 == s->ext_host_nat_addr.as_u32) + { + snat_free_outside_address_and_port (sm->twice_nat_addresses, + ctx->thread_index, &key); + break; + } + } + } + + if (snat_is_session_static (s)) + goto delete; + + if (s->outside_address_index != ~0) + snat_free_outside_address_and_port (sm->addresses, ctx->thread_index, + &s->out2in); + delete: + nat44_delete_session (sm, s, ctx->thread_index); + return 1; + } + + return 0; +} + +static inline u32 +icmp_in2out_ed_slow_path (snat_main_t * sm, vlib_buffer_t * b0, + ip4_header_t * ip0, icmp46_header_t * icmp0, + u32 sw_if_index0, u32 rx_fib_index0, + vlib_node_runtime_t * node, u32 next0, f64 now, + u32 thread_index, snat_session_t ** p_s0) +{ + next0 = icmp_in2out(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, + next0, thread_index, p_s0, 0); + snat_session_t * s0 = *p_s0; + if (PREDICT_TRUE(next0 != SNAT_IN2OUT_NEXT_DROP && s0)) + { + /* Hairpinning */ + if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == ~0) + snat_icmp_hairpinning(sm, b0, ip0, icmp0, sm->endpoint_dependent); + /* Accounting */ + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (sm->vlib_main, b0)); + } + return next0; +} + +static u32 +slow_path_ed (snat_main_t *sm, + vlib_buffer_t *b, + u32 rx_fib_index, + clib_bihash_kv_16_8_t *kv, + snat_session_t ** sessionp, + vlib_node_runtime_t * node, + u32 next, + u32 thread_index, + f64 now) +{ + snat_session_t *s; + snat_user_t *u; + snat_session_key_t key0, key1; + lb_nat_type_t lb = 0, is_sm = 0; + u32 address_index = ~0; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + nat_ed_ses_key_t *key = (nat_ed_ses_key_t *) kv->key; + u32 proto = ip_proto_to_snat_proto (key->proto); + nat_outside_fib_t *outside_fib; + fib_node_index_t fei = FIB_NODE_INDEX_INVALID; + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP4, + .fp_len = 32, + .fp_addr = { + .ip4.as_u32 = key->r_addr.as_u32, + }, + }; + nat44_is_idle_session_ctx_t ctx; + + if (PREDICT_FALSE (maximum_sessions_exceeded (sm, thread_index))) + { + b->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED]; + nat_ipfix_logging_max_sessions(sm->max_translations); + nat_log_notice ("maximum sessions exceeded"); + return SNAT_IN2OUT_NEXT_DROP; + } + + key0.addr = key->l_addr; + key0.port = key->l_port; + key1.protocol = key0.protocol = proto; + key0.fib_index = rx_fib_index; + key1.fib_index 
= sm->outside_fib_index; + /* First try to match static mapping by local address and port */ + if (snat_static_mapping_match (sm, key0, &key1, 0, 0, 0, &lb, 0)) + { + /* Try to create dynamic translation */ + if (snat_alloc_outside_address_and_port (sm->addresses, rx_fib_index, + thread_index, &key1, + &address_index, + sm->port_per_thread, + tsm->snat_thread_index)) + { + nat_log_notice ("addresses exhausted"); + b->error = node->errors[SNAT_IN2OUT_ERROR_OUT_OF_PORTS]; + return SNAT_IN2OUT_NEXT_DROP; + } + } + else + is_sm = 1; + + u = nat_user_get_or_create (sm, &key->l_addr, rx_fib_index, thread_index); + if (!u) + { + nat_log_warn ("create NAT user failed"); + if (!is_sm) + snat_free_outside_address_and_port (sm->addresses, + thread_index, &key1); + return SNAT_IN2OUT_NEXT_DROP; + } + + s = nat_ed_session_alloc (sm, u, thread_index); + if (!s) + { + nat44_delete_user_with_no_session (sm, u, thread_index); + nat_log_warn ("create NAT session failed"); + if (!is_sm) + snat_free_outside_address_and_port (sm->addresses, + thread_index, &key1); + return SNAT_IN2OUT_NEXT_DROP; + } + + user_session_increment (sm, u, is_sm); + if (is_sm) + s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; + if (lb) + s->flags |= SNAT_SESSION_FLAG_LOAD_BALANCING; + s->flags |= SNAT_SESSION_FLAG_ENDPOINT_DEPENDENT; + s->outside_address_index = address_index; + s->ext_host_addr = key->r_addr; + s->ext_host_port = key->r_port; + s->in2out = key0; + s->out2in = key1; + s->out2in.protocol = key0.protocol; + + switch (vec_len (sm->outside_fibs)) + { + case 0: + s->out2in.fib_index = sm->outside_fib_index; + break; + case 1: + s->out2in.fib_index = sm->outside_fibs[0].fib_index; + break; + default: + vec_foreach (outside_fib, sm->outside_fibs) + { + fei = fib_table_lookup (outside_fib->fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + { + if (fib_entry_get_resolving_interface (fei) != ~0) + { + s->out2in.fib_index = outside_fib->fib_index; + break; + } + } + } + break; + } + + /* Add to lookup tables */ + kv->value = s - tsm->sessions; + ctx.now = now; + ctx.thread_index = thread_index; + if (clib_bihash_add_or_overwrite_stale_16_8 (&tsm->in2out_ed, kv, + nat44_i2o_ed_is_idle_session_cb, + &ctx)) + nat_log_notice ("in2out-ed key add failed"); + + make_ed_kv (kv, &key1.addr, &key->r_addr, key->proto, s->out2in.fib_index, + key1.port, key->r_port); + kv->value = s - tsm->sessions; + if (clib_bihash_add_or_overwrite_stale_16_8 (&tsm->out2in_ed, kv, + nat44_o2i_ed_is_idle_session_cb, + &ctx)) + nat_log_notice ("out2in-ed key add failed"); + + *sessionp = s; + + /* log NAT event */ + snat_ipfix_logging_nat44_ses_create(s->in2out.addr.as_u32, + s->out2in.addr.as_u32, + s->in2out.protocol, + s->in2out.port, + s->out2in.port, + s->in2out.fib_index); + return next; +} + +static_always_inline int +nat44_ed_not_translate (snat_main_t * sm, vlib_node_runtime_t *node, + u32 sw_if_index, ip4_header_t * ip, u32 proto, + u32 rx_fib_index, u32 thread_index) +{ + udp_header_t *udp = ip4_next_header (ip); + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + clib_bihash_kv_16_8_t kv, value; + snat_session_key_t key0, key1; + + make_ed_kv (&kv, &ip->dst_address, &ip->src_address, ip->protocol, + sm->outside_fib_index, udp->dst_port, udp->src_port); + + /* NAT packet aimed at external address if */ + /* has active sessions */ + if (clib_bihash_search_16_8 (&tsm->out2in_ed, &kv, &value)) + { + key0.addr = ip->dst_address; + key0.port = udp->dst_port; + key0.protocol = proto; + key0.fib_index = sm->outside_fib_index; 
+ /* or is static mappings */ + if (!snat_static_mapping_match(sm, key0, &key1, 1, 0, 0, 0, 0)) + return 0; + } + else + return 0; + + if (sm->forwarding_enabled) + return 1; + + return snat_not_translate_fast(sm, node, sw_if_index, ip, proto, rx_fib_index); +} + +static_always_inline int +nat_not_translate_output_feature_fwd (snat_main_t * sm, ip4_header_t * ip, + u32 thread_index, f64 now, + vlib_main_t * vm, vlib_buffer_t * b) +{ + nat_ed_ses_key_t key; + clib_bihash_kv_16_8_t kv, value; + udp_header_t *udp; + snat_session_t *s = 0; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + + if (!sm->forwarding_enabled) + return 0; + + if (ip->protocol == IP_PROTOCOL_ICMP) + { + key.as_u64[0] = key.as_u64[1] = 0; + if (icmp_get_ed_key (ip, &key)) + return 0; + key.fib_index = 0; + kv.key[0] = key.as_u64[0]; + kv.key[1] = key.as_u64[1]; + } + else if (ip->protocol == IP_PROTOCOL_UDP || ip->protocol == IP_PROTOCOL_TCP) + { + udp = ip4_next_header(ip); + make_ed_kv (&kv, &ip->src_address, &ip->dst_address, ip->protocol, 0, + udp->src_port, udp->dst_port); + } + else + { + make_ed_kv (&kv, &ip->src_address, &ip->dst_address, ip->protocol, 0, 0, + 0); + } + + if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &kv, &value)) + { + s = pool_elt_at_index (tsm->sessions, value.value); + if (is_fwd_bypass_session (s)) + { + if (ip->protocol == IP_PROTOCOL_TCP) + { + tcp_header_t *tcp = ip4_next_header(ip); + if (nat44_set_tcp_session_state_i2o (sm, s, tcp, thread_index)) + return 1; + } + /* Accounting */ + nat44_session_update_counters (s, now, + vlib_buffer_length_in_chain (vm, b)); + return 1; + } + else + return 0; + } + + return 0; +} + +static_always_inline int +nat44_ed_not_translate_output_feature (snat_main_t * sm, ip4_header_t * ip, + u8 proto, u16 src_port, u16 dst_port, + u32 thread_index, u32 rx_sw_if_index, + u32 tx_sw_if_index) +{ + clib_bihash_kv_16_8_t kv, value; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + snat_interface_t *i; + snat_session_t *s; + u32 rx_fib_index = ip4_fib_table_get_index_for_sw_if_index (rx_sw_if_index); + u32 tx_fib_index = ip4_fib_table_get_index_for_sw_if_index (tx_sw_if_index); + + /* src NAT check */ + make_ed_kv (&kv, &ip->src_address, &ip->dst_address, proto, tx_fib_index, + src_port, dst_port); + if (!clib_bihash_search_16_8 (&tsm->out2in_ed, &kv, &value)) + return 1; + + /* dst NAT check */ + make_ed_kv (&kv, &ip->dst_address, &ip->src_address, proto, rx_fib_index, + dst_port, src_port); + if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &kv, &value)) + { + s = pool_elt_at_index (tsm->sessions, value.value); + if (is_fwd_bypass_session (s)) + return 0; + + /* hairpinning */ + pool_foreach (i, sm->output_feature_interfaces, + ({ + if ((nat_interface_is_inside(i)) && (rx_sw_if_index == i->sw_if_index)) + return 0; + })); + return 1; + } + + return 0; +} + +u32 +icmp_match_in2out_ed(snat_main_t *sm, vlib_node_runtime_t *node, + u32 thread_index, vlib_buffer_t *b, ip4_header_t *ip, + u8 *p_proto, snat_session_key_t *p_value, + u8 *p_dont_translate, void *d, void *e) +{ + icmp46_header_t *icmp; + u32 sw_if_index; + u32 rx_fib_index; + nat_ed_ses_key_t key; + snat_session_t *s = 0; + u8 dont_translate = 0; + clib_bihash_kv_16_8_t kv, value; + u32 next = ~0; + int err; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + + icmp = (icmp46_header_t *) ip4_next_header (ip); + sw_if_index = vnet_buffer(b)->sw_if_index[VLIB_RX]; + rx_fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index); 
+ + key.as_u64[0] = key.as_u64[1] = 0; + err = icmp_get_ed_key (ip, &key); + if (err != 0) + { + b->error = node->errors[err]; + next = SNAT_IN2OUT_NEXT_DROP; + goto out; + } + key.fib_index = rx_fib_index; + + kv.key[0] = key.as_u64[0]; + kv.key[1] = key.as_u64[1]; + + if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv, &value)) + { + if (vnet_buffer(b)->sw_if_index[VLIB_TX] != ~0) + { + if (PREDICT_FALSE(nat44_ed_not_translate_output_feature(sm, ip, + key.proto, key.l_port, key.r_port, thread_index, sw_if_index, + vnet_buffer(b)->sw_if_index[VLIB_TX]))) + { + dont_translate = 1; + goto out; + } + } + else + { + if (PREDICT_FALSE(nat44_ed_not_translate(sm, node, sw_if_index, + ip, SNAT_PROTOCOL_ICMP, rx_fib_index, thread_index))) + { + dont_translate = 1; + goto out; + } + } + + if (PREDICT_FALSE(icmp_is_error_message (icmp))) + { + b->error = node->errors[SNAT_IN2OUT_ERROR_BAD_ICMP_TYPE]; + next = SNAT_IN2OUT_NEXT_DROP; + goto out; + } + + next = slow_path_ed (sm, b, rx_fib_index, &kv, &s, node, next, + thread_index, vlib_time_now (sm->vlib_main)); + + if (PREDICT_FALSE (next == SNAT_IN2OUT_NEXT_DROP)) + goto out; + } + else + { + if (PREDICT_FALSE(icmp->type != ICMP4_echo_request && + icmp->type != ICMP4_echo_reply && + !icmp_is_error_message (icmp))) + { + b->error = node->errors[SNAT_IN2OUT_ERROR_BAD_ICMP_TYPE]; + next = SNAT_IN2OUT_NEXT_DROP; + goto out; + } + + s = pool_elt_at_index (tsm->sessions, value.value); + } + + *p_proto = ip_proto_to_snat_proto (key.proto); +out: + if (s) + *p_value = s->out2in; + *p_dont_translate = dont_translate; + if (d) + *(snat_session_t**)d = s; + return next; +} + +static inline void +nat44_ed_hairpinning_unknown_proto (snat_main_t *sm, + vlib_buffer_t * b, + ip4_header_t * ip) +{ + u32 old_addr, new_addr = 0, ti = 0; + clib_bihash_kv_8_8_t kv, value; + clib_bihash_kv_16_8_t s_kv, s_value; + snat_static_mapping_t *m; + ip_csum_t sum; + snat_session_t *s; + snat_main_per_thread_data_t *tsm; + + if (sm->num_workers > 1) + ti = sm->worker_out2in_cb (ip, sm->outside_fib_index); + else + ti = sm->num_workers; + tsm = &sm->per_thread_data[ti]; + + old_addr = ip->dst_address.as_u32; + make_ed_kv (&s_kv, &ip->dst_address, &ip->src_address, ip->protocol, + sm->outside_fib_index, 0, 0); + if (clib_bihash_search_16_8 (&tsm->out2in_ed, &s_kv, &s_value)) + { + make_sm_kv (&kv, &ip->dst_address, 0, 0, 0); + if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value)) + return; + + m = pool_elt_at_index (sm->static_mappings, value.value); + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) + vnet_buffer(b)->sw_if_index[VLIB_TX] = m->fib_index; + new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32; + } + else + { + s = pool_elt_at_index (sm->per_thread_data[ti].sessions, s_value.value); + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) + vnet_buffer(b)->sw_if_index[VLIB_TX] = s->in2out.fib_index; + new_addr = ip->dst_address.as_u32 = s->in2out.addr.as_u32; + } + sum = ip->checksum; + sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address); + ip->checksum = ip_csum_fold (sum); +} + +static snat_session_t * +nat44_ed_in2out_unknown_proto (snat_main_t *sm, + vlib_buffer_t * b, + ip4_header_t * ip, + u32 rx_fib_index, + u32 thread_index, + f64 now, + vlib_main_t * vm, + vlib_node_runtime_t * node) +{ + clib_bihash_kv_8_8_t kv, value; + clib_bihash_kv_16_8_t s_kv, s_value; + snat_static_mapping_t *m; + u32 old_addr, new_addr = 0; + ip_csum_t sum; + snat_user_t *u; + dlist_elt_t *head, *elt; + snat_main_per_thread_data_t *tsm = 
&sm->per_thread_data[thread_index]; + u32 elt_index, head_index, ses_index; + snat_session_t * s; + u32 address_index = ~0, outside_fib_index = sm->outside_fib_index; + int i; + u8 is_sm = 0; + nat_outside_fib_t *outside_fib; + fib_node_index_t fei = FIB_NODE_INDEX_INVALID; + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP4, + .fp_len = 32, + .fp_addr = { + .ip4.as_u32 = ip->dst_address.as_u32, + }, + }; + + switch (vec_len (sm->outside_fibs)) + { + case 0: + outside_fib_index = sm->outside_fib_index; + break; + case 1: + outside_fib_index = sm->outside_fibs[0].fib_index; + break; + default: + vec_foreach (outside_fib, sm->outside_fibs) + { + fei = fib_table_lookup (outside_fib->fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + { + if (fib_entry_get_resolving_interface (fei) != ~0) + { + outside_fib_index = outside_fib->fib_index; + break; + } + } + } + break; + } + old_addr = ip->src_address.as_u32; + + make_ed_kv (&s_kv, &ip->src_address, &ip->dst_address, ip->protocol, + rx_fib_index, 0, 0); + + if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &s_kv, &s_value)) + { + s = pool_elt_at_index (tsm->sessions, s_value.value); + new_addr = ip->src_address.as_u32 = s->out2in.addr.as_u32; + } + else + { + if (PREDICT_FALSE (maximum_sessions_exceeded(sm, thread_index))) + { + b->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED]; + nat_ipfix_logging_max_sessions(sm->max_translations); + nat_log_notice ("maximum sessions exceeded"); + return 0; + } + + u = nat_user_get_or_create (sm, &ip->src_address, rx_fib_index, + thread_index); + if (!u) + { + nat_log_warn ("create NAT user failed"); + return 0; + } + + make_sm_kv (&kv, &ip->src_address, 0, rx_fib_index, 0); + + /* Try to find static mapping first */ + if (!clib_bihash_search_8_8 (&sm->static_mapping_by_local, &kv, &value)) + { + m = pool_elt_at_index (sm->static_mappings, value.value); + new_addr = ip->src_address.as_u32 = m->external_addr.as_u32; + is_sm = 1; + goto create_ses; + } + /* Fallback to 3-tuple key */ + else + { + /* Choose same out address as for TCP/UDP session to same destination */ + head_index = u->sessions_per_user_list_head_index; + head = pool_elt_at_index (tsm->list_pool, head_index); + elt_index = head->next; + if (PREDICT_FALSE (elt_index == ~0)) + ses_index = ~0; + else + { + elt = pool_elt_at_index (tsm->list_pool, elt_index); + ses_index = elt->value; + } + + while (ses_index != ~0) + { + s = pool_elt_at_index (tsm->sessions, ses_index); + elt_index = elt->next; + elt = pool_elt_at_index (tsm->list_pool, elt_index); + ses_index = elt->value; + + if (s->ext_host_addr.as_u32 == ip->dst_address.as_u32) + { + new_addr = ip->src_address.as_u32 = s->out2in.addr.as_u32; + address_index = s->outside_address_index; + + make_ed_kv (&s_kv, &s->out2in.addr, &ip->dst_address, + ip->protocol, outside_fib_index, 0, 0); + if (clib_bihash_search_16_8 (&tsm->out2in_ed, &s_kv, &s_value)) + goto create_ses; + + break; + } + } + + for (i = 0; i < vec_len (sm->addresses); i++) + { + make_ed_kv (&s_kv, &sm->addresses[i].addr, &ip->dst_address, + ip->protocol, outside_fib_index, 0, 0); + if (clib_bihash_search_16_8 (&tsm->out2in_ed, &s_kv, &s_value)) + { + new_addr = ip->src_address.as_u32 = + sm->addresses[i].addr.as_u32; + address_index = i; + goto create_ses; + } + } + return 0; + } + +create_ses: + s = nat_ed_session_alloc (sm, u, thread_index); + if (!s) + { + nat44_delete_user_with_no_session (sm, u, thread_index); + nat_log_warn ("create NAT session failed"); + return 0; + } + + s->ext_host_addr.as_u32 = 
ip->dst_address.as_u32; + s->flags |= SNAT_SESSION_FLAG_UNKNOWN_PROTO; + s->flags |= SNAT_SESSION_FLAG_ENDPOINT_DEPENDENT; + s->outside_address_index = address_index; + s->out2in.addr.as_u32 = new_addr; + s->out2in.fib_index = outside_fib_index; + s->in2out.addr.as_u32 = old_addr; + s->in2out.fib_index = rx_fib_index; + s->in2out.port = s->out2in.port = ip->protocol; + if (is_sm) + s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING; + user_session_increment (sm, u, is_sm); + + /* Add to lookup tables */ + make_ed_kv (&s_kv, &s->in2out.addr, &ip->dst_address, ip->protocol, + rx_fib_index, 0, 0); + s_kv.value = s - tsm->sessions; + if (clib_bihash_add_del_16_8 (&tsm->in2out_ed, &s_kv, 1)) + nat_log_notice ("in2out key add failed"); + + make_ed_kv (&s_kv, &s->out2in.addr, &ip->dst_address, ip->protocol, + outside_fib_index, 0, 0); + s_kv.value = s - tsm->sessions; + if (clib_bihash_add_del_16_8 (&tsm->out2in_ed, &s_kv, 1)) + nat_log_notice ("out2in key add failed"); + } + + /* Update IP checksum */ + sum = ip->checksum; + sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, src_address); + ip->checksum = ip_csum_fold (sum); + + /* Accounting */ + nat44_session_update_counters (s, now, vlib_buffer_length_in_chain (vm, b)); + + /* Hairpinning */ + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) + nat44_ed_hairpinning_unknown_proto(sm, b, ip); + + if (vnet_buffer(b)->sw_if_index[VLIB_TX] == ~0) + vnet_buffer(b)->sw_if_index[VLIB_TX] = outside_fib_index; + + return s; +} + +static inline uword +nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, int is_slow_path, + int is_output_feature) +{ + u32 n_left_from, *from, *to_next, pkts_processed = 0, stats_node_index; + snat_in2out_next_t next_index; + snat_main_t *sm = &snat_main; + f64 now = vlib_time_now (vm); + u32 thread_index = vm->thread_index; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + + stats_node_index = is_slow_path ? nat44_ed_in2out_slowpath_node.index : + nat44_ed_in2out_node.index; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 bi0, bi1; + vlib_buffer_t *b0, *b1; + u32 next0, sw_if_index0, rx_fib_index0, iph_offset0 = 0, proto0, + new_addr0, old_addr0; + u32 next1, sw_if_index1, rx_fib_index1, iph_offset1 = 0, proto1, + new_addr1, old_addr1; + u16 old_port0, new_port0, old_port1, new_port1; + ip4_header_t *ip0, *ip1; + udp_header_t *udp0, *udp1; + tcp_header_t *tcp0, *tcp1; + icmp46_header_t *icmp0, *icmp1; + snat_session_t *s0 = 0, *s1 = 0; + clib_bihash_kv_16_8_t kv0, value0, kv1, value1; + ip_csum_t sum0, sum1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + /* speculatively enqueue b0 and b1 to the current next frame */ + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + + next0 = SNAT_IN2OUT_NEXT_LOOKUP; + + if (is_output_feature) + iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; + + ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + + iph_offset0); + + sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; + rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, + sw_if_index0); + + if (PREDICT_FALSE(ip0->ttl == 1)) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded, + ICMP4_time_exceeded_ttl_exceeded_in_transit, + 0); + next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR; + goto trace00; + } + + udp0 = ip4_next_header (ip0); + tcp0 = (tcp_header_t *) udp0; + icmp0 = (icmp46_header_t *) udp0; + proto0 = ip_proto_to_snat_proto (ip0->protocol); + + if (is_slow_path) + { + if (PREDICT_FALSE (proto0 == ~0)) + { + s0 = nat44_ed_in2out_unknown_proto (sm, b0, ip0, + rx_fib_index0, + thread_index, now, vm, + node); + if (!s0) + next0 = SNAT_IN2OUT_NEXT_DROP; + goto trace00; + } + + if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = icmp_in2out_ed_slow_path + (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, + next0, now, thread_index, &s0); + goto trace00; + } + } + else + { + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature_fwd( + sm, ip0, thread_index, now, vm, b0))) + goto trace00; + } + + if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace00; + } + + if (ip4_is_fragment (ip0)) + { + b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT]; + next0 = SNAT_IN2OUT_NEXT_DROP; + goto trace00; + } + } + + make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, ip0->protocol, + rx_fib_index0, udp0->src_port, udp0->dst_port); + + if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv0, &value0)) + { + if (is_slow_path) + { + if (is_output_feature) + { + if (PREDICT_FALSE(nat44_ed_not_translate_output_feature( + sm, ip0, ip0->protocol, udp0->src_port, + udp0->dst_port, thread_index, sw_if_index0, + vnet_buffer(b0)->sw_if_index[VLIB_TX]))) + goto trace00; + } + else + { + if (PREDICT_FALSE(nat44_ed_not_translate(sm, node, + sw_if_index0, ip0, proto0, rx_fib_index0, + thread_index))) + goto trace00; + } + + next0 = slow_path_ed (sm, b0, rx_fib_index0, &kv0, &s0, node, + next0, thread_index, now); + + if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) + goto trace00; + } + else + { + next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace00; + } + } + else + { + s0 = pool_elt_at_index (tsm->sessions, value0.value); + } + + b0->flags |= VNET_BUFFER_F_IS_NATED; + + if (!is_output_feature) + vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index; + + old_addr0 = ip0->src_address.as_u32; + new_addr0 = ip0->src_address.as_u32 = s0->out2in.addr.as_u32; + sum0 = ip0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + src_address); + if (PREDICT_FALSE (is_twice_nat_session 
(s0))) + sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, ip4_header_t, + dst_address); + ip0->checksum = ip_csum_fold (sum0); + + if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP)) + { + old_port0 = tcp0->src_port; + new_port0 = tcp0->src_port = s0->out2in.port; + + sum0 = tcp0->checksum; + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + dst_address); + sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, + length); + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, + ip4_header_t, dst_address); + sum0 = ip_csum_update (sum0, tcp0->dst_port, + s0->ext_host_port, ip4_header_t, + length); + tcp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + tcp0->checksum = ip_csum_fold(sum0); + if (nat44_set_tcp_session_state_i2o (sm, s0, tcp0, thread_index)) + goto trace00; + } + else + { + udp0->src_port = s0->out2in.port; + udp0->checksum = 0; + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + udp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + } + + /* Accounting */ + nat44_session_update_counters (s0, now, + vlib_buffer_length_in_chain (vm, b0)); + + trace00: + if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) + && (b0->flags & VLIB_BUFFER_IS_TRACED))) + { + snat_in2out_trace_t *t = + vlib_add_trace (vm, node, b0, sizeof (*t)); + t->is_slow_path = is_slow_path; + t->sw_if_index = sw_if_index0; + t->next_index = next0; + t->session_index = ~0; + if (s0) + t->session_index = s0 - tsm->sessions; + } + + pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; + + + next1 = SNAT_IN2OUT_NEXT_LOOKUP; + + if (is_output_feature) + iph_offset1 = vnet_buffer (b1)->ip.save_rewrite_length; + + ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b1) + + iph_offset1); + + sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX]; + rx_fib_index1 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, + sw_if_index1); + + if (PREDICT_FALSE(ip1->ttl == 1)) + { + vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0; + icmp4_error_set_vnet_buffer (b1, ICMP4_time_exceeded, + ICMP4_time_exceeded_ttl_exceeded_in_transit, + 0); + next1 = SNAT_IN2OUT_NEXT_ICMP_ERROR; + goto trace01; + } + + udp1 = ip4_next_header (ip1); + tcp1 = (tcp_header_t *) udp1; + icmp1 = (icmp46_header_t *) udp1; + proto1 = ip_proto_to_snat_proto (ip1->protocol); + + if (is_slow_path) + { + if (PREDICT_FALSE (proto1 == ~0)) + { + s1 = nat44_ed_in2out_unknown_proto (sm, b1, ip1, + rx_fib_index1, + thread_index, now, vm, + node); + if (!s1) + next1 = SNAT_IN2OUT_NEXT_DROP; + goto trace01; + } + + if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP)) + { + next1 = icmp_in2out_ed_slow_path + (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, + next1, now, thread_index, &s1); + goto trace01; + } + } + else + { + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature_fwd( + sm, ip1, thread_index, now, vm, b1))) + goto trace01; + } + + if (PREDICT_FALSE (proto1 == ~0 || proto1 == SNAT_PROTOCOL_ICMP)) + { + next1 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace01; + } + + if (ip4_is_fragment (ip1)) + { + b1->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT]; + next1 = SNAT_IN2OUT_NEXT_DROP; + goto trace01; + } + } + + make_ed_kv (&kv1, &ip1->src_address, &ip1->dst_address, ip1->protocol, + rx_fib_index1, udp1->src_port, udp1->dst_port); + + if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv1, &value1)) { 
- if (PREDICT_FALSE (value1.value == ~0ULL)) + if (is_slow_path) { - if (is_slow_path) + if (is_output_feature) { - s1 = snat_in2out_lb(sm, b1, ip1, rx_fib_index1, thread_index, - now, vm); - goto trace01; + if (PREDICT_FALSE(nat44_ed_not_translate_output_feature( + sm, ip1, ip1->protocol, udp1->src_port, + udp1->dst_port, thread_index, sw_if_index1, + vnet_buffer(b1)->sw_if_index[VLIB_TX]))) + goto trace01; } else { - next1 = SNAT_IN2OUT_NEXT_SLOW_PATH; - goto trace01; + if (PREDICT_FALSE(nat44_ed_not_translate(sm, node, + sw_if_index1, ip1, proto1, rx_fib_index1, + thread_index))) + goto trace01; } + + next1 = slow_path_ed (sm, b1, rx_fib_index1, &kv1, &s1, node, + next1, thread_index, now); + + if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP)) + goto trace01; } else { - s1 = pool_elt_at_index ( - sm->per_thread_data[thread_index].sessions, - value1.value); + next1 = SNAT_IN2OUT_NEXT_SLOW_PATH; + goto trace01; } } + else + { + s1 = pool_elt_at_index (tsm->sessions, value1.value); + } + + b1->flags |= VNET_BUFFER_F_IS_NATED; - old_addr1 = ip1->src_address.as_u32; - ip1->src_address = s1->out2in.addr; - new_addr1 = ip1->src_address.as_u32; if (!is_output_feature) vnet_buffer(b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index; + old_addr1 = ip1->src_address.as_u32; + new_addr1 = ip1->src_address.as_u32 = s1->out2in.addr.as_u32; sum1 = ip1->checksum; - sum1 = ip_csum_update (sum1, old_addr1, new_addr1, - ip4_header_t, - src_address /* changed member */); + sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, + src_address); + if (PREDICT_FALSE (is_twice_nat_session (s1))) + sum1 = ip_csum_update (sum1, ip1->dst_address.as_u32, + s1->ext_host_addr.as_u32, ip4_header_t, + dst_address); ip1->checksum = ip_csum_fold (sum1); - if (PREDICT_TRUE(proto1 == SNAT_PROTOCOL_TCP)) + if (PREDICT_TRUE (proto1 == SNAT_PROTOCOL_TCP)) { old_port1 = tcp1->src_port; - tcp1->src_port = s1->out2in.port; - new_port1 = tcp1->src_port; + new_port1 = tcp1->src_port = s1->out2in.port; sum1 = tcp1->checksum; - sum1 = ip_csum_update (sum1, old_addr1, new_addr1, - ip4_header_t, - dst_address /* changed member */); - sum1 = ip_csum_update (sum1, old_port1, new_port1, - ip4_header_t /* cheat */, - length /* changed member */); + sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, + dst_address); + sum1 = ip_csum_update (sum1, old_port1, new_port1, ip4_header_t, + length); + if (PREDICT_FALSE (is_twice_nat_session (s1))) + { + sum1 = ip_csum_update (sum1, ip1->dst_address.as_u32, + s1->ext_host_addr.as_u32, + ip4_header_t, dst_address); + sum1 = ip_csum_update (sum1, tcp1->dst_port, + s1->ext_host_port, ip4_header_t, + length); + tcp1->dst_port = s1->ext_host_port; + ip1->dst_address.as_u32 = s1->ext_host_addr.as_u32; + } tcp1->checksum = ip_csum_fold(sum1); + if (nat44_set_tcp_session_state_i2o (sm, s1, tcp1, thread_index)) + goto trace01; } else { - old_port1 = udp1->src_port; udp1->src_port = s1->out2in.port; udp1->checksum = 0; + if (PREDICT_FALSE (is_twice_nat_session (s1))) + { + udp1->dst_port = s1->ext_host_port; + ip1->dst_address.as_u32 = s1->ext_host_addr.as_u32; + } } - /* Hairpinning */ - if (!is_output_feature) - snat_hairpinning (sm, b1, ip1, udp1, tcp1, proto1); - /* Accounting */ - s1->last_heard = now; - s1->total_pkts++; - s1->total_bytes += vlib_buffer_length_in_chain (vm, b1); - /* Per-user LRU list maintenance for dynamic translation */ - if (!snat_is_session_static (s1)) - { - clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, - s1->per_user_index); - 
clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, - s1->per_user_list_head_index, - s1->per_user_index); - } - trace01: + nat44_session_update_counters (s1, now, + vlib_buffer_length_in_chain (vm, b1)); + trace01: if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE) && (b1->flags & VLIB_BUFFER_IS_TRACED))) { snat_in2out_trace_t *t = - vlib_add_trace (vm, node, b1, sizeof (*t)); + vlib_add_trace (vm, node, b1, sizeof (*t)); + t->is_slow_path = is_slow_path; t->sw_if_index = sw_if_index1; t->next_index = next1; t->session_index = ~0; if (s1) - t->session_index = s1 - sm->per_thread_data[thread_index].sessions; + t->session_index = s1 - tsm->sessions; } pkts_processed += next1 != SNAT_IN2OUT_NEXT_DROP; @@ -1919,22 +3582,17 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; - vlib_buffer_t * b0; - u32 next0; - u32 sw_if_index0; - ip4_header_t * ip0; - ip_csum_t sum0; - u32 new_addr0, old_addr0; + vlib_buffer_t *b0; + u32 next0, sw_if_index0, rx_fib_index0, iph_offset0 = 0, proto0, + new_addr0, old_addr0; u16 old_port0, new_port0; - udp_header_t * udp0; - tcp_header_t * tcp0; + ip4_header_t *ip0; + udp_header_t *udp0; + tcp_header_t *tcp0; icmp46_header_t * icmp0; - snat_session_key_t key0; - u32 rx_fib_index0; - u32 proto0; - snat_session_t * s0 = 0; - clib_bihash_kv_8_8_t kv0, value0; - u32 iph_offset0 = 0; + snat_session_t *s0 = 0; + clib_bihash_kv_16_8_t kv0, value0; + ip_csum_t sum0; /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; @@ -1953,13 +3611,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0); - udp0 = ip4_next_header (ip0); - tcp0 = (tcp_header_t *) udp0; - icmp0 = (icmp46_header_t *) udp0; - sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX]; - rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index, - sw_if_index0); + rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, + sw_if_index0); if (PREDICT_FALSE(ip0->ttl == 1)) { @@ -1971,21 +3625,27 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace0; } + udp0 = ip4_next_header (ip0); + tcp0 = (tcp_header_t *) udp0; + icmp0 = (icmp46_header_t *) udp0; proto0 = ip_proto_to_snat_proto (ip0->protocol); - /* Next configured feature, probably ip4-lookup */ if (is_slow_path) { if (PREDICT_FALSE (proto0 == ~0)) { - snat_in2out_unknown_proto (sm, b0, ip0, rx_fib_index0, - thread_index, now, vm); + s0 = nat44_ed_in2out_unknown_proto (sm, b0, ip0, + rx_fib_index0, + thread_index, now, vm, + node); + if (!s0) + next0 = SNAT_IN2OUT_NEXT_DROP; goto trace0; } if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) { - next0 = icmp_in2out_slow_path + next0 = icmp_in2out_ed_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, next0, now, thread_index, &s0); goto trace0; @@ -1993,31 +3653,52 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } else { + if (is_output_feature) + { + if (PREDICT_FALSE(nat_not_translate_output_feature_fwd( + sm, ip0, thread_index, now, vm, b0))) + goto trace0; + } + if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP)) { next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; goto trace0; } - } - key0.addr = ip0->src_address; - key0.port = udp0->src_port; - key0.protocol = proto0; - key0.fib_index = rx_fib_index0; + if (ip4_is_fragment (ip0)) + { + b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT]; + next0 = SNAT_IN2OUT_NEXT_DROP; + goto trace0; + } + } - kv0.key = key0.as_u64; + make_ed_kv (&kv0, &ip0->src_address, 
&ip0->dst_address, ip0->protocol, + rx_fib_index0, udp0->src_port, udp0->dst_port); - if (clib_bihash_search_8_8 (&sm->per_thread_data[thread_index].in2out, - &kv0, &value0)) + if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv0, &value0)) { if (is_slow_path) { - if (PREDICT_FALSE(snat_not_translate(sm, node, sw_if_index0, - ip0, proto0, rx_fib_index0, thread_index)) && !is_output_feature) - goto trace0; + if (is_output_feature) + { + if (PREDICT_FALSE(nat44_ed_not_translate_output_feature( + sm, ip0, ip0->protocol, udp0->src_port, + udp0->dst_port, thread_index, sw_if_index0, + vnet_buffer(b0)->sw_if_index[VLIB_TX]))) + goto trace0; + } + else + { + if (PREDICT_FALSE(nat44_ed_not_translate(sm, node, + sw_if_index0, ip0, proto0, rx_fib_index0, + thread_index))) + goto trace0; + } - next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, thread_index); + next0 = slow_path_ed (sm, b0, rx_fib_index0, &kv0, &s0, node, + next0, thread_index, now); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace0; @@ -2030,92 +3711,77 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } else { - if (PREDICT_FALSE (value0.value == ~0ULL)) - { - if (is_slow_path) - { - s0 = snat_in2out_lb(sm, b0, ip0, rx_fib_index0, thread_index, - now, vm); - goto trace0; - } - else - { - next0 = SNAT_IN2OUT_NEXT_SLOW_PATH; - goto trace0; - } - } - else - { - s0 = pool_elt_at_index ( - sm->per_thread_data[thread_index].sessions, - value0.value); - } + s0 = pool_elt_at_index (tsm->sessions, value0.value); } - old_addr0 = ip0->src_address.as_u32; - ip0->src_address = s0->out2in.addr; - new_addr0 = ip0->src_address.as_u32; + b0->flags |= VNET_BUFFER_F_IS_NATED; + if (!is_output_feature) vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index; + old_addr0 = ip0->src_address.as_u32; + new_addr0 = ip0->src_address.as_u32 = s0->out2in.addr.as_u32; sum0 = ip0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, - ip4_header_t, - src_address /* changed member */); + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + src_address); + if (PREDICT_FALSE (is_twice_nat_session (s0))) + sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, ip4_header_t, + dst_address); ip0->checksum = ip_csum_fold (sum0); - if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP)) + if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP)) { old_port0 = tcp0->src_port; - tcp0->src_port = s0->out2in.port; - new_port0 = tcp0->src_port; + new_port0 = tcp0->src_port = s0->out2in.port; sum0 = tcp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, - ip4_header_t, - dst_address /* changed member */); - sum0 = ip_csum_update (sum0, old_port0, new_port0, - ip4_header_t /* cheat */, - length /* changed member */); + sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + dst_address); + sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, + length); + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, + ip4_header_t, dst_address); + sum0 = ip_csum_update (sum0, tcp0->dst_port, + s0->ext_host_port, ip4_header_t, + length); + tcp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } tcp0->checksum = ip_csum_fold(sum0); + if (nat44_set_tcp_session_state_i2o (sm, s0, tcp0, thread_index)) + goto trace0; } else { - old_port0 = udp0->src_port; udp0->src_port = s0->out2in.port; udp0->checksum = 0; + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + 
               udp0->dst_port = s0->ext_host_port;
+               ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32;
+             }
         }

-      /* Hairpinning */
-      if (!is_output_feature)
-        snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
-      /* Accounting */
-      s0->last_heard = now;
-      s0->total_pkts++;
-      s0->total_bytes += vlib_buffer_length_in_chain (vm, b0);
-      /* Per-user LRU list maintenance for dynamic translation */
-      if (!snat_is_session_static (s0))
-        {
-          clib_dlist_remove (sm->per_thread_data[thread_index].list_pool,
-                             s0->per_user_index);
-          clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool,
-                              s0->per_user_list_head_index,
-                              s0->per_user_index);
-        }
+      nat44_session_update_counters (s0, now,
+                                     vlib_buffer_length_in_chain (vm, b0));

     trace0:
       if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                         && (b0->flags & VLIB_BUFFER_IS_TRACED)))
         {
           snat_in2out_trace_t *t =
-             vlib_add_trace (vm, node, b0, sizeof (*t));
+            vlib_add_trace (vm, node, b0, sizeof (*t));
           t->is_slow_path = is_slow_path;
           t->sw_if_index = sw_if_index0;
           t->next_index = next0;
-             t->session_index = ~0;
+          t->session_index = ~0;
           if (s0)
-             t->session_index = s0 - sm->per_thread_data[thread_index].sessions;
+            t->session_index = s0 - tsm->sessions;
         }

       pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
@@ -2124,7 +3790,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm,
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                            to_next, n_left_to_next,
                                            bi0, next0);
-       }
+        }

       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
@@ -2136,16 +3802,16 @@ snat_in2out_node_fn_inline (vlib_main_t * vm,
 }

 static uword
-snat_in2out_fast_path_fn (vlib_main_t * vm,
-                          vlib_node_runtime_t * node,
-                          vlib_frame_t * frame)
+nat44_ed_in2out_fast_path_fn (vlib_main_t * vm,
+                              vlib_node_runtime_t * node,
+                              vlib_frame_t * frame)
 {
-  return snat_in2out_node_fn_inline (vm, node, frame, 0 /* is_slow_path */, 0);
+  return nat44_ed_in2out_node_fn_inline (vm, node, frame, 0, 0);
 }

-VLIB_REGISTER_NODE (snat_in2out_node) = {
-  .function = snat_in2out_fast_path_fn,
-  .name = "nat44-in2out",
+VLIB_REGISTER_NODE (nat44_ed_in2out_node) = {
+  .function = nat44_ed_in2out_fast_path_fn,
+  .name = "nat44-ed-in2out",
   .vector_size = sizeof (u32),
   .format_trace = format_snat_in2out_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -2161,24 +3827,25 @@ VLIB_REGISTER_NODE (snat_in2out_node) = {
   .next_nodes = {
     [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
     [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
-    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath",
+    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-slowpath",
     [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+    [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
   },
 };

-VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_node, snat_in2out_fast_path_fn);
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_node, nat44_ed_in2out_fast_path_fn);

 static uword
-snat_in2out_output_fast_path_fn (vlib_main_t * vm,
-                                 vlib_node_runtime_t * node,
-                                 vlib_frame_t * frame)
+nat44_ed_in2out_output_fast_path_fn (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * frame)
 {
-  return snat_in2out_node_fn_inline (vm, node, frame, 0 /* is_slow_path */, 1);
+  return nat44_ed_in2out_node_fn_inline (vm, node, frame, 0, 1);
 }

-VLIB_REGISTER_NODE (snat_in2out_output_node) = {
-  .function = snat_in2out_output_fast_path_fn,
-  .name = "nat44-in2out-output",
+VLIB_REGISTER_NODE (nat44_ed_in2out_output_node) = {
+  .function = nat44_ed_in2out_output_fast_path_fn,
+  .name = "nat44-ed-in2out-output",
   .vector_size = sizeof (u32),
   .format_trace = format_snat_in2out_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -2194,25 +3861,26 @@ VLIB_REGISTER_NODE (snat_in2out_output_node) = {
   .next_nodes = {
     [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
     [SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output",
-    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-output-slowpath",
+    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-output-slowpath",
     [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+    [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
   },
 };

-VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_output_node,
-                              snat_in2out_output_fast_path_fn);
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_output_node,
+                              nat44_ed_in2out_output_fast_path_fn);

 static uword
-snat_in2out_slow_path_fn (vlib_main_t * vm,
-                          vlib_node_runtime_t * node,
-                          vlib_frame_t * frame)
+nat44_ed_in2out_slow_path_fn (vlib_main_t * vm,
+                              vlib_node_runtime_t * node,
+                              vlib_frame_t * frame)
 {
-  return snat_in2out_node_fn_inline (vm, node, frame, 1 /* is_slow_path */, 0);
+  return nat44_ed_in2out_node_fn_inline (vm, node, frame, 1, 0);
 }

-VLIB_REGISTER_NODE (snat_in2out_slowpath_node) = {
-  .function = snat_in2out_slow_path_fn,
-  .name = "nat44-in2out-slowpath",
+VLIB_REGISTER_NODE (nat44_ed_in2out_slowpath_node) = {
+  .function = nat44_ed_in2out_slow_path_fn,
+  .name = "nat44-ed-in2out-slowpath",
   .vector_size = sizeof (u32),
   .format_trace = format_snat_in2out_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -2228,25 +3896,26 @@ VLIB_REGISTER_NODE (snat_in2out_slowpath_node) = {
   .next_nodes = {
     [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
     [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
-    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath",
+    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-slowpath",
     [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+    [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
   },
 };

-VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_slowpath_node,
-                              snat_in2out_slow_path_fn);
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_slowpath_node,
+                              nat44_ed_in2out_slow_path_fn);

 static uword
-snat_in2out_output_slow_path_fn (vlib_main_t * vm,
-                                 vlib_node_runtime_t * node,
-                                 vlib_frame_t * frame)
+nat44_ed_in2out_output_slow_path_fn (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * frame)
 {
-  return snat_in2out_node_fn_inline (vm, node, frame, 1 /* is_slow_path */, 1);
+  return nat44_ed_in2out_node_fn_inline (vm, node, frame, 1, 1);
 }

-VLIB_REGISTER_NODE (snat_in2out_output_slowpath_node) = {
-  .function = snat_in2out_output_slow_path_fn,
-  .name = "nat44-in2out-output-slowpath",
+VLIB_REGISTER_NODE (nat44_ed_in2out_output_slowpath_node) = {
+  .function = nat44_ed_in2out_output_slow_path_fn,
+  .name = "nat44-ed-in2out-output-slowpath",
   .vector_size = sizeof (u32),
   .format_trace = format_snat_in2out_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -2262,13 +3931,14 @@ VLIB_REGISTER_NODE (snat_in2out_output_slowpath_node) = {
   .next_nodes = {
     [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
     [SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output",
-    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-output-slowpath",
+    [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-output-slowpath",
     [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+    [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
   },
 };

-VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_output_slowpath_node,
-                              snat_in2out_output_slow_path_fn);
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_output_slowpath_node,
+                              nat44_ed_in2out_output_slow_path_fn);

 /**************************/
 /*** deterministic mode ***/
@@ -2283,7 +3953,7 @@ snat_det_in2out_node_fn (vlib_main_t * vm,
   u32 pkts_processed = 0;
   snat_main_t * sm = &snat_main;
   u32 now = (u32) vlib_time_now (vm);
-  u32 thread_index = vlib_get_thread_index ();
+  u32 thread_index = vm->thread_index;

   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -2376,8 +4046,8 @@ snat_det_in2out_node_fn (vlib_main_t * vm,
               dm0 = snat_det_map_by_user(sm, &ip0->src_address);
               if (PREDICT_FALSE(!dm0))
                 {
-                  clib_warning("no match for internal host %U",
-                               format_ip4_address, &ip0->src_address);
+                  nat_log_info ("no match for internal host %U",
+                                format_ip4_address, &ip0->src_address);
                   next0 = SNAT_IN2OUT_NEXT_DROP;
                   b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
                   goto trace0;
@@ -2526,8 +4196,8 @@ snat_det_in2out_node_fn (vlib_main_t * vm,
               dm1 = snat_det_map_by_user(sm, &ip1->src_address);
               if (PREDICT_FALSE(!dm1))
                 {
-                  clib_warning("no match for internal host %U",
-                               format_ip4_address, &ip0->src_address);
+                  nat_log_info ("no match for internal host %U",
+                                format_ip4_address, &ip0->src_address);
                   next1 = SNAT_IN2OUT_NEXT_DROP;
                   b1->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
                   goto trace1;
@@ -2712,8 +4382,8 @@ snat_det_in2out_node_fn (vlib_main_t * vm,
               dm0 = snat_det_map_by_user(sm, &ip0->src_address);
               if (PREDICT_FALSE(!dm0))
                 {
-                  clib_warning("no match for internal host %U",
-                               format_ip4_address, &ip0->src_address);
+                  nat_log_info ("no match for internal host %U",
+                                format_ip4_address, &ip0->src_address);
                   next0 = SNAT_IN2OUT_NEXT_DROP;
                   b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
                   goto trace00;
@@ -2884,11 +4554,11 @@ VLIB_NODE_FUNCTION_MULTIARCH (snat_det_in2out_node, snat_det_in2out_node_fn);
  * @param e optional parameter
  */
 u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
-                          u32 thread_index, vlib_buffer_t *b0, u8 *p_proto,
+                          u32 thread_index, vlib_buffer_t *b0,
+                          ip4_header_t *ip0, u8 *p_proto,
                           snat_session_key_t *p_value,
                           u8 *p_dont_translate, void *d, void *e)
 {
-  ip4_header_t *ip0;
   icmp46_header_t *icmp0;
   u32 sw_if_index0;
   u32 rx_fib_index0;
@@ -2907,7 +4577,6 @@ u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
   ip4_address_t in_addr;
   u16 in_port;

-  ip0 = vlib_buffer_get_current (b0);
   icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
   echo0 = (icmp_echo_header_t *)(icmp0+1);
   sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
@@ -2946,8 +4615,8 @@ u32 icmp_match_in2out_det(snat_main_t *sm, vlib_node_runtime_t *node,
   dm0 = snat_det_map_by_user(sm, &in_addr);
   if (PREDICT_FALSE(!dm0))
     {
-      clib_warning("no match for internal host %U",
-                   format_ip4_address, &in_addr);
+      nat_log_info ("no match for internal host %U",
+                    format_ip4_address, &in_addr);
       if (PREDICT_FALSE(snat_not_translate_fast(sm, node, sw_if_index0, ip0,
           IP_PROTOCOL_ICMP, rx_fib_index0)))
         {
@@ -3038,19 +4707,21 @@ snat_in2out_worker_handoff_fn_inline (vlib_main_t * vm,
 {
   snat_main_t *sm = &snat_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  u32 n_left_from, *from, *to_next = 0;
+  u32 n_left_from, *from, *to_next = 0, *to_next_drop = 0;
   static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
   static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index = 0;
   vlib_frame_queue_elt_t *hf = 0;
+  vlib_frame_queue_t *fq;
   vlib_frame_t *f = 0;
   int i;
   u32 n_left_to_next_worker = 0, *to_next_worker = 0;
   u32 next_worker_index = 0;
   u32 current_worker_index = ~0;
-  u32 thread_index = vlib_get_thread_index ();
+  u32 thread_index = vm->thread_index;
   u32 fq_index;
   u32 to_node_index;
+  vlib_frame_t *d = 0;

   ASSERT (vec_len (sm->workers));
@@ -3070,7 +4741,7 @@ snat_in2out_worker_handoff_fn_inline (vlib_main_t * vm,
       vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
       vec_validate_init_empty (congested_handoff_queue_by_worker_index,
-                               sm->first_worker_index + sm->num_workers - 1,
+                               tm->n_vlib_mains - 1,
                                (vlib_frame_queue_t *) (~0));
     }
@@ -3105,6 +4776,26 @@ snat_in2out_worker_handoff_fn_inline (vlib_main_t * vm,

       if (next_worker_index != current_worker_index)
         {
+          fq = is_vlib_frame_queue_congested (
+            fq_index, next_worker_index, NAT_FQ_NELTS - 2,
+            congested_handoff_queue_by_worker_index);
+
+          if (fq)
+            {
+              /* if this is 1st frame */
+              if (!d)
+                {
+                  d = vlib_get_frame_to_node (vm, sm->error_node_index);
+                  to_next_drop = vlib_frame_vector_args (d);
+                }
+
+              to_next_drop[0] = bi0;
+              to_next_drop += 1;
+              d->n_vectors++;
+              b0->error = node->errors[SNAT_IN2OUT_ERROR_FQ_CONGESTED];
+              goto trace0;
+            }
+
           if (hf)
             hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
@@ -3146,6 +4837,7 @@ snat_in2out_worker_handoff_fn_inline (vlib_main_t * vm,
           f->n_vectors++;
         }

+trace0:
       if (PREDICT_FALSE
           ((node->flags & VLIB_NODE_FLAG_TRACE)
            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
         {
@@ -3159,6 +4851,9 @@ snat_in2out_worker_handoff_fn_inline (vlib_main_t * vm,
   if (f)
     vlib_put_frame_to_node (vm, to_node_index, f);

+  if (d)
+    vlib_put_frame_to_node (vm, sm->error_node_index, d);
+
   if (hf)
     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
@@ -3203,6 +4898,9 @@ VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
   .format_trace = format_snat_in2out_worker_handoff_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,

+  .n_errors = ARRAY_LEN(snat_in2out_error_strings),
+  .error_strings = snat_in2out_error_strings,
+
   .n_next_nodes = 1,

   .next_nodes = {
@@ -3252,7 +4950,7 @@ is_hairpinning (snat_main_t *sm, ip4_address_t * dst_addr)
     }

   m_key.addr.as_u32 = dst_addr->as_u32;
-  m_key.fib_index = sm->outside_fib_index;
+  m_key.fib_index = 0;
   m_key.port = 0;
   m_key.protocol = 0;
   kv.key = m_key.as_u64;
@@ -3262,16 +4960,20 @@ is_hairpinning (snat_main_t *sm, ip4_address_t * dst_addr)
   return 0;
 }

-static uword
-snat_hairpin_dst_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
+static inline uword
+snat_hairpin_dst_fn_inline (vlib_main_t * vm,
+                            vlib_node_runtime_t * node,
+                            vlib_frame_t * frame,
+                            int is_ed)
 {
-  u32 n_left_from, * from, * to_next;
+  u32 n_left_from, * from, * to_next, stats_node_index;
   snat_in2out_next_t next_index;
   u32 pkts_processed = 0;
   snat_main_t * sm = &snat_main;

+  stats_node_index = is_ed ? nat44_ed_hairpin_dst_node.index :
+    snat_hairpin_dst_node.index;
+
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
@@ -3313,21 +5015,23 @@ snat_hairpin_dst_fn (vlib_main_t * vm,
                   udp_header_t * udp0 = ip4_next_header (ip0);
                   tcp_header_t * tcp0 = (tcp_header_t *) udp0;

-                  snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
+                  snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, is_ed);
                 }
               else if (proto0 == SNAT_PROTOCOL_ICMP)
                 {
                   icmp46_header_t * icmp0 = ip4_next_header (ip0);

-                  snat_icmp_hairpinning (sm, b0, ip0, icmp0);
+                  snat_icmp_hairpinning (sm, b0, ip0, icmp0, is_ed);
                 }
               else
                 {
-                  snat_hairpinning_unknown_proto (sm, b0, ip0);
+                  if (is_ed)
+                    nat44_ed_hairpinning_unknown_proto (sm, b0, ip0);
+                  else
+                    nat_hairpinning_sm_unknown_proto (sm, b0, ip0);
                 }

               vnet_buffer (b0)->snat.flags = SNAT_FLAG_HAIRPINNING;
-              clib_warning("is hairpinning");
             }

           pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
@@ -3341,12 +5045,20 @@ snat_hairpin_dst_fn (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }

-  vlib_node_increment_counter (vm, snat_hairpin_dst_node.index,
+  vlib_node_increment_counter (vm, stats_node_index,
                                SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
                                pkts_processed);
   return frame->n_vectors;
 }

+static uword
+snat_hairpin_dst_fn (vlib_main_t * vm,
+                     vlib_node_runtime_t * node,
+                     vlib_frame_t * frame)
+{
+  return snat_hairpin_dst_fn_inline (vm, node, frame, 0);
+}
+
 VLIB_REGISTER_NODE (snat_hairpin_dst_node) = {
   .function = snat_hairpin_dst_fn,
   .name = "nat44-hairpin-dst",
@@ -3365,15 +5077,44 @@ VLIB_NODE_FUNCTION_MULTIARCH (snat_hairpin_dst_node,
                               snat_hairpin_dst_fn);

 static uword
-snat_hairpin_src_fn (vlib_main_t * vm,
-                     vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
+nat44_ed_hairpin_dst_fn (vlib_main_t * vm,
+                         vlib_node_runtime_t * node,
+                         vlib_frame_t * frame)
 {
-  u32 n_left_from, * from, * to_next;
+  return snat_hairpin_dst_fn_inline (vm, node, frame, 1);
+}
+
+VLIB_REGISTER_NODE (nat44_ed_hairpin_dst_node) = {
+  .function = nat44_ed_hairpin_dst_fn,
+  .name = "nat44-ed-hairpin-dst",
+  .vector_size = sizeof (u32),
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(snat_in2out_error_strings),
+  .error_strings = snat_in2out_error_strings,
+  .n_next_nodes = 2,
+  .next_nodes = {
+    [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
+    [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_hairpin_dst_node,
+                              nat44_ed_hairpin_dst_fn);
+
+static inline uword
+snat_hairpin_src_fn_inline (vlib_main_t * vm,
+                            vlib_node_runtime_t * node,
+                            vlib_frame_t * frame,
+                            int is_ed)
+{
+  u32 n_left_from, * from, * to_next, stats_node_index;
   snat_in2out_next_t next_index;
   u32 pkts_processed = 0;
   snat_main_t *sm = &snat_main;

+  stats_node_index = is_ed ? nat44_ed_hairpin_src_node.index :
+    snat_hairpin_src_node.index;
+
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
@@ -3408,7 +5149,7 @@ snat_hairpin_src_fn (vlib_main_t * vm,
           pool_foreach (i, sm->output_feature_interfaces,
           ({
             /* Only packets from NAT inside interface */
-            if ((i->is_inside == 1) && (sw_if_index0 == i->sw_if_index))
+            if ((nat_interface_is_inside(i)) && (sw_if_index0 == i->sw_if_index))
               {
                 if (PREDICT_FALSE ((vnet_buffer (b0)->snat.flags) &
                                     SNAT_FLAG_HAIRPINNING))
@@ -3433,12 +5174,20 @@ snat_hairpin_src_fn (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }

-  vlib_node_increment_counter (vm, snat_hairpin_src_node.index,
+  vlib_node_increment_counter (vm, stats_node_index,
                                SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
                                pkts_processed);
   return frame->n_vectors;
 }

+static uword
+snat_hairpin_src_fn (vlib_main_t * vm,
+                     vlib_node_runtime_t * node,
+                     vlib_frame_t * frame)
+{
+  return snat_hairpin_src_fn_inline (vm, node, frame, 0);
+}
+
 VLIB_REGISTER_NODE (snat_hairpin_src_node) = {
   .function = snat_hairpin_src_fn,
   .name = "nat44-hairpin-src",
@@ -3458,6 +5207,33 @@ VLIB_REGISTER_NODE (snat_hairpin_src_node) = {

 VLIB_NODE_FUNCTION_MULTIARCH (snat_hairpin_src_node,
                               snat_hairpin_src_fn);

+static uword
+nat44_ed_hairpin_src_fn (vlib_main_t * vm,
+                         vlib_node_runtime_t * node,
+                         vlib_frame_t * frame)
+{
+  return snat_hairpin_src_fn_inline (vm, node, frame, 1);
+}
+
+VLIB_REGISTER_NODE (nat44_ed_hairpin_src_node) = {
+  .function = nat44_ed_hairpin_src_fn,
+  .name = "nat44-ed-hairpin-src",
+  .vector_size = sizeof (u32),
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(snat_in2out_error_strings),
+  .error_strings = snat_in2out_error_strings,
+  .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT,
+  .next_nodes = {
+     [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop",
+     [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-ed-in2out-output",
+     [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output",
+     [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff",
+  },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_hairpin_src_node,
+                              nat44_ed_hairpin_src_fn);
+
 static uword
 snat_in2out_fast_static_map_fn (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
@@ -3545,7 +5321,7 @@ snat_in2out_fast_static_map_fn (vlib_main_t * vm,
           key0.port = udp0->src_port;
           key0.fib_index = rx_fib_index0;

-          if (snat_static_mapping_match(sm, key0, &sm0, 0, 0))
+          if (snat_static_mapping_match(sm, key0, &sm0, 0, 0, 0, 0, 0))
            {
              b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
              next0= SNAT_IN2OUT_NEXT_DROP;
@@ -3600,7 +5376,7 @@ snat_in2out_fast_static_map_fn (vlib_main_t * vm,
             }

           /* Hairpinning */
-          snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
+          snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, 0);

         trace0:
           if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
@@ -3650,6 +5426,7 @@ VLIB_REGISTER_NODE (snat_in2out_fast_node) = {
     [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
     [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath",
     [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+    [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
   },
 };