X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fnat%2Fin2out_ed.c;h=807a716e9426b810bfd484807f5e8d974c41a760;hb=ba5f9bc7534bcf58225b0658993728b1f4d47a67;hp=733e4f0c0e5379765a9b620e2f3daea54ae23438;hpb=9a6dc8a9376e7270331255861b3ead1045b40c6d;p=vpp.git diff --git a/src/plugins/nat/in2out_ed.c b/src/plugins/nat/in2out_ed.c index 733e4f0c0e5..807a716e942 100644 --- a/src/plugins/nat/in2out_ed.c +++ b/src/plugins/nat/in2out_ed.c @@ -23,11 +23,12 @@ #include #include #include +#include #include #include #include -#include #include +#include #include #include @@ -106,6 +107,16 @@ nat44_i2o_ed_is_idle_session_cb (clib_bihash_kv_16_8_t * kv, void *arg) if (clib_bihash_add_del_16_8 (&tsm->out2in_ed, &ed_kv, 0)) nat_elog_warn ("out2in_ed key del failed"); + ed_bihash_kv_t bihash_key; + clib_memset (&bihash_key, 0, sizeof (bihash_key)); + bihash_key.k.dst_address = s->ext_host_addr.as_u32; + bihash_key.k.dst_port = s->ext_host_port; + bihash_key.k.src_address = s->out2in.addr.as_u32; + bihash_key.k.src_port = s->out2in.port; + bihash_key.k.protocol = s->out2in.protocol; + clib_bihash_add_del_16_8 (&sm->ed_ext_ports, &bihash_key.kv, + 0 /* is_add */ ); + if (snat_is_unk_proto_session (s)) goto delete; @@ -181,14 +192,103 @@ icmp_in2out_ed_slow_path (snat_main_t * sm, vlib_buffer_t * b0, return next0; } +static_always_inline u16 +snat_random_port (u16 min, u16 max) +{ + snat_main_t *sm = &snat_main; + return min + random_u32 (&sm->random_seed) / + (random_u32_max () / (max - min + 1) + 1); +} + +static int +nat_alloc_addr_and_port_ed (snat_address_t * addresses, u32 fib_index, + u32 thread_index, nat_ed_ses_key_t * key, + snat_session_key_t * key1, u16 port_per_thread, + u32 snat_thread_index) +{ + int i; + snat_address_t *a, *ga = 0; + u32 portnum; + + const u16 port_thread_offset = (port_per_thread * snat_thread_index) + 1024; + ed_bihash_kv_t bihash_key; + clib_memset (&bihash_key, 0, sizeof (bihash_key)); + bihash_key.k.dst_address = key->r_addr.as_u32; + bihash_key.k.dst_port = key->r_port; + bihash_key.k.protocol = key1->protocol; + + for (i = 0; i < vec_len (addresses); i++) + { + a = addresses + i; + switch (key1->protocol) + { +#define _(N, j, n, s) \ + case SNAT_PROTOCOL_##N: \ + if (a->fib_index == fib_index) \ + { \ + bihash_key.k.src_address = a->addr.as_u32; \ + u16 port = snat_random_port (1, port_per_thread); \ + u16 attempts = port_per_thread; \ + while (attempts > 0) \ + { \ + --attempts; \ + portnum = port_thread_offset + port; \ + bihash_key.k.src_port = clib_host_to_net_u16 (portnum); \ + int rv = clib_bihash_add_del_16_8 ( \ + &snat_main.ed_ext_ports, &bihash_key.kv, 2 /* is_add */); \ + if (0 == rv) \ + { \ + ++a->busy_##n##_port_refcounts[portnum]; \ + a->busy_##n##_ports_per_thread[thread_index]++; \ + a->busy_##n##_ports++; \ + key1->addr = a->addr; \ + key1->port = clib_host_to_net_u16 (portnum); \ + return 0; \ + } \ + port = (port + 1) % port_per_thread; \ + } \ + } \ + else if (a->fib_index == ~0) \ + { \ + ga = a; \ + } \ + break; + + foreach_snat_protocol; + default: + nat_elog_info ("unknown protocol"); + return 1; + } + } + + if (ga) + { + /* fake fib_index to reuse macro */ + fib_index = ~0; + a = ga; + switch (key1->protocol) + { + foreach_snat_protocol; + default: + nat_elog_info ("unknown protocol"); + return 1; + } + } + +#undef _ + + /* Totally out of translations to use... 
*/ + snat_ipfix_logging_addresses_exhausted (thread_index, 0); + return 1; +} + static u32 slow_path_ed (snat_main_t * sm, vlib_buffer_t * b, u32 rx_fib_index, clib_bihash_kv_16_8_t * kv, snat_session_t ** sessionp, - vlib_node_runtime_t * node, u32 next, u32 thread_index, f64 now, - tcp_header_t * tcp) + vlib_node_runtime_t * node, u32 next, u32 thread_index, f64 now) { snat_session_t *s = 0; snat_user_t *u; @@ -209,12 +309,18 @@ slow_path_ed (snat_main_t * sm, }; nat44_is_idle_session_ctx_t ctx; - if (PREDICT_FALSE (maximum_sessions_exceeded (sm, thread_index))) + u32 cleared = 0; + + if (PREDICT_FALSE (nat44_maximum_sessions_exceeded (sm, thread_index))) { - b->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_SESSIONS_EXCEEDED]; - nat_ipfix_logging_max_sessions (thread_index, sm->max_translations); - nat_elog_notice ("maximum sessions exceeded"); - return NAT_NEXT_DROP; + if (PREDICT_FALSE + (!(cleared = nat44_users_cleanup (thread_index, now)))) + { + b->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_SESSIONS_EXCEEDED]; + nat_ipfix_logging_max_sessions (thread_index, sm->max_translations); + nat_elog_notice ("maximum sessions exceeded"); + return NAT_NEXT_DROP; + } } key0.addr = key->l_addr; @@ -222,19 +328,27 @@ slow_path_ed (snat_main_t * sm, key1.protocol = key0.protocol = proto; key0.fib_index = rx_fib_index; key1.fib_index = sm->outside_fib_index; + /* First try to match static mapping by local address and port */ if (snat_static_mapping_match (sm, key0, &key1, 0, 0, 0, &lb, 0, &identity_nat)) { /* Try to create dynamic translation */ - if (snat_alloc_outside_address_and_port (sm->addresses, rx_fib_index, - thread_index, &key1, - sm->port_per_thread, - tsm->snat_thread_index)) + if (nat_alloc_addr_and_port_ed (sm->addresses, rx_fib_index, + thread_index, key, &key1, + sm->port_per_thread, + tsm->snat_thread_index)) { - nat_elog_notice ("addresses exhausted"); - b->error = node->errors[NAT_IN2OUT_ED_ERROR_OUT_OF_PORTS]; - return NAT_NEXT_DROP; + if (cleared || !nat44_out_of_ports_cleanup (thread_index, now) || + nat_alloc_addr_and_port_ed (sm->addresses, rx_fib_index, + thread_index, key, &key1, + sm->port_per_thread, + tsm->snat_thread_index)) + { + nat_elog_notice ("addresses exhausted"); + b->error = node->errors[NAT_IN2OUT_ED_ERROR_OUT_OF_PORTS]; + return NAT_NEXT_DROP; + } } } else @@ -244,15 +358,19 @@ slow_path_ed (snat_main_t * sm, *sessionp = s; return next; } - is_sm = 1; } - if (proto == SNAT_PROTOCOL_TCP) + if (PREDICT_TRUE (proto == SNAT_PROTOCOL_TCP)) { - if (!tcp_is_init (tcp)) + if (PREDICT_FALSE + (!tcp_flags_is_init + (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))) { b->error = node->errors[NAT_IN2OUT_ED_ERROR_NON_SYN]; + if (!is_sm) + snat_free_outside_address_and_port (sm->addresses, + thread_index, &key1); return NAT_NEXT_DROP; } } @@ -264,6 +382,7 @@ slow_path_ed (snat_main_t * sm, if (!is_sm) snat_free_outside_address_and_port (sm->addresses, thread_index, &key1); + b->error = node->errors[NAT_IN2OUT_ED_ERROR_CANNOT_CREATE_USER]; return NAT_NEXT_DROP; } @@ -275,6 +394,7 @@ slow_path_ed (snat_main_t * sm, if (!is_sm) snat_free_outside_address_and_port (sm->addresses, thread_index, &key1); + b->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_USER_SESS_EXCEEDED]; return NAT_NEXT_DROP; } @@ -401,7 +521,6 @@ nat_not_translate_output_feature_fwd (snat_main_t * sm, ip4_header_t * ip, { nat_ed_ses_key_t key; clib_bihash_kv_16_8_t kv, value; - udp_header_t *udp; snat_session_t *s = 0; snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; @@ -411,7 +530,7 
@@ nat_not_translate_output_feature_fwd (snat_main_t * sm, ip4_header_t * ip, if (ip->protocol == IP_PROTOCOL_ICMP) { key.as_u64[0] = key.as_u64[1] = 0; - if (get_icmp_i2o_ed_key (ip, &key)) + if (get_icmp_i2o_ed_key (b, ip, &key)) return 0; key.fib_index = 0; kv.key[0] = key.as_u64[0]; @@ -419,9 +538,9 @@ nat_not_translate_output_feature_fwd (snat_main_t * sm, ip4_header_t * ip, } else if (ip->protocol == IP_PROTOCOL_UDP || ip->protocol == IP_PROTOCOL_TCP) { - udp = ip4_next_header (ip); make_ed_kv (&kv, &ip->src_address, &ip->dst_address, ip->protocol, 0, - udp->src_port, udp->dst_port); + vnet_buffer (b)->ip.reass.l4_src_port, + vnet_buffer (b)->ip.reass.l4_dst_port); } else { @@ -436,8 +555,8 @@ nat_not_translate_output_feature_fwd (snat_main_t * sm, ip4_header_t * ip, { if (ip->protocol == IP_PROTOCOL_TCP) { - tcp_header_t *tcp = ip4_next_header (ip); - if (nat44_set_tcp_session_state_i2o (sm, s, tcp, thread_index)) + if (nat44_set_tcp_session_state_i2o + (sm, now, s, b, thread_index)) return 1; } /* Accounting */ @@ -514,7 +633,6 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, u8 * p_proto, snat_session_key_t * p_value, u8 * p_dont_translate, void *d, void *e) { - icmp46_header_t *icmp; u32 sw_if_index; u32 rx_fib_index; nat_ed_ses_key_t key; @@ -525,12 +643,11 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, int err; snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; - icmp = (icmp46_header_t *) ip4_next_header (ip); sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX]; rx_fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index); key.as_u64[0] = key.as_u64[1] = 0; - err = get_icmp_i2o_ed_key (ip, &key); + err = get_icmp_i2o_ed_key (b, ip, &key); if (err != 0) { b->error = node->errors[err]; @@ -546,18 +663,10 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, { if (vnet_buffer (b)->sw_if_index[VLIB_TX] != ~0) { - if (PREDICT_FALSE (nat44_ed_not_translate_output_feature (sm, ip, - key.proto, - key. - l_port, - key. - r_port, - thread_index, - sw_if_index, - vnet_buffer - (b)-> - sw_if_index - [VLIB_TX]))) + if (PREDICT_FALSE + (nat44_ed_not_translate_output_feature + (sm, ip, key.proto, key.l_port, key.r_port, thread_index, + sw_if_index, vnet_buffer (b)->sw_if_index[VLIB_TX]))) { dont_translate = 1; goto out; @@ -575,7 +684,9 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, } } - if (PREDICT_FALSE (icmp_is_error_message (icmp))) + if (PREDICT_FALSE + (icmp_type_is_error_message + (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))) { b->error = node->errors[NAT_IN2OUT_ED_ERROR_BAD_ICMP_TYPE]; next = NAT_NEXT_DROP; @@ -583,7 +694,7 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, } next = slow_path_ed (sm, b, rx_fib_index, &kv, &s, node, next, - thread_index, vlib_time_now (sm->vlib_main), 0); + thread_index, vlib_time_now (sm->vlib_main)); if (PREDICT_FALSE (next == NAT_NEXT_DROP)) goto out; @@ -596,9 +707,13 @@ icmp_match_in2out_ed (snat_main_t * sm, vlib_node_runtime_t * node, } else { - if (PREDICT_FALSE (icmp->type != ICMP4_echo_request && - icmp->type != ICMP4_echo_reply && - !icmp_is_error_message (icmp))) + if (PREDICT_FALSE + (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags != + ICMP4_echo_request + && vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags != + ICMP4_echo_reply + && !icmp_type_is_error_message (vnet_buffer (b)->ip. 
+ reass.icmp_type_or_tcp_flags))) { b->error = node->errors[NAT_IN2OUT_ED_ERROR_BAD_ICMP_TYPE]; next = NAT_NEXT_DROP; @@ -688,7 +803,7 @@ nat44_ed_in2out_unknown_proto (snat_main_t * sm, } else { - if (PREDICT_FALSE (maximum_sessions_exceeded (sm, thread_index))) + if (PREDICT_FALSE (nat44_maximum_sessions_exceeded (sm, thread_index))) { b->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_SESSIONS_EXCEEDED]; nat_ipfix_logging_max_sessions (thread_index, sm->max_translations); @@ -700,6 +815,7 @@ nat44_ed_in2out_unknown_proto (snat_main_t * sm, thread_index); if (!u) { + b->error = node->errors[NAT_IN2OUT_ED_ERROR_CANNOT_CREATE_USER]; nat_elog_warn ("create NAT user failed"); return 0; } @@ -768,6 +884,7 @@ nat44_ed_in2out_unknown_proto (snat_main_t * sm, s = nat_ed_session_alloc (sm, u, thread_index, now); if (!s) { + b->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_USER_SESS_EXCEEDED]; nat44_delete_user_with_no_session (sm, u, thread_index); nat_elog_warn ("create NAT session failed"); return 0; @@ -821,10 +938,10 @@ nat44_ed_in2out_unknown_proto (snat_main_t * sm, } static inline uword -nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, int is_slow_path, - int is_output_feature) +nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int is_output_feature) { u32 n_left_from, *from, *to_next, pkts_processed = 0, stats_node_index; nat_next_t next_index; @@ -833,16 +950,12 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, u32 thread_index = vm->thread_index; snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets = - 0, fragments = 0, def_slow, def_reass; + 0, def_slow; def_slow = is_output_feature ? NAT_NEXT_IN2OUT_ED_OUTPUT_SLOW_PATH : NAT_NEXT_IN2OUT_ED_SLOW_PATH; - def_reass = is_output_feature ? NAT_NEXT_IN2OUT_ED_OUTPUT_REASS : - NAT_NEXT_IN2OUT_ED_REASS; - - stats_node_index = is_slow_path ? sm->ed_in2out_slowpath_node_index : - sm->ed_in2out_node_index; + stats_node_index = sm->ed_in2out_node_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -854,64 +967,37 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - while (n_left_from >= 4 && n_left_to_next >= 2) + while (n_left_from > 0 && n_left_to_next > 0) { - u32 bi0, bi1; - vlib_buffer_t *b0, *b1; + u32 bi0; + vlib_buffer_t *b0; u32 next0, sw_if_index0, rx_fib_index0, iph_offset0 = 0, proto0, new_addr0, old_addr0; - u32 next1, sw_if_index1, rx_fib_index1, iph_offset1 = 0, proto1, - new_addr1, old_addr1; - u16 old_port0, new_port0, old_port1, new_port1; - ip4_header_t *ip0, *ip1; - udp_header_t *udp0, *udp1; - tcp_header_t *tcp0, *tcp1; - icmp46_header_t *icmp0, *icmp1; - snat_session_t *s0 = 0, *s1 = 0; - clib_bihash_kv_16_8_t kv0, value0, kv1, value1; - ip_csum_t sum0, sum1; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t *p2, *p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, LOAD); - vlib_prefetch_buffer_header (p3, LOAD); - - CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); - CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); - } - - /* speculatively enqueue b0 and b1 to the current next frame */ - to_next[0] = bi0 = from[0]; - to_next[1] = bi1 = from[1]; - from += 2; - to_next += 2; - n_left_from -= 2; - n_left_to_next -= 2; + u16 old_port0, new_port0; + ip4_header_t *ip0; + udp_header_t *udp0; + tcp_header_t *tcp0; + snat_session_t *s0 = 0; + clib_bihash_kv_16_8_t kv0, value0; + ip_csum_t sum0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); if (is_output_feature) { - // output feature fast path is enabled on the arc - // we need new arc_next feature - if (PREDICT_TRUE (!is_slow_path)) - { - vnet_feature_next (&nat_buffer_opaque (b0)->arc_next, b0); - vnet_feature_next (&nat_buffer_opaque (b1)->arc_next, b1); - } - - iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; - iph_offset1 = vnet_buffer (b1)->ip.save_rewrite_length; + vnet_feature_next (&nat_buffer_opaque (b0)->arc_next, b0); + iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length; } next0 = nat_buffer_opaque (b0)->arc_next; - next1 = nat_buffer_opaque (b1)->arc_next; ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0); @@ -928,113 +1014,73 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, ICMP4_time_exceeded_ttl_exceeded_in_transit, 0); next0 = NAT_NEXT_ICMP_ERROR; - goto trace00; + goto trace0; } udp0 = ip4_next_header (ip0); tcp0 = (tcp_header_t *) udp0; - icmp0 = (icmp46_header_t *) udp0; proto0 = ip_proto_to_snat_proto (ip0->protocol); - if (is_slow_path) + if (PREDICT_FALSE (proto0 == ~0)) { - if (PREDICT_FALSE (proto0 == ~0)) - { - s0 = nat44_ed_in2out_unknown_proto (sm, b0, ip0, - rx_fib_index0, - thread_index, now, vm, - node); - if (!s0) - next0 = NAT_NEXT_DROP; - other_packets++; - goto trace00; - } - - if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) - { - next0 = icmp_in2out_ed_slow_path - (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, thread_index, &s0); - icmp_packets++; - goto trace00; - } + next0 = def_slow; + goto trace0; } - else - { - if (PREDICT_FALSE (proto0 == ~0)) - { - next0 = def_slow; - goto trace00; - } - - if (ip4_is_fragment (ip0)) - { - next0 = def_reass; - fragments++; - goto trace00; - } - if (is_output_feature) - { - if (PREDICT_FALSE - (nat_not_translate_output_feature_fwd - (sm, ip0, thread_index, now, vm, b0))) - goto trace00; - } + if (is_output_feature) + { + if (PREDICT_FALSE (nat_not_translate_output_feature_fwd + (sm, ip0, thread_index, now, vm, b0))) + goto trace0; + } - if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) - { - next0 = def_slow; - goto trace00; - } + if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = def_slow; + goto trace0; } make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, - ip0->protocol, rx_fib_index0, udp0->src_port, - udp0->dst_port); + ip0->protocol, rx_fib_index0, + vnet_buffer (b0)->ip.reass.l4_src_port, + vnet_buffer (b0)->ip.reass.l4_dst_port); + // lookup for session if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv0, &value0)) { - if (is_slow_path) - { - if (is_output_feature) - { - if (PREDICT_FALSE - 
(nat44_ed_not_translate_output_feature - (sm, ip0, ip0->protocol, udp0->src_port, - udp0->dst_port, thread_index, sw_if_index0, - vnet_buffer (b0)->sw_if_index[VLIB_TX]))) - goto trace00; - } - else - { - if (PREDICT_FALSE (nat44_ed_not_translate (sm, node, - sw_if_index0, - ip0, proto0, - rx_fib_index0, - thread_index))) - goto trace00; - } - - next0 = - slow_path_ed (sm, b0, rx_fib_index0, &kv0, &s0, node, - next0, thread_index, now, tcp0); - - if (PREDICT_FALSE (next0 == NAT_NEXT_DROP)) - goto trace00; + // session does not exist go slow path + next0 = def_slow; + goto trace0; + } + s0 = pool_elt_at_index (tsm->sessions, value0.value); - if (PREDICT_FALSE (!s0)) - goto trace00; + if (s0->tcp_close_timestamp) + { + if (now >= s0->tcp_close_timestamp) + { + // session is closed, go slow path + next0 = def_slow; } else { - next0 = def_slow; - goto trace00; + // session in transitory timeout, drop + b0->error = node->errors[NAT_IN2OUT_ED_ERROR_TCP_CLOSED]; + next0 = NAT_NEXT_DROP; } + goto trace0; } - else + + // drop if session expired + u64 sess_timeout_time; + sess_timeout_time = s0->last_heard + + (f64) nat44_session_get_timeout (sm, s0); + if (now >= sess_timeout_time) { - s0 = pool_elt_at_index (tsm->sessions, value0.value); + nat_free_session_data (sm, s0, thread_index, 0); + nat44_delete_session (sm, s0, thread_index); + // session is closed, go slow path + next0 = def_slow; + goto trace0; } b0->flags |= VNET_BUFFER_F_IS_NATED; @@ -1053,49 +1099,62 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, dst_address); ip0->checksum = ip_csum_fold (sum0); - old_port0 = udp0->src_port; - new_port0 = udp0->src_port = s0->out2in.port; + old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port; if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP)) { - sum0 = tcp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, - dst_address); - sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, - length); - if (PREDICT_FALSE (is_twice_nat_session (s0))) + if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment) { - sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, - s0->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); - tcp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + new_port0 = udp0->src_port = s0->out2in.port; + sum0 = tcp0->checksum; + sum0 = + ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, dst_address); + sum0 = + ip_csum_update (sum0, old_port0, new_port0, + ip4_header_t, length); + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + sum0 = + ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, + ip4_header_t, dst_address); + sum0 = + ip_csum_update (sum0, + vnet_buffer (b0)->ip. 
+ reass.l4_dst_port, s0->ext_host_port, + ip4_header_t, length); + tcp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + mss_clamping (sm, tcp0, &sum0); + tcp0->checksum = ip_csum_fold (sum0); } - mss_clamping (sm, tcp0, &sum0); - tcp0->checksum = ip_csum_fold (sum0); tcp_packets++; if (nat44_set_tcp_session_state_i2o - (sm, s0, tcp0, thread_index)) - goto trace00; + (sm, now, s0, b0, thread_index)) + goto trace0; } - else if (udp0->checksum) + else if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment + && udp0->checksum) { + new_port0 = udp0->src_port = s0->out2in.port; sum0 = udp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, - dst_address); - sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, - length); + sum0 = + ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + dst_address); + sum0 = + ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, + length); if (PREDICT_FALSE (is_twice_nat_session (s0))) { sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, s0->ext_host_addr.as_u32, ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); + sum0 = + ip_csum_update (sum0, + vnet_buffer (b0)->ip.reass.l4_dst_port, + s0->ext_host_port, ip4_header_t, length); udp0->dst_port = s0->ext_host_port; ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; } @@ -1104,427 +1163,228 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, } else { - if (PREDICT_FALSE (is_twice_nat_session (s0))) + if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment) { - udp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + new_port0 = udp0->src_port = s0->out2in.port; + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + udp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + udp_packets++; } - udp_packets++; } /* Accounting */ nat44_session_update_counters (s0, now, - vlib_buffer_length_in_chain (vm, - b0), - thread_index); + vlib_buffer_length_in_chain + (vm, b0), thread_index); /* Per-user LRU list maintenance */ nat44_session_update_lru (sm, s0, thread_index); - trace00: + trace0: if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED))) { nat_in2out_ed_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); - t->is_slow_path = is_slow_path; t->sw_if_index = sw_if_index0; t->next_index = next0; - t->session_index = ~0; + t->is_slow_path = 0; + if (s0) t->session_index = s0 - tsm->sessions; + else + t->session_index = ~0; } pkts_processed += next0 == nat_buffer_opaque (b0)->arc_next; + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, stats_node_index, + NAT_IN2OUT_ED_ERROR_IN2OUT_PACKETS, + pkts_processed); + vlib_node_increment_counter (vm, stats_node_index, + NAT_IN2OUT_ED_ERROR_TCP_PACKETS, tcp_packets); + vlib_node_increment_counter (vm, stats_node_index, + NAT_IN2OUT_ED_ERROR_UDP_PACKETS, udp_packets); + vlib_node_increment_counter (vm, stats_node_index, + NAT_IN2OUT_ED_ERROR_ICMP_PACKETS, + icmp_packets); + vlib_node_increment_counter (vm, stats_node_index, + NAT_IN2OUT_ED_ERROR_OTHER_PACKETS, + other_packets); + return frame->n_vectors; +} + +static inline uword +nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t 
* vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int is_output_feature) +{ + u32 n_left_from, *from, *to_next, pkts_processed = 0, stats_node_index; + nat_next_t next_index; + snat_main_t *sm = &snat_main; + f64 now = vlib_time_now (vm); + u32 thread_index = vm->thread_index; + snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index]; + u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets = 0; + + stats_node_index = sm->ed_in2out_slowpath_node_index; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + u32 next0, sw_if_index0, rx_fib_index0, iph_offset0 = 0, proto0, + new_addr0, old_addr0; + u16 old_port0, new_port0; + ip4_header_t *ip0; + udp_header_t *udp0; + tcp_header_t *tcp0; + icmp46_header_t *icmp0; + snat_session_t *s0 = 0; + clib_bihash_kv_16_8_t kv0, value0; + ip_csum_t sum0; + + /* speculatively enqueue b0 to the current next frame */ + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + if (is_output_feature) + iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length; + + next0 = nat_buffer_opaque (b0)->arc_next; - ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b1) + - iph_offset1); + ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + + iph_offset0); - sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX]; - rx_fib_index1 = + sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; + rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, - sw_if_index1); + sw_if_index0); - if (PREDICT_FALSE (ip1->ttl == 1)) + if (PREDICT_FALSE (ip0->ttl == 1)) { - vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0; - icmp4_error_set_vnet_buffer (b1, ICMP4_time_exceeded, + vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded, ICMP4_time_exceeded_ttl_exceeded_in_transit, 0); - next1 = NAT_NEXT_ICMP_ERROR; - goto trace01; + next0 = NAT_NEXT_ICMP_ERROR; + goto trace0; } - udp1 = ip4_next_header (ip1); - tcp1 = (tcp_header_t *) udp1; - icmp1 = (icmp46_header_t *) udp1; - proto1 = ip_proto_to_snat_proto (ip1->protocol); + udp0 = ip4_next_header (ip0); + tcp0 = (tcp_header_t *) udp0; + icmp0 = (icmp46_header_t *) udp0; + proto0 = ip_proto_to_snat_proto (ip0->protocol); - if (is_slow_path) + if (PREDICT_FALSE (proto0 == ~0)) { - if (PREDICT_FALSE (proto1 == ~0)) - { - s1 = nat44_ed_in2out_unknown_proto (sm, b1, ip1, - rx_fib_index1, - thread_index, now, vm, - node); - if (!s1) - next1 = NAT_NEXT_DROP; - other_packets++; - goto trace01; - } + s0 = nat44_ed_in2out_unknown_proto (sm, b0, ip0, + rx_fib_index0, + thread_index, now, + vm, node); + if (!s0) + next0 = NAT_NEXT_DROP; + + other_packets++; + goto trace0; + } - if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP)) - { - next1 = icmp_in2out_ed_slow_path - (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, - next1, now, thread_index, &s1); - icmp_packets++; - goto trace01; - } + if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) + { + next0 = icmp_in2out_ed_slow_path + (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, + node, next0, now, thread_index, &s0); + icmp_packets++; + goto trace0; } - else + + // move down + make_ed_kv (&kv0, 
&ip0->src_address, &ip0->dst_address, + ip0->protocol, rx_fib_index0, + vnet_buffer (b0)->ip.reass.l4_src_port, + vnet_buffer (b0)->ip.reass.l4_dst_port); + + if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &kv0, &value0)) { - if (PREDICT_FALSE (proto1 == ~0)) - { - next1 = def_slow; - goto trace01; - } + s0 = pool_elt_at_index (tsm->sessions, value0.value); - if (ip4_is_fragment (ip1)) + if (s0->tcp_close_timestamp && now >= s0->tcp_close_timestamp) { - next1 = def_reass; - fragments++; - goto trace01; + nat_free_session_data (sm, s0, thread_index, 0); + nat44_delete_session (sm, s0, thread_index); + s0 = NULL; } + } + if (!s0) + { if (is_output_feature) { if (PREDICT_FALSE - (nat_not_translate_output_feature_fwd - (sm, ip1, thread_index, now, vm, b1))) - goto trace01; - } + (nat44_ed_not_translate_output_feature + (sm, ip0, ip0->protocol, + vnet_buffer (b0)->ip.reass.l4_src_port, + vnet_buffer (b0)->ip.reass.l4_dst_port, + thread_index, sw_if_index0, + vnet_buffer (b0)->sw_if_index[VLIB_TX]))) + goto trace0; - if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP)) + /* + * Send DHCP packets to the ipv4 stack, or we won't + * be able to use dhcp client on the outside interface + */ + if (PREDICT_FALSE + (proto0 == SNAT_PROTOCOL_UDP + && (vnet_buffer (b0)->ip.reass.l4_dst_port == + clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_server)) + && ip0->dst_address.as_u32 == 0xffffffff)) + goto trace0; + } + else { - next1 = def_slow; - goto trace01; + if (PREDICT_FALSE + (nat44_ed_not_translate + (sm, node, sw_if_index0, ip0, proto0, rx_fib_index0, + thread_index))) + goto trace0; } - } - make_ed_kv (&kv1, &ip1->src_address, &ip1->dst_address, - ip1->protocol, rx_fib_index1, udp1->src_port, - udp1->dst_port); + next0 = slow_path_ed (sm, b0, rx_fib_index0, &kv0, &s0, node, + next0, thread_index, now); - if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv1, &value1)) - { - if (is_slow_path) - { - if (is_output_feature) - { - if (PREDICT_FALSE - (nat44_ed_not_translate_output_feature - (sm, ip1, ip1->protocol, udp1->src_port, - udp1->dst_port, thread_index, sw_if_index1, - vnet_buffer (b1)->sw_if_index[VLIB_TX]))) - goto trace01; - } - else - { - if (PREDICT_FALSE (nat44_ed_not_translate (sm, node, - sw_if_index1, - ip1, proto1, - rx_fib_index1, - thread_index))) - goto trace01; - } - - next1 = - slow_path_ed (sm, b1, rx_fib_index1, &kv1, &s1, node, - next1, thread_index, now, tcp1); - - if (PREDICT_FALSE (next1 == NAT_NEXT_DROP)) - goto trace01; - - if (PREDICT_FALSE (!s1)) - goto trace01; - } - else - { - next1 = def_slow; - goto trace01; - } - } - else - { - s1 = pool_elt_at_index (tsm->sessions, value1.value); - } - - b1->flags |= VNET_BUFFER_F_IS_NATED; - - if (!is_output_feature) - vnet_buffer (b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index; - - old_addr1 = ip1->src_address.as_u32; - new_addr1 = ip1->src_address.as_u32 = s1->out2in.addr.as_u32; - sum1 = ip1->checksum; - sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, - src_address); - if (PREDICT_FALSE (is_twice_nat_session (s1))) - sum1 = ip_csum_update (sum1, ip1->dst_address.as_u32, - s1->ext_host_addr.as_u32, ip4_header_t, - dst_address); - ip1->checksum = ip_csum_fold (sum1); - - old_port1 = udp1->src_port; - new_port1 = udp1->src_port = s1->out2in.port; - - if (PREDICT_TRUE (proto1 == SNAT_PROTOCOL_TCP)) - { - sum1 = tcp1->checksum; - sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, - dst_address); - sum1 = ip_csum_update (sum1, old_port1, new_port1, ip4_header_t, - length); - if (PREDICT_FALSE 
(is_twice_nat_session (s1))) - { - sum1 = ip_csum_update (sum1, ip1->dst_address.as_u32, - s1->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum1 = ip_csum_update (sum1, tcp1->dst_port, - s1->ext_host_port, ip4_header_t, - length); - tcp1->dst_port = s1->ext_host_port; - ip1->dst_address.as_u32 = s1->ext_host_addr.as_u32; - } - tcp1->checksum = ip_csum_fold (sum1); - mss_clamping (sm, tcp1, &sum1); - tcp_packets++; - if (nat44_set_tcp_session_state_i2o - (sm, s1, tcp1, thread_index)) - goto trace01; - } - else if (udp1->checksum) - { - sum1 = udp1->checksum; - sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, - dst_address); - sum1 = ip_csum_update (sum1, old_port1, new_port1, ip4_header_t, - length); - - if (PREDICT_FALSE (is_twice_nat_session (s1))) - { - sum1 = ip_csum_update (sum1, ip1->dst_address.as_u32, - s1->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum1 = ip_csum_update (sum1, tcp1->dst_port, - s1->ext_host_port, ip4_header_t, - length); - udp1->dst_port = s1->ext_host_port; - ip1->dst_address.as_u32 = s1->ext_host_addr.as_u32; - } - udp1->checksum = ip_csum_fold (sum1); - udp_packets++; - } - else - { - if (PREDICT_FALSE (is_twice_nat_session (s1))) - { - udp1->dst_port = s1->ext_host_port; - ip1->dst_address.as_u32 = s1->ext_host_addr.as_u32; - } - udp_packets++; - } - - /* Accounting */ - nat44_session_update_counters (s1, now, - vlib_buffer_length_in_chain (vm, b1), - thread_index); - /* Per-user LRU list maintenance */ - nat44_session_update_lru (sm, s1, thread_index); - - trace01: - if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) - && (b1->flags & VLIB_BUFFER_IS_TRACED))) - { - nat_in2out_ed_trace_t *t = - vlib_add_trace (vm, node, b1, sizeof (*t)); - t->is_slow_path = is_slow_path; - t->sw_if_index = sw_if_index1; - t->next_index = next1; - t->session_index = ~0; - if (s1) - t->session_index = s1 - tsm->sessions; - } - - pkts_processed += next1 == nat_buffer_opaque (b1)->arc_next; - - - /* verify speculative enqueues, maybe switch current next frame */ - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - bi0, bi1, next0, next1); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0; - vlib_buffer_t *b0; - u32 next0, sw_if_index0, rx_fib_index0, iph_offset0 = 0, proto0, - new_addr0, old_addr0; - u16 old_port0, new_port0; - ip4_header_t *ip0; - udp_header_t *udp0; - tcp_header_t *tcp0; - icmp46_header_t *icmp0; - snat_session_t *s0 = 0; - clib_bihash_kv_16_8_t kv0, value0; - ip_csum_t sum0; - - /* speculatively enqueue b0 to the current next frame */ - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - if (is_output_feature) - { - // output feature fast path is enabled on the arc - // we need new arc_next feature - if (PREDICT_TRUE (!is_slow_path)) - vnet_feature_next (&nat_buffer_opaque (b0)->arc_next, b0); - - iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; - } - - next0 = nat_buffer_opaque (b0)->arc_next; - - ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + - iph_offset0); - - sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; - rx_fib_index0 = - fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, - sw_if_index0); - - if (PREDICT_FALSE (ip0->ttl == 1)) - { - vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded, - ICMP4_time_exceeded_ttl_exceeded_in_transit, - 0); - next0 = NAT_NEXT_ICMP_ERROR; - goto trace0; - 
} - - udp0 = ip4_next_header (ip0); - tcp0 = (tcp_header_t *) udp0; - icmp0 = (icmp46_header_t *) udp0; - proto0 = ip_proto_to_snat_proto (ip0->protocol); - - if (is_slow_path) - { - if (PREDICT_FALSE (proto0 == ~0)) - { - s0 = nat44_ed_in2out_unknown_proto (sm, b0, ip0, - rx_fib_index0, - thread_index, now, vm, - node); - if (!s0) - next0 = NAT_NEXT_DROP; - other_packets++; - goto trace0; - } - - if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) - { - next0 = icmp_in2out_ed_slow_path - (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, thread_index, &s0); - icmp_packets++; - goto trace0; - } - } - else - { - if (PREDICT_FALSE (proto0 == ~0)) - { - next0 = def_slow; - goto trace0; - } - - if (ip4_is_fragment (ip0)) - { - next0 = def_reass; - fragments++; - goto trace0; - } - - if (is_output_feature) - { - if (PREDICT_FALSE - (nat_not_translate_output_feature_fwd - (sm, ip0, thread_index, now, vm, b0))) - goto trace0; - } - - if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) - { - next0 = def_slow; - goto trace0; - } - } - - make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, - ip0->protocol, rx_fib_index0, udp0->src_port, - udp0->dst_port); - - if (clib_bihash_search_16_8 (&tsm->in2out_ed, &kv0, &value0)) - { - if (is_slow_path) - { - if (is_output_feature) - { - if (PREDICT_FALSE - (nat44_ed_not_translate_output_feature - (sm, ip0, ip0->protocol, udp0->src_port, - udp0->dst_port, thread_index, sw_if_index0, - vnet_buffer (b0)->sw_if_index[VLIB_TX]))) - goto trace0; - } - else - { - if (PREDICT_FALSE (nat44_ed_not_translate (sm, node, - sw_if_index0, - ip0, proto0, - rx_fib_index0, - thread_index))) - goto trace0; - } - - next0 = - slow_path_ed (sm, b0, rx_fib_index0, &kv0, &s0, node, - next0, thread_index, now, tcp0); + if (PREDICT_FALSE (next0 == NAT_NEXT_DROP)) + goto trace0; - if (PREDICT_FALSE (next0 == NAT_NEXT_DROP)) - goto trace0; + if (PREDICT_FALSE (!s0)) + goto trace0; - if (PREDICT_FALSE (!s0)) - goto trace0; - } - else - { - next0 = def_slow; - goto trace0; - } - } - else - { - s0 = pool_elt_at_index (tsm->sessions, value0.value); } b0->flags |= VNET_BUFFER_F_IS_NATED; @@ -1543,49 +1403,62 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, dst_address); ip0->checksum = ip_csum_fold (sum0); - old_port0 = udp0->src_port; - new_port0 = udp0->src_port = s0->out2in.port; + old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port; if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP)) { - sum0 = tcp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, - dst_address); - sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, - length); - if (PREDICT_FALSE (is_twice_nat_session (s0))) + if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment) { - sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, - s0->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); - tcp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + new_port0 = udp0->src_port = s0->out2in.port; + sum0 = tcp0->checksum; + sum0 = + ip_csum_update (sum0, old_addr0, new_addr0, + ip4_header_t, dst_address); + sum0 = + ip_csum_update (sum0, old_port0, new_port0, + ip4_header_t, length); + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + sum0 = + ip_csum_update (sum0, ip0->dst_address.as_u32, + s0->ext_host_addr.as_u32, + ip4_header_t, dst_address); + sum0 = + ip_csum_update (sum0, + vnet_buffer (b0)->ip. 
+ reass.l4_dst_port, s0->ext_host_port, + ip4_header_t, length); + tcp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + mss_clamping (sm, tcp0, &sum0); + tcp0->checksum = ip_csum_fold (sum0); } - mss_clamping (sm, tcp0, &sum0); - tcp0->checksum = ip_csum_fold (sum0); tcp_packets++; if (nat44_set_tcp_session_state_i2o - (sm, s0, tcp0, thread_index)) + (sm, now, s0, b0, thread_index)) goto trace0; } - else if (udp0->checksum) + else if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment + && udp0->checksum) { + new_port0 = udp0->src_port = s0->out2in.port; sum0 = udp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, - dst_address); - sum0 = ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, - length); + sum0 = + ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, + dst_address); + sum0 = + ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, + length); if (PREDICT_FALSE (is_twice_nat_session (s0))) { sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, s0->ext_host_addr.as_u32, ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); + sum0 = + ip_csum_update (sum0, + vnet_buffer (b0)->ip.reass.l4_dst_port, + s0->ext_host_port, ip4_header_t, length); udp0->dst_port = s0->ext_host_port; ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; } @@ -1594,18 +1467,22 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, } else { - if (PREDICT_FALSE (is_twice_nat_session (s0))) + if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment) { - udp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + new_port0 = udp0->src_port = s0->out2in.port; + if (PREDICT_FALSE (is_twice_nat_session (s0))) + { + udp0->dst_port = s0->ext_host_port; + ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; + } + udp_packets++; } - udp_packets++; } /* Accounting */ nat44_session_update_counters (s0, now, - vlib_buffer_length_in_chain (vm, b0), - thread_index); + vlib_buffer_length_in_chain + (vm, b0), thread_index); /* Per-user LRU list maintenance */ nat44_session_update_lru (sm, s0, thread_index); @@ -1615,12 +1492,14 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, { nat_in2out_ed_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); - t->is_slow_path = is_slow_path; t->sw_if_index = sw_if_index0; t->next_index = next0; - t->session_index = ~0; + t->is_slow_path = 1; + if (s0) t->session_index = s0 - tsm->sessions; + else + t->session_index = ~0; } pkts_processed += next0 == nat_buffer_opaque (b0)->arc_next; @@ -1647,367 +1526,6 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_increment_counter (vm, stats_node_index, NAT_IN2OUT_ED_ERROR_OTHER_PACKETS, other_packets); - vlib_node_increment_counter (vm, stats_node_index, - NAT_IN2OUT_ED_ERROR_FRAGMENTS, fragments); - - return frame->n_vectors; -} - -static inline uword -nat44_ed_in2out_reass_node_fn_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, - int is_output_feature) -{ - u32 n_left_from, *from, *to_next; - nat_next_t next_index; - u32 pkts_processed = 0, cached_fragments = 0; - snat_main_t *sm = &snat_main; - f64 now = vlib_time_now (vm); - u32 thread_index = vm->thread_index; - snat_main_per_thread_data_t *per_thread_data = - &sm->per_thread_data[thread_index]; - u32 *fragments_to_drop = 0; - u32 *fragments_to_loopback = 0; - - from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next_index = 
node->cached_next_index; - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0, sw_if_index0, proto0, rx_fib_index0, new_addr0, old_addr0; - u32 iph_offset0 = 0; - vlib_buffer_t *b0; - u32 next0; - u8 cached0 = 0; - ip4_header_t *ip0 = 0; - nat_reass_ip4_t *reass0; - udp_header_t *udp0; - tcp_header_t *tcp0; - icmp46_header_t *icmp0; - clib_bihash_kv_16_8_t kv0, value0; - snat_session_t *s0 = 0; - u16 old_port0, new_port0; - ip_csum_t sum0; - - /* speculatively enqueue b0 to the current next frame */ - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - next0 = nat_buffer_opaque (b0)->arc_next; - - sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; - rx_fib_index0 = - fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, - sw_if_index0); - - if (PREDICT_FALSE (nat_reass_is_drop_frag (0))) - { - next0 = NAT_NEXT_DROP; - b0->error = node->errors[NAT_IN2OUT_ED_ERROR_DROP_FRAGMENT]; - goto trace0; - } - - if (is_output_feature) - iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length; - - ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + - iph_offset0); - - udp0 = ip4_next_header (ip0); - tcp0 = (tcp_header_t *) udp0; - icmp0 = (icmp46_header_t *) udp0; - proto0 = ip_proto_to_snat_proto (ip0->protocol); - - reass0 = nat_ip4_reass_find_or_create (ip0->src_address, - ip0->dst_address, - ip0->fragment_id, - ip0->protocol, - 1, &fragments_to_drop); - - if (PREDICT_FALSE (!reass0)) - { - next0 = NAT_NEXT_DROP; - b0->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_REASS]; - nat_elog_notice ("maximum reassemblies exceeded"); - goto trace0; - } - - if (PREDICT_FALSE (ip4_is_first_fragment (ip0))) - { - if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP)) - { - if (is_output_feature) - { - if (PREDICT_FALSE - (nat_not_translate_output_feature_fwd - (sm, ip0, thread_index, now, vm, b0))) - reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE; - goto trace0; - } - - next0 = icmp_in2out_ed_slow_path - (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, thread_index, &s0); - - if (PREDICT_TRUE (next0 != NAT_NEXT_DROP)) - { - if (s0) - reass0->sess_index = s0 - per_thread_data->sessions; - else - reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE; - nat_ip4_reass_get_frags (reass0, - &fragments_to_loopback); - } - - goto trace0; - } - - make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, - ip0->protocol, rx_fib_index0, udp0->src_port, - udp0->dst_port); - - if (clib_bihash_search_16_8 - (&per_thread_data->in2out_ed, &kv0, &value0)) - { - if (is_output_feature) - { - if (PREDICT_FALSE - (nat44_ed_not_translate_output_feature - (sm, ip0, ip0->protocol, udp0->src_port, - udp0->dst_port, thread_index, sw_if_index0, - vnet_buffer (b0)->sw_if_index[VLIB_TX]))) - { - reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE; - nat_ip4_reass_get_frags (reass0, - &fragments_to_loopback); - goto trace0; - } - } - else - { - if (PREDICT_FALSE (nat44_ed_not_translate (sm, node, - sw_if_index0, - ip0, proto0, - rx_fib_index0, - thread_index))) - { - reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE; - nat_ip4_reass_get_frags (reass0, - &fragments_to_loopback); - goto trace0; - } - } - - next0 = slow_path_ed (sm, b0, rx_fib_index0, &kv0, - &s0, node, next0, thread_index, now, - tcp0); - - if (PREDICT_FALSE (next0 == NAT_NEXT_DROP)) - goto trace0; - - if (PREDICT_FALSE 
(!s0)) - { - reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE; - goto trace0; - } - - reass0->sess_index = s0 - per_thread_data->sessions; - } - else - { - s0 = pool_elt_at_index (per_thread_data->sessions, - value0.value); - reass0->sess_index = value0.value; - } - nat_ip4_reass_get_frags (reass0, &fragments_to_loopback); - } - else - { - if (reass0->flags & NAT_REASS_FLAG_ED_DONT_TRANSLATE) - goto trace0; - if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0)) - { - if (nat_ip4_reass_add_fragment - (thread_index, reass0, bi0, &fragments_to_drop)) - { - b0->error = node->errors[NAT_IN2OUT_ED_ERROR_MAX_FRAG]; - nat_elog_notice - ("maximum fragments per reassembly exceeded"); - next0 = NAT_NEXT_DROP; - goto trace0; - } - cached0 = 1; - goto trace0; - } - s0 = pool_elt_at_index (per_thread_data->sessions, - reass0->sess_index); - } - - old_addr0 = ip0->src_address.as_u32; - ip0->src_address = s0->out2in.addr; - new_addr0 = ip0->src_address.as_u32; - if (!is_output_feature) - vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index; - - sum0 = ip0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, - ip4_header_t, - src_address /* changed member */ ); - if (PREDICT_FALSE (is_twice_nat_session (s0))) - sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, - s0->ext_host_addr.as_u32, ip4_header_t, - dst_address); - ip0->checksum = ip_csum_fold (sum0); - - if (PREDICT_FALSE (ip4_is_first_fragment (ip0))) - { - old_port0 = udp0->src_port; - new_port0 = udp0->src_port = s0->out2in.port; - - if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP)) - { - sum0 = tcp0->checksum; - sum0 = ip_csum_update (sum0, old_addr0, new_addr0, - ip4_header_t, - dst_address /* changed member */ ); - sum0 = ip_csum_update (sum0, old_port0, new_port0, - ip4_header_t /* cheat */ , - length /* changed member */ ); - if (PREDICT_FALSE (is_twice_nat_session (s0))) - { - sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, - s0->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); - tcp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; - } - tcp0->checksum = ip_csum_fold (sum0); - } - else if (udp0->checksum) - { - sum0 = udp0->checksum; - sum0 = - ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, - dst_address); - sum0 = - ip_csum_update (sum0, old_port0, new_port0, ip4_header_t, - length); - if (PREDICT_FALSE (is_twice_nat_session (s0))) - { - sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32, - s0->ext_host_addr.as_u32, - ip4_header_t, dst_address); - sum0 = ip_csum_update (sum0, tcp0->dst_port, - s0->ext_host_port, ip4_header_t, - length); - udp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; - } - udp0->checksum = ip_csum_fold (sum0); - } - else - { - if (PREDICT_FALSE (is_twice_nat_session (s0))) - { - udp0->dst_port = s0->ext_host_port; - ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32; - } - } - } - - /* Hairpinning */ - nat44_reass_hairpinning (sm, b0, ip0, s0->out2in.port, - s0->ext_host_port, proto0, 1); - - /* Accounting */ - nat44_session_update_counters (s0, now, - vlib_buffer_length_in_chain (vm, b0), - thread_index); - /* Per-user LRU list maintenance */ - nat44_session_update_lru (sm, s0, thread_index); - - trace0: - if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) - && (b0->flags & VLIB_BUFFER_IS_TRACED))) - { - nat44_reass_trace_t *t = - vlib_add_trace (vm, node, b0, sizeof (*t)); - t->cached = cached0; - 
t->sw_if_index = sw_if_index0; - t->next_index = next0; - } - - if (cached0) - { - n_left_to_next++; - to_next--; - cached_fragments++; - } - else - { - pkts_processed += next0 == nat_buffer_opaque (b0)->arc_next; - - /* verify speculative enqueue, maybe switch current next frame */ - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - if (n_left_from == 0 && vec_len (fragments_to_loopback)) - { - from = vlib_frame_vector_args (frame); - u32 len = vec_len (fragments_to_loopback); - if (len <= VLIB_FRAME_SIZE) - { - clib_memcpy_fast (from, fragments_to_loopback, - sizeof (u32) * len); - n_left_from = len; - vec_reset_length (fragments_to_loopback); - } - else - { - clib_memcpy_fast (from, fragments_to_loopback + - (len - VLIB_FRAME_SIZE), - sizeof (u32) * VLIB_FRAME_SIZE); - n_left_from = VLIB_FRAME_SIZE; - _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE; - } - } - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - vlib_node_increment_counter (vm, sm->ed_in2out_reass_node_index, - NAT_IN2OUT_ED_ERROR_PROCESSED_FRAGMENTS, - pkts_processed); - vlib_node_increment_counter (vm, sm->ed_in2out_reass_node_index, - NAT_IN2OUT_ED_ERROR_CACHED_FRAGMENTS, - cached_fragments); - - nat_send_all_to_node (vm, fragments_to_drop, node, - &node->errors[NAT_IN2OUT_ED_ERROR_DROP_FRAGMENT], - NAT_NEXT_DROP); - - vec_free (fragments_to_drop); - vec_free (fragments_to_loopback); return frame->n_vectors; } @@ -2015,7 +1533,7 @@ VLIB_NODE_FN (nat44_ed_in2out_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return nat44_ed_in2out_node_fn_inline (vm, node, frame, 0, 0); + return nat44_ed_in2out_fast_path_node_fn_inline (vm, node, frame, 0); } /* *INDENT-OFF* */ @@ -2035,7 +1553,7 @@ VLIB_NODE_FN (nat44_ed_in2out_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return nat44_ed_in2out_node_fn_inline (vm, node, frame, 0, 1); + return nat44_ed_in2out_fast_path_node_fn_inline (vm, node, frame, 1); } /* *INDENT-OFF* */ @@ -2052,10 +1570,10 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_output_node) = { /* *INDENT-ON* */ VLIB_NODE_FN (nat44_ed_in2out_slowpath_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * + node, vlib_frame_t * frame) { - return nat44_ed_in2out_node_fn_inline (vm, node, frame, 1, 0); + return nat44_ed_in2out_slow_path_node_fn_inline (vm, node, frame, 0); } /* *INDENT-OFF* */ @@ -2072,11 +1590,11 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_slowpath_node) = { /* *INDENT-ON* */ VLIB_NODE_FN (nat44_ed_in2out_output_slowpath_node) (vlib_main_t * vm, - vlib_node_runtime_t * - node, + vlib_node_runtime_t + * node, vlib_frame_t * frame) { - return nat44_ed_in2out_node_fn_inline (vm, node, frame, 1, 1); + return nat44_ed_in2out_slow_path_node_fn_inline (vm, node, frame, 1); } /* *INDENT-OFF* */ @@ -2092,45 +1610,6 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_output_slowpath_node) = { }; /* *INDENT-ON* */ - -VLIB_NODE_FN (nat44_ed_in2out_reass_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - return nat44_ed_in2out_reass_node_fn_inline (vm, node, frame, 0); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (nat44_ed_in2out_reass_node) = { - .name = "nat44-ed-in2out-reass", - .vector_size = sizeof (u32), - .sibling_of = "nat-default", - .format_trace = format_nat44_reass_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - .n_errors = ARRAY_LEN (nat_in2out_ed_error_strings), - .error_strings = 
nat_in2out_ed_error_strings, -}; -/* *INDENT-ON* */ - -VLIB_NODE_FN (nat44_ed_in2out_reass_output_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - return nat44_ed_in2out_reass_node_fn_inline (vm, node, frame, 1); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (nat44_ed_in2out_reass_output_node) = { - .name = "nat44-ed-in2out-reass-output", - .vector_size = sizeof (u32), - .sibling_of = "nat-default", - .format_trace = format_nat44_reass_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - .n_errors = ARRAY_LEN (nat_in2out_ed_error_strings), - .error_strings = nat_in2out_ed_error_strings, -}; -/* *INDENT-ON* */ - static u8 * format_nat_pre_trace (u8 * s, va_list * args) { @@ -2140,9 +1619,8 @@ format_nat_pre_trace (u8 * s, va_list * args) return format (s, "in2out next_index %d", t->next_index); } -VLIB_NODE_FN (nat_pre_in2out_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) +VLIB_NODE_FN (nat_pre_in2out_node) + (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat_pre_node_fn_inline (vm, node, frame, NAT_NEXT_IN2OUT_ED_FAST_PATH);
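
Note on the new dynamic port allocation (nat_alloc_addr_and_port_ed above): rather than relying only on the per-address busy-port bookkeeping, the patch first claims the full endpoint-dependent tuple (outside address/port, remote address/port, protocol) in the sm->ed_ext_ports bihash via clib_bihash_add_del_16_8 (..., 2 /* is_add */), treating a zero return value as a successful claim of a previously unused key. Port probing starts at a random offset inside the thread's port range and walks linearly until a claim succeeds or port_per_thread attempts are exhausted. The standalone sketch below is not VPP code and only illustrates that loop; PORT_PER_THREAD, THREAD_OFFSET, the port_in_use[] array and rand() are stand-ins for sm->port_per_thread, the per-thread port offset, the ed_ext_ports bihash and random_u32().

/*
 * Illustrative sketch of the port-selection loop in
 * nat_alloc_addr_and_port_ed(): random start inside the per-thread range,
 * then linear probing until a free port is claimed or the range is
 * exhausted.  A plain bool array stands in for the ed_ext_ports bihash;
 * rand() replaces random_u32().  Names here are stand-ins, not VPP APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PORT_PER_THREAD 1024u   /* stand-in for sm->port_per_thread */
#define THREAD_OFFSET   1024u   /* ports below 1024 are never handed out */

static bool port_in_use[65536]; /* stand-in for the ed_ext_ports bihash */

/* mirrors snat_random_port(): uniform value in [min, max] */
static uint16_t
sketch_random_port (uint16_t min, uint16_t max)
{
  return (uint16_t) (min + rand () / (RAND_MAX / (max - min + 1) + 1));
}

/* returns the allocated port in host byte order, or 0 on exhaustion */
static uint16_t
sketch_alloc_port (uint32_t snat_thread_index)
{
  uint32_t base = PORT_PER_THREAD * snat_thread_index + THREAD_OFFSET;
  uint16_t port = sketch_random_port (1, PORT_PER_THREAD);
  uint16_t attempts = PORT_PER_THREAD;

  while (attempts--)
    {
      uint32_t portnum = base + port;
      if (!port_in_use[portnum])        /* the patch does this by adding the
                                           key with is_add=2 and treating
                                           rv == 0 as "not previously used" */
        {
          port_in_use[portnum] = true;
          return (uint16_t) portnum;
        }
      port = (uint16_t) (port % PORT_PER_THREAD) + 1;  /* wrap in the range */
    }
  return 0;                     /* out of ports for this thread */
}

int
main (void)
{
  for (int i = 0; i < 5; i++)
    printf ("allocated port %u\n", (unsigned) sketch_alloc_port (0));
  return 0;
}

Because the claimed key also carries the remote address and port, the same outside port may be allocated again towards a different destination, which is consistent with the switch to busy_##n##_port_refcounts counting in this patch.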
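
Note on the fast-path/slow-path split: the former single nat44_ed_in2out_node_fn_inline with is_slow_path and reassembly branches is replaced by dedicated fast- and slow-path inlines that read the shallow virtual reassembly data (vnet_buffer (b)->ip.reass) instead of going through the removed nat44-ed-in2out-reass nodes. The fast path translates only packets that hit a healthy existing session; everything else is punted to the slow path or dropped. The simplified sketch below is not VPP code; session_t and its fields are stand-ins for snat_session_t, and it only summarises the decision added to the fast path:

/*
 * Illustrative sketch of the fast-path session check in
 * nat44_ed_in2out_fast_path_node_fn_inline(): reuse a session only if it
 * is neither a closed TCP session nor expired; closed sessions still
 * inside their transitory timeout are dropped (TCP_CLOSED error),
 * everything else goes to the slow path.
 */
#include <stdio.h>

typedef enum { FP_TRANSLATE, FP_SLOW_PATH, FP_DROP } fp_action_t;

typedef struct
{
  double last_heard;            /* stand-in for s->last_heard */
  double tcp_close_timestamp;   /* 0 when the session is not closing */
  double timeout;               /* stand-in for nat44_session_get_timeout() */
} session_t;

static fp_action_t
fast_path_check (const session_t * s, double now)
{
  if (s == NULL)
    return FP_SLOW_PATH;        /* no in2out_ed entry: create in slow path */

  if (s->tcp_close_timestamp != 0)
    {
      if (now >= s->tcp_close_timestamp)
        return FP_SLOW_PATH;    /* closed and past the timeout: recreate */
      return FP_DROP;           /* still in transitory close: drop */
    }

  if (now >= s->last_heard + s->timeout)
    return FP_SLOW_PATH;        /* expired: free the session and recreate */

  return FP_TRANSLATE;          /* healthy session: translate inline */
}

int
main (void)
{
  session_t s = { .last_heard = 10.0, .tcp_close_timestamp = 0, .timeout = 7440.0 };
  printf ("action = %d\n", fast_path_check (&s, 20.0));
  return 0;
}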