X-Git-Url: https://gerrit.fd.io/r/gitweb?p=vpp.git;a=blobdiff_plain;f=src%2Fplugins%2Fmap%2Fip6_map_t.c;h=21d6e10c5ae2764a3fa73301014de1ebbbb8b93a;hp=577dbe57001ace69526258dce01a71af2aa65b4d;hb=fc7344f;hpb=fccd1b2b694146506e0efdeee6c23674550fc5d5 diff --git a/src/plugins/map/ip6_map_t.c b/src/plugins/map/ip6_map_t.c index 577dbe57001..21d6e10c5ae 100644 --- a/src/plugins/map/ip6_map_t.c +++ b/src/plugins/map/ip6_map_t.c @@ -14,11 +14,9 @@ */ #include "map.h" -#include -#include #include - -#define IP6_MAP_T_DUAL_LOOP +#include +#include typedef enum { @@ -59,16 +57,13 @@ ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag, { u32 *ignore = NULL; map_ip4_reass_lock (); - map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address, - d->flags), - ip6_map_t_embedded_address (d, - &ip6-> - dst_address), - frag_id_6to4 (frag->identification), - (ip6->protocol == - IP_PROTOCOL_ICMP6) ? - IP_PROTOCOL_ICMP : ip6->protocol, - &ignore); + map_ip4_reass_t *r = + map_ip4_reass_get (map_get_ip4 (&ip6->src_address, d->ip6_src_len), + ip6_map_t_embedded_address (d, &ip6->dst_address), + frag_id_6to4 (frag->identification), + (ip6->protocol == + IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol, + &ignore); if (r) r->port = port; @@ -83,16 +78,13 @@ ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag, { u32 *ignore = NULL; map_ip4_reass_lock (); - map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address, - d->flags), - ip6_map_t_embedded_address (d, - &ip6-> - dst_address), - frag_id_6to4 (frag->identification), - (ip6->protocol == - IP_PROTOCOL_ICMP6) ? - IP_PROTOCOL_ICMP : ip6->protocol, - &ignore); + map_ip4_reass_t *r = + map_ip4_reass_get (map_get_ip4 (&ip6->src_address, d->ip6_src_len), + ip6_map_t_embedded_address (d, &ip6->dst_address), + frag_id_6to4 (frag->identification), + (ip6->protocol == + IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol, + &ignore); i32 ret = r ? r->port : -1; map_ip4_reass_unlock (); return ret; @@ -110,8 +102,9 @@ ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg) icmp6_to_icmp_ctx_t *ctx = arg; u32 ip4_sadr; - //Security check - //Note that this prevents an intermediate IPv6 router from answering the request + // Security check + // Note that this prevents an intermediate IPv6 router from answering + // the request. ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags); if (ip6->src_address.as_u64[0] != map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port) @@ -213,7 +206,7 @@ ip6_map_t_icmp (vlib_main_t * vm, if (vnet_buffer (p0)->map_t.mtu < p0->current_length) { - //Send to fragmentation node if necessary + // Send to fragmentation node if necessary vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG; @@ -242,14 +235,53 @@ ip6_map_t_icmp (vlib_main_t * vm, return frame->n_vectors; } -static int -ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx) +/* + * Translate IPv6 fragmented packet to IPv4. 
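+ *
+ * The IPv4 source and destination addresses have already been computed by
+ * the ip6-map-t classify node and stored in vnet_buffer()->map_t.v6.  This
+ * helper writes an IPv4 header immediately ahead of the L4 data, copying
+ * the fragment id, offset and more-fragments flag from the IPv6 fragment
+ * header (frag_id_6to4()), deriving TOS from the IPv6 traffic class and
+ * TTL from the hop limit, and mapping ICMPv6 to the ICMP protocol number.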
+ */ +always_inline int +map_ip6_to_ip4_fragmented (vlib_buffer_t * p) { - vlib_buffer_t *p = ctx; + ip6_header_t *ip6; + ip6_frag_hdr_t *frag; + ip4_header_t *ip4; + u16 frag_id; + u8 frag_more; + u16 frag_offset; + u8 l4_protocol; + u16 l4_offset; + + ip6 = vlib_buffer_get_current (p); + + if (ip6_parse + (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset)) + return -1; + + frag = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset); + ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4)); + vlib_buffer_advance (p, l4_offset - sizeof (*ip4)); + + frag_id = frag_id_6to4 (frag->identification); + frag_more = ip6_frag_hdr_more (frag); + frag_offset = ip6_frag_hdr_offset (frag); ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr; ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr; + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->length = + u16_net_add (ip6->payload_length, + sizeof (*ip4) - l4_offset + sizeof (*ip6)); + ip4->fragment_id = frag_id; + ip4->flags_and_fragment_offset = + clib_host_to_net_u16 (frag_offset | + (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0)); + ip4->ttl = ip6->hop_limit; + ip4->protocol = + (l4_protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : l4_protocol; + ip4->checksum = ip4_header_checksum (ip4); + return 0; } @@ -268,65 +300,6 @@ ip6_map_t_fragmented (vlib_main_t * vm, { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); -#ifdef IP6_MAP_T_DUAL_LOOP - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 pi0, pi1; - vlib_buffer_t *p0, *p1; - u32 next0, next1; - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; - - next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - - if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0)) - { - p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; - next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP; - } - else - { - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.next_index = - IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; - } - } - - if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1)) - { - p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; - next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP; - } - else - { - if (vnet_buffer (p1)->map_t.mtu < p1->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; - vnet_buffer (p1)->ip_frag.next_index = - IP4_FRAG_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; - } - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, pi0, pi1, - next0, next1); - } -#endif - while (n_left_from > 0 && n_left_to_next > 0) { u32 pi0; @@ -342,7 +315,7 @@ ip6_map_t_fragmented (vlib_main_t * vm, next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; p0 = vlib_get_buffer (vm, pi0); - if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0)) + if (map_ip6_to_ip4_fragmented (p0)) { p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP; @@ -351,7 +324,7 @@ ip6_map_t_fragmented (vlib_main_t * vm, { if (vnet_buffer (p0)->map_t.mtu < p0->current_length) { - //Send to fragmentation node if 
necessary + // Send to fragmentation node if necessary vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; @@ -368,6 +341,103 @@ ip6_map_t_fragmented (vlib_main_t * vm, return frame->n_vectors; } +/* + * Translate IPv6 UDP/TCP packet to IPv4. + */ +always_inline int +map_ip6_to_ip4_tcp_udp (vlib_buffer_t * p, bool udp_checksum) +{ + map_main_t *mm = &map_main; + ip6_header_t *ip6; + u16 *checksum; + ip_csum_t csum = 0; + ip4_header_t *ip4; + u16 fragment_id; + u16 flags; + u16 frag_offset; + u8 l4_protocol; + u16 l4_offset; + ip6_address_t old_src, old_dst; + + ip6 = vlib_buffer_get_current (p); + + if (ip6_parse + (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset)) + return -1; + + if (l4_protocol == IP_PROTOCOL_TCP) + { + tcp_header_t *tcp = ip6_next_header (ip6); + if (mm->tcp_mss > 0) + { + csum = tcp->checksum; + map_mss_clamping (tcp, &csum, mm->tcp_mss); + tcp->checksum = ip_csum_fold (csum); + } + checksum = &tcp->checksum; + } + else + { + udp_header_t *udp = ip6_next_header (ip6); + checksum = &udp->checksum; + } + + old_src.as_u64[0] = ip6->src_address.as_u64[0]; + old_src.as_u64[1] = ip6->src_address.as_u64[1]; + old_dst.as_u64[0] = ip6->dst_address.as_u64[0]; + old_dst.as_u64[1] = ip6->dst_address.as_u64[1]; + + ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4)); + + vlib_buffer_advance (p, l4_offset - sizeof (*ip4)); + + if (PREDICT_FALSE (frag_offset)) + { + // Only the first fragment + ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset); + fragment_id = frag_id_6to4 (hdr->identification); + flags = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + } + else + { + fragment_id = 0; + flags = 0; + } + + ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr; + ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr; + + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->length = + u16_net_add (ip6->payload_length, + sizeof (*ip4) + sizeof (*ip6) - l4_offset); + ip4->fragment_id = fragment_id; + ip4->flags_and_fragment_offset = flags; + ip4->ttl = ip6->hop_limit; + ip4->protocol = l4_protocol; + ip4->checksum = ip4_header_checksum (ip4); + + // UDP checksum is optional over IPv4 + if (!udp_checksum && l4_protocol == IP_PROTOCOL_UDP) + { + *checksum = 0; + } + else + { + csum = ip_csum_sub_even (*checksum, old_src.as_u64[0]); + csum = ip_csum_sub_even (csum, old_src.as_u64[1]); + csum = ip_csum_sub_even (csum, old_dst.as_u64[0]); + csum = ip_csum_sub_even (csum, old_dst.as_u64[1]); + csum = ip_csum_add_even (csum, ip4->dst_address.as_u32); + csum = ip_csum_add_even (csum, ip4->src_address.as_u32); + *checksum = ip_csum_fold (csum); + } + + return 0; +} + static uword ip6_map_t_tcp_udp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) @@ -383,65 +453,6 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); -#ifdef IP6_MAP_T_DUAL_LOOP - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 pi0, pi1; - vlib_buffer_t *p0, *p1; - ip6_mapt_tcp_udp_next_t next0, next1; - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; - next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; - - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - - if (ip6_to_ip4_tcp_udp (p0, 
ip6_to_ip4_set_cb, p0, 1)) - { - p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; - next0 = IP6_MAPT_TCP_UDP_NEXT_DROP; - } - else - { - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.next_index = - IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; - } - } - - if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1)) - { - p1->error = error_node->errors[MAP_ERROR_UNKNOWN]; - next1 = IP6_MAPT_TCP_UDP_NEXT_DROP; - } - else - { - if (vnet_buffer (p1)->map_t.mtu < p1->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; - vnet_buffer (p1)->ip_frag.next_index = - IP4_FRAG_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; - } - } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, - n_left_to_next, pi0, pi1, next0, - next1); - } -#endif - while (n_left_from > 0 && n_left_to_next > 0) { u32 pi0; @@ -457,7 +468,7 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, p0 = vlib_get_buffer (vm, pi0); - if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1)) + if (map_ip6_to_ip4_tcp_udp (p0, true)) { p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; next0 = IP6_MAPT_TCP_UDP_NEXT_DROP; @@ -466,7 +477,7 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, { if (vnet_buffer (p0)->map_t.mtu < p0->current_length) { - //Send to fragmentation node if necessary + // Send to fragmentation node if necessary vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; @@ -483,84 +494,6 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, return frame->n_vectors; } -static_always_inline void -ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60, - map_domain_t * d0, i32 * src_port0, - u8 * error0, ip6_mapt_next_t * next0, - u32 l4_len0, ip6_frag_hdr_t * frag0) -{ - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && - ip6_frag_hdr_offset (frag0))) - { - *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED; - if (d0->ea_bits_len == 0 && d0->rules) - { - *src_port0 = 0; - } - else - { - *src_port0 = ip6_map_fragment_get (ip60, frag0, d0); - *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED; - } - } - else - if (PREDICT_TRUE - (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) - { - *error0 = - l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0; - vnet_buffer (p0)->map_t.checksum_offset = - vnet_buffer (p0)->map_t.v6.l4_offset + 16; - *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; - *src_port0 = - (i32) * - ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); - } - else - if (PREDICT_TRUE - (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) - { - *error0 = - l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0; - vnet_buffer (p0)->map_t.checksum_offset = - vnet_buffer (p0)->map_t.v6.l4_offset + 6; - *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; - *src_port0 = - (i32) * - ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); - } - else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) - { - *error0 = - l4_len0 < sizeof (icmp46_header_t) ? 
MAP_ERROR_MALFORMED : *error0; - *next0 = IP6_MAPT_NEXT_MAPT_ICMP; - if (d0->ea_bits_len == 0 && d0->rules) - { - *src_port0 = 0; - } - else - if (((icmp46_header_t *) - u8_ptr_add (ip60, - vnet_buffer (p0)->map_t.v6.l4_offset))->code == - ICMP6_echo_reply - || ((icmp46_header_t *) - u8_ptr_add (ip60, - vnet_buffer (p0)->map_t.v6.l4_offset))->code == - ICMP6_echo_request) - { - *src_port0 = - (i32) * - ((u16 *) - u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6)); - } - } - else - { - //TODO: In case of 1:1 mapping, it might be possible to do something with those packets. - *error0 = MAP_ERROR_BAD_PROTOCOL; - } -} - static uword ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { @@ -577,197 +510,6 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); -#ifdef IP6_MAP_T_DUAL_LOOP - while (n_left_from >= 4 && n_left_to_next >= 2) - { - u32 pi0, pi1; - vlib_buffer_t *p0, *p1; - ip6_header_t *ip60, *ip61; - u8 error0, error1; - ip6_mapt_next_t next0, next1; - u32 l4_len0, l4_len1; - i32 src_port0, src_port1; - map_domain_t *d0, *d1; - ip6_frag_hdr_t *frag0, *frag1; - u32 saddr0, saddr1; - next0 = next1 = 0; //Because compiler whines - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; - - error0 = MAP_ERROR_NONE; - error1 = MAP_ERROR_NONE; - - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - ip60 = vlib_buffer_get_current (p0); - ip61 = vlib_buffer_get_current (p1); - - saddr0 = 0; /* TODO */ - saddr1 = 0; /* TODO */ - /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */ - - d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & saddr0, - &vnet_buffer (p0)->map_t.map_domain_index, - &error0); - d1 = - ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & saddr1, - &vnet_buffer (p1)->map_t.map_domain_index, - &error1); - - saddr0 = map_get_ip4 (&ip60->src_address, d0->flags); - saddr1 = map_get_ip4 (&ip61->src_address, d1->flags); - - vnet_buffer (p0)->map_t.v6.saddr = saddr0; - vnet_buffer (p1)->map_t.v6.saddr = saddr1; - vnet_buffer (p0)->map_t.v6.daddr = - ip6_map_t_embedded_address (d0, &ip60->dst_address); - vnet_buffer (p1)->map_t.v6.daddr = - ip6_map_t_embedded_address (d1, &ip61->dst_address); - vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; - vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0; - - if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length, - &(vnet_buffer (p0)->map_t. - v6.l4_protocol), - &(vnet_buffer (p0)->map_t. - v6.l4_offset), - &(vnet_buffer (p0)->map_t. - v6.frag_offset)))) - { - error0 = MAP_ERROR_MALFORMED; - next0 = IP6_MAPT_NEXT_DROP; - } - - if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length, - &(vnet_buffer (p1)->map_t. - v6.l4_protocol), - &(vnet_buffer (p1)->map_t. - v6.l4_offset), - &(vnet_buffer (p1)->map_t. - v6.frag_offset)))) - { - error1 = MAP_ERROR_MALFORMED; - next1 = IP6_MAPT_NEXT_DROP; - } - - src_port0 = src_port1 = -1; - l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) + - sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset; - l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) + - sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset; - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. 
- v6.frag_offset); - frag1 = - (ip6_frag_hdr_t *) u8_ptr_add (ip61, - vnet_buffer (p1)->map_t. - v6.frag_offset); - - ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0, - l4_len0, frag0); - ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1, - l4_len1, frag1); - - if (PREDICT_FALSE - ((src_port0 != -1) - && (ip60->src_address.as_u64[0] != - map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr, - src_port0) - || ip60->src_address.as_u64[1] != map_get_sfx_net (d0, - vnet_buffer - (p0)->map_t.v6.saddr, - src_port0)))) - { - error0 = MAP_ERROR_SEC_CHECK; - } - - if (PREDICT_FALSE - ((src_port1 != -1) - && (ip61->src_address.as_u64[0] != - map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr, - src_port1) - || ip61->src_address.as_u64[1] != map_get_sfx_net (d1, - vnet_buffer - (p1)->map_t.v6.saddr, - src_port1)))) - { - error1 = MAP_ERROR_SEC_CHECK; - } - - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && - !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) - u8_ptr_add (ip60, - vnet_buffer - (p0)->map_t. - v6.frag_offset))) - && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) - && (error0 == MAP_ERROR_NONE)) - { - ip6_map_fragment_cache (ip60, - (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer - (p0)->map_t. - v6.frag_offset), - d0, src_port0); - } - - if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset && - !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) - u8_ptr_add (ip61, - vnet_buffer - (p1)->map_t. - v6.frag_offset))) - && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules) - && (error1 == MAP_ERROR_NONE)) - { - ip6_map_fragment_cache (ip61, - (ip6_frag_hdr_t *) u8_ptr_add (ip61, - vnet_buffer - (p1)->map_t. - v6.frag_offset), - d1, src_port1); - } - - if (PREDICT_TRUE - (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) - { - vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - thread_index, - vnet_buffer (p0)-> - map_t.map_domain_index, 1, - clib_net_to_host_u16 - (ip60->payload_length)); - } - - if (PREDICT_TRUE - (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) - { - vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - thread_index, - vnet_buffer (p1)-> - map_t.map_domain_index, 1, - clib_net_to_host_u16 - (ip61->payload_length)); - } - - next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0; - next1 = (error1 != MAP_ERROR_NONE) ? 
IP6_MAPT_NEXT_DROP : next1; - p0->error = error_node->errors[error0]; - p1->error = error_node->errors[error1]; - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, - n_left_to_next, pi0, pi1, next0, - next1); - } -#endif - while (n_left_from > 0 && n_left_to_next > 0) { u32 pi0; @@ -775,7 +517,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip6_header_t *ip60; u8 error0; u32 l4_len0; - i32 src_port0; + i32 map_port0; map_domain_t *d0; ip6_frag_hdr_t *frag0; ip6_mapt_next_t next0 = 0; @@ -791,50 +533,52 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) p0 = vlib_get_buffer (vm, pi0); ip60 = vlib_buffer_get_current (p0); - //Save saddr in a different variable to not overwrite ip.adj_index - saddr = 0; /* TODO */ - /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */ - - d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & saddr, - &vnet_buffer (p0)->map_t.map_domain_index, - &error0); - - saddr = map_get_ip4 (&ip60->src_address, d0->flags); + d0 = + ip6_map_get_domain (&ip60->dst_address, + &vnet_buffer (p0)->map_t.map_domain_index, + &error0); + if (!d0) + { /* Guess it wasn't for us */ + vnet_feature_next (&next0, p0); + goto exit; + } - //FIXME: What if d0 is null + saddr = map_get_ip4 (&ip60->src_address, d0->ip6_src_len); vnet_buffer (p0)->map_t.v6.saddr = saddr; vnet_buffer (p0)->map_t.v6.daddr = ip6_map_t_embedded_address (d0, &ip60->dst_address); vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; - if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length, - &(vnet_buffer (p0)->map_t. - v6.l4_protocol), - &(vnet_buffer (p0)->map_t. - v6.l4_offset), - &(vnet_buffer (p0)->map_t. - v6.frag_offset)))) + if (PREDICT_FALSE + (ip6_parse (ip60, p0->current_length, + &(vnet_buffer (p0)->map_t.v6.l4_protocol), + &(vnet_buffer (p0)->map_t.v6.l4_offset), + &(vnet_buffer (p0)->map_t.v6.frag_offset)))) { - error0 = MAP_ERROR_MALFORMED; - next0 = IP6_MAPT_NEXT_DROP; + error0 = + error0 == MAP_ERROR_NONE ? MAP_ERROR_MALFORMED : error0; } - src_port0 = -1; - l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) + + map_port0 = -1; + l4_len0 = + (u32) clib_net_to_host_u16 (ip60->payload_length) + sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset; frag0 = (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.frag_offset); + vnet_buffer (p0)->map_t.v6. + frag_offset); - - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && - ip6_frag_hdr_offset (frag0))) + if (PREDICT_FALSE + (vnet_buffer (p0)->map_t.v6.frag_offset + && ip6_frag_hdr_offset (frag0))) { - src_port0 = ip6_map_fragment_get (ip60, frag0, d0); - error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY; - next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED; + map_port0 = ip6_map_fragment_get (ip60, frag0, d0); + if (map_port0 == -1) + error0 = + error0 == + MAP_ERROR_NONE ? 
MAP_ERROR_FRAGMENT_MEMORY : error0; + else + next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED; } else if (PREDICT_TRUE @@ -846,7 +590,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_buffer (p0)->map_t.checksum_offset = vnet_buffer (p0)->map_t.v6.l4_offset + 16; next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; - src_port0 = + map_port0 = (i32) * ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); @@ -861,7 +605,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_buffer (p0)->map_t.checksum_offset = vnet_buffer (p0)->map_t.v6.l4_offset + 6; next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; - src_port0 = + map_port0 = (i32) * ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); @@ -879,9 +623,9 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ICMP6_echo_reply || ((icmp46_header_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t.v6. - l4_offset))->code == ICMP6_echo_request) - src_port0 = + vnet_buffer (p0)->map_t.v6.l4_offset))-> + code == ICMP6_echo_request) + map_port0 = (i32) * ((u16 *) u8_ptr_add (ip60, @@ -889,41 +633,44 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) } else { - //TODO: In case of 1:1 mapping, it might be possible to do something with those packets. + // TODO: In case of 1:1 mapping, it might be possible to + // do something with those packets. error0 = MAP_ERROR_BAD_PROTOCOL; } - //Security check - if (PREDICT_FALSE - ((src_port0 != -1) - && (ip60->src_address.as_u64[0] != - map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr, - src_port0) - || ip60->src_address.as_u64[1] != map_get_sfx_net (d0, - vnet_buffer - (p0)->map_t.v6.saddr, - src_port0)))) + if (PREDICT_FALSE (map_port0 != -1) && + (ip60->src_address.as_u64[0] != + map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr, + map_port0) + || ip60->src_address.as_u64[1] != map_get_sfx_net (d0, + vnet_buffer + (p0)->map_t. + v6.saddr, + map_port0))) { - //Security check when src_port0 is not zero (non-first fragment, UDP or TCP) - error0 = MAP_ERROR_SEC_CHECK; + // Security check when map_port0 is not zero (non-first + // fragment, UDP or TCP) + error0 = + error0 == MAP_ERROR_NONE ? MAP_ERROR_SEC_CHECK : error0; } - //Fragmented first packet needs to be cached for following packets - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && - !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) - u8_ptr_add (ip60, - vnet_buffer - (p0)->map_t. - v6.frag_offset))) - && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) + // Fragmented first packet needs to be cached for following packets + if (PREDICT_FALSE + (vnet_buffer (p0)->map_t.v6.frag_offset + && !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.frag_offset))) + && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) { ip6_map_fragment_cache (ip60, (ip6_frag_hdr_t *) u8_ptr_add (ip60, vnet_buffer - (p0)->map_t. - v6.frag_offset), - d0, src_port0); + (p0)-> + map_t.v6. + frag_offset), + d0, map_port0); } if (PREDICT_TRUE @@ -931,14 +678,15 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, thread_index, - vnet_buffer (p0)-> - map_t.map_domain_index, 1, - clib_net_to_host_u16 - (ip60->payload_length)); + vnet_buffer (p0)->map_t. + map_domain_index, 1, + clib_net_to_host_u16 (ip60-> + payload_length)); } next0 = (error0 != MAP_ERROR_NONE) ? 
IP6_MAPT_NEXT_DROP : next0; p0->error = error_node->errors[error0]; + exit: vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, pi0, next0); @@ -949,7 +697,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) } static char *map_t_error_strings[] = { -#define _(sym,string) string, +#define _(sym, string) string, foreach_map_error #undef _ }; @@ -966,10 +714,11 @@ VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = { .error_strings = map_t_error_strings, .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT, - .next_nodes = { - [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup", - [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, - [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop", + .next_nodes = + { + [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */ @@ -986,10 +735,11 @@ VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = { .error_strings = map_t_error_strings, .n_next_nodes = IP6_MAPT_ICMP_N_NEXT, - .next_nodes = { - [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup", - [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, - [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop", + .next_nodes = + { + [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */ @@ -1006,15 +756,22 @@ VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = { .error_strings = map_t_error_strings, .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT, - .next_nodes = { - [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup", - [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, - [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop", + .next_nodes = + { + [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */ /* *INDENT-OFF* */ +VNET_FEATURE_INIT(ip4_map_t_feature, static) = { + .arc_name = "ip6-unicast", + .node_name = "ip6-map-t", + .runs_before = VNET_FEATURES("ip6-flow-classify"), +}; + VLIB_REGISTER_NODE(ip6_map_t_node) = { .function = ip6_map_t, .name = "ip6-map-t", @@ -1026,11 +783,12 @@ VLIB_REGISTER_NODE(ip6_map_t_node) = { .error_strings = map_t_error_strings, .n_next_nodes = IP6_MAPT_N_NEXT, - .next_nodes = { - [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp", - [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp", - [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented", - [IP6_MAPT_NEXT_DROP] = "error-drop", + .next_nodes = + { + [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp", + [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp", + [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented", + [IP6_MAPT_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */
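The net effect of this change: the generic ip6_to_ip4_tcp_udp()/ip6_to_ip4_fragmented() helpers and their ip6_to_ip4_set_cb callback are replaced by the MAP-local map_ip6_to_ip4_tcp_udp() and map_ip6_to_ip4_fragmented(), the IP6_MAP_T_DUAL_LOOP dual-packet loops are dropped, the domain is now looked up with ip6_map_get_domain() keyed on the IPv6 destination address, and ip6-map-t is registered as an "ip6-unicast" input feature so packets matching no domain fall through via vnet_feature_next(). The standalone sketch below illustrates the header field mapping those helpers perform; the ex_* structs, constants and function names are local to the example (they are not VPP types), and it skips buffer handling and checksum computation.

#include <arpa/inet.h>		/* htonl, htons, ntohl, ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Example-local header layouts; VPP uses its own ip4_header_t/ip6_header_t. */
typedef struct
{
  uint32_t ver_tc_flow;		/* version(4) | traffic class(8) | flow label(20) */
  uint16_t payload_length;	/* network byte order */
  uint8_t next_header;
  uint8_t hop_limit;
  uint8_t src[16], dst[16];
} ex_ip6_header_t;

typedef struct
{
  uint8_t ver_ihl;		/* 0x45 = IPv4, 5 x 32-bit words, no options */
  uint8_t tos;
  uint16_t total_length;	/* network byte order */
  uint16_t fragment_id;
  uint16_t flags_and_offset;	/* network order; offset is in 8-byte units */
  uint8_t ttl;
  uint8_t protocol;
  uint16_t checksum;
  uint32_t src, dst;		/* network byte order */
} ex_ip4_header_t;

#define EX_IP_PROTO_ICMP	1
#define EX_IP_PROTO_ICMP6	58
#define EX_IP4_MORE_FRAGMENTS	0x2000

/*
 * Map the outer IPv6 header onto an IPv4 header, given IPv4 addresses
 * already derived by the MAP algorithm and fragment data taken from the
 * IPv6 fragment extension header (all zero for an unfragmented packet).
 */
static void
ex_ip6_to_ip4 (const ex_ip6_header_t * ip6, ex_ip4_header_t * ip4,
	       uint32_t v4_src, uint32_t v4_dst, uint16_t l4_len,
	       uint16_t frag_id, uint16_t frag_offset, int more_frags)
{
  memset (ip4, 0, sizeof (*ip4));
  ip4->ver_ihl = 0x45;
  /* IPv4 TOS = IPv6 traffic class (bits 20..27 of the first word) */
  ip4->tos = (ntohl (ip6->ver_tc_flow) >> 20) & 0xff;
  ip4->total_length = htons (sizeof (*ip4) + l4_len);
  ip4->fragment_id = frag_id;
  ip4->flags_and_offset =
    htons (frag_offset | (more_frags ? EX_IP4_MORE_FRAGMENTS : 0));
  ip4->ttl = ip6->hop_limit;	/* hop limit becomes TTL */
  ip4->protocol = (ip6->next_header == EX_IP_PROTO_ICMP6)
    ? EX_IP_PROTO_ICMP : ip6->next_header;
  ip4->src = v4_src;
  ip4->dst = v4_dst;
  /* a real implementation computes ip4->checksum here */
}

int
main (void)
{
  ex_ip6_header_t ip6 = { 0 };
  ex_ip4_header_t ip4;

  ip6.ver_tc_flow = htonl (0x60000000 | (0x2e << 20));	/* traffic class 0x2e */
  ip6.payload_length = htons (20);
  ip6.next_header = 6;		/* TCP */
  ip6.hop_limit = 64;

  ex_ip6_to_ip4 (&ip6, &ip4, htonl (0xc0000201), htonl (0xc0000202),
		 ntohs (ip6.payload_length), 0, 0, 0);
  printf ("tos=0x%02x ttl=%u proto=%u len=%u\n", (unsigned) ip4.tos,
	  (unsigned) ip4.ttl, (unsigned) ip4.protocol,
	  (unsigned) ntohs (ip4.total_length));
  return 0;
}

In the real map_ip6_to_ip4_tcp_udp() above, the TCP/UDP checksum is additionally adjusted incrementally: the old IPv6 address words are subtracted from and the new IPv4 addresses added to the pseudo-header sum (ip_csum_sub_even()/ip_csum_add_even()); for UDP the checksum may instead be cleared, since it is optional over IPv4.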