X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Fip6_forward.c;h=b6eae6e381175d405e68976d9ef36a0afc693543;hb=b538dd868;hp=2c3879b13d1cf524e453bf4d1bbe08eb892ed6ea;hpb=47727c02df4c79c21c6ceffaff65222c32cf6996;p=vpp.git diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index 2c3879b13d1..b6eae6e3811 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -54,6 +54,7 @@ #include #endif #include +#include /* Flag used by IOAM code. Classifier sets it pop-hop-by-hop checks it */ #define OI_DECAP 0x80000000 @@ -563,212 +564,171 @@ VLIB_NODE_FN (ip6_load_balance_node) (vlib_main_t * vm, vlib_frame_t * frame) { vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; - u32 n_left_from, n_left_to_next, *from, *to_next; - ip_lookup_next_t next; + u32 n_left, *from; u32 thread_index = vm->thread_index; ip6_main_t *im = &ip6_main; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; + u16 nexts[VLIB_FRAME_SIZE], *next; from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next = node->cached_next_index; + n_left = frame->n_vectors; + next = nexts; - while (n_left_from > 0) - { - vlib_get_next_frame (vm, node, next, to_next, n_left_to_next); + vlib_get_buffers (vm, from, bufs, n_left); + while (n_left >= 4) + { + const load_balance_t *lb0, *lb1; + const ip6_header_t *ip0, *ip1; + u32 lbi0, hc0, lbi1, hc1; + const dpo_id_t *dpo0, *dpo1; - while (n_left_from >= 4 && n_left_to_next >= 2) + /* Prefetch next iteration. */ + { + vlib_prefetch_buffer_header (b[2], STORE); + vlib_prefetch_buffer_header (b[3], STORE); + + CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), STORE); + CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), STORE); + } + + ip0 = vlib_buffer_get_current (b[0]); + ip1 = vlib_buffer_get_current (b[1]); + lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; + lbi1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX]; + + lb0 = load_balance_get (lbi0); + lb1 = load_balance_get (lbi1); + + /* + * this node is for via FIBs we can re-use the hash value from the + * to node if present. + * We don't want to use the same hash value at each level in the recursion + * graph as that would lead to polarisation + */ + hc0 = hc1 = 0; + + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) { - ip_lookup_next_t next0, next1; - const load_balance_t *lb0, *lb1; - vlib_buffer_t *p0, *p1; - u32 pi0, lbi0, hc0, pi1, lbi1, hc1; - const ip6_header_t *ip0, *ip1; - const dpo_id_t *dpo0, *dpo1; - - /* Prefetch next iteration. */ - { - vlib_buffer_t *p2, *p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); - - vlib_prefetch_buffer_header (p2, STORE); - vlib_prefetch_buffer_header (p3, STORE); - - CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE); - CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE); - } - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; - - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - - ip0 = vlib_buffer_get_current (p0); - ip1 = vlib_buffer_get_current (p1); - lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; - lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; - - lb0 = load_balance_get (lbi0); - lb1 = load_balance_get (lbi1); - - /* - * this node is for via FIBs we can re-use the hash value from the - * to node if present. 
- * We don't want to use the same hash value at each level in the recursion - * graph as that would lead to polarisation - */ - hc0 = hc1 = 0; - - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash)) - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - vnet_buffer (p0)->ip.flow_hash >> 1; - } - else - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - ip6_compute_flow_hash (ip0, lb0->lb_hash_config); - } - dpo0 = - load_balance_get_fwd_bucket (lb0, - (hc0 & - lb0->lb_n_buckets_minus_1)); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + vnet_buffer (b[0])->ip.flow_hash >> 1; } else { - dpo0 = load_balance_get_bucket_i (lb0, 0); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + ip6_compute_flow_hash (ip0, lb0->lb_hash_config); } - if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); + } + if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + { + if (PREDICT_TRUE (vnet_buffer (b[1])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash)) - { - hc1 = vnet_buffer (p1)->ip.flow_hash = - vnet_buffer (p1)->ip.flow_hash >> 1; - } - else - { - hc1 = vnet_buffer (p1)->ip.flow_hash = - ip6_compute_flow_hash (ip1, lb1->lb_hash_config); - } - dpo1 = - load_balance_get_fwd_bucket (lb1, - (hc1 & - lb1->lb_n_buckets_minus_1)); + hc1 = vnet_buffer (b[1])->ip.flow_hash = + vnet_buffer (b[1])->ip.flow_hash >> 1; } else { - dpo1 = load_balance_get_bucket_i (lb1, 0); - } - - next0 = dpo0->dpoi_next_node; - next1 = dpo1->dpoi_next_node; - - /* Only process the HBH Option Header if explicitly configured to do so */ - if (PREDICT_FALSE - (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) - { - next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ? - (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0; + hc1 = vnet_buffer (b[1])->ip.flow_hash = + ip6_compute_flow_hash (ip1, lb1->lb_hash_config); } - /* Only process the HBH Option Header if explicitly configured to do so */ - if (PREDICT_FALSE - (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) - { - next1 = (dpo_is_adj (dpo1) && im->hbh_enabled) ? - (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next1; - } - - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; + dpo1 = load_balance_get_fwd_bucket + (lb1, (hc1 & (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); + } - vlib_increment_combined_counter - (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); - vlib_increment_combined_counter - (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + next[0] = dpo0->dpoi_next_node; + next[1] = dpo1->dpoi_next_node; - vlib_validate_buffer_enqueue_x2 (vm, node, next, - to_next, n_left_to_next, - pi0, pi1, next0, next1); + /* Only process the HBH Option Header if explicitly configured to do so */ + if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) + { + next[0] = (dpo_is_adj (dpo0) && im->hbh_enabled) ? 
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[0]; } - - while (n_left_from > 0 && n_left_to_next > 0) + /* Only process the HBH Option Header if explicitly configured to do so */ + if (PREDICT_FALSE (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) { - ip_lookup_next_t next0; - const load_balance_t *lb0; - vlib_buffer_t *p0; - u32 pi0, lbi0, hc0; - const ip6_header_t *ip0; - const dpo_id_t *dpo0; + next[1] = (dpo_is_adj (dpo1) && im->hbh_enabled) ? + (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[1]; + } - pi0 = from[0]; - to_next[0] = pi0; - from += 1; - to_next += 1; - n_left_to_next -= 1; - n_left_from -= 1; + vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; - p0 = vlib_get_buffer (vm, pi0); + vlib_increment_combined_counter + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0])); + vlib_increment_combined_counter + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b[1])); - ip0 = vlib_buffer_get_current (p0); - lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + b += 2; + next += 2; + n_left -= 2; + } + + while (n_left > 0) + { + const load_balance_t *lb0; + const ip6_header_t *ip0; + const dpo_id_t *dpo0; + u32 lbi0, hc0; + + ip0 = vlib_buffer_get_current (b[0]); + lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; - lb0 = load_balance_get (lbi0); + lb0 = load_balance_get (lbi0); - hc0 = 0; - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + hc0 = 0; + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + { + if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash)) - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - vnet_buffer (p0)->ip.flow_hash >> 1; - } - else - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - ip6_compute_flow_hash (ip0, lb0->lb_hash_config); - } - dpo0 = - load_balance_get_fwd_bucket (lb0, - (hc0 & - lb0->lb_n_buckets_minus_1)); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + vnet_buffer (b[0])->ip.flow_hash >> 1; } else { - dpo0 = load_balance_get_bucket_i (lb0, 0); - } - - next0 = dpo0->dpoi_next_node; - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - - /* Only process the HBH Option Header if explicitly configured to do so */ - if (PREDICT_FALSE - (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) - { - next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ? - (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0; + hc0 = vnet_buffer (b[0])->ip.flow_hash = + ip6_compute_flow_hash (ip0, lb0->lb_hash_config); } + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); + } - vlib_increment_combined_counter - (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + next[0] = dpo0->dpoi_next_node; + vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - vlib_validate_buffer_enqueue_x1 (vm, node, next, - to_next, n_left_to_next, - pi0, next0); + /* Only process the HBH Option Header if explicitly configured to do so */ + if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)) + { + next[0] = (dpo_is_adj (dpo0) && im->hbh_enabled) ? 
+ (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[0]; } - vlib_put_next_frame (vm, node, next, n_left_to_next); + vlib_increment_combined_counter + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0])); + + b += 1; + next += 1; + n_left -= 1; } + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + if (node->flags & VLIB_NODE_FLAG_TRACE) ip6_forward_next_trace (vm, node, frame, VLIB_TX); @@ -1010,7 +970,7 @@ ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0, } p0 = vlib_get_buffer (vm, p0->next_buffer); data_this_buffer = vlib_buffer_get_current (p0); - n_this_buffer = p0->current_length; + n_this_buffer = clib_min (p0->current_length, n_bytes_left); } sum16 = ~ip_csum_fold (sum0); @@ -1093,316 +1053,319 @@ VNET_FEATURE_ARC_INIT (ip6_local) = }; /* *INDENT-ON* */ -static uword +always_inline uword ip6_local_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int head_of_feature_arc) { ip6_main_t *im = &ip6_main; ip_lookup_main_t *lm = &im->lookup_main; - ip_local_next_t next_index; - u32 *from, *to_next, n_left_from, n_left_to_next; + u32 *from, n_left_from; vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_input_node.index); u8 arc_index = vnet_feat_arc_ip6_local.feature_arc_index; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; + u16 nexts[VLIB_FRAME_SIZE], *next; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; - next_index = node->cached_next_index; if (node->flags & VLIB_NODE_FLAG_TRACE) ip6_forward_next_trace (vm, node, frame, VLIB_TX); - while (n_left_from > 0) - { - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + vlib_get_buffers (vm, from, bufs, n_left_from); + b = bufs; + next = nexts; - while (n_left_from >= 4 && n_left_to_next >= 2) + while (n_left_from > 2) + { + /* Prefetch next iteration. 
*/ + if (n_left_from >= 6) { - vlib_buffer_t *p0, *p1; - ip6_header_t *ip0, *ip1; - udp_header_t *udp0, *udp1; - u32 pi0, ip_len0, udp_len0, flags0, next0; - u32 pi1, ip_len1, udp_len1, flags1, next1; - i32 len_diff0, len_diff1; - u8 error0, type0, good_l4_csum0, is_tcp_udp0; - u8 error1, type1, good_l4_csum1, is_tcp_udp1; - u32 udp_offset0, udp_offset1; - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; - - error0 = error1 = IP6_ERROR_UNKNOWN_PROTOCOL; - - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - - ip0 = vlib_buffer_get_current (p0); - ip1 = vlib_buffer_get_current (p1); - - if (head_of_feature_arc == 0) - goto skip_checks; - - vnet_buffer (p0)->l3_hdr_offset = p0->current_data; - vnet_buffer (p1)->l3_hdr_offset = p1->current_data; - - type0 = lm->builtin_protocol_by_ip_protocol[ip0->protocol]; - type1 = lm->builtin_protocol_by_ip_protocol[ip1->protocol]; - - flags0 = p0->flags; - flags1 = p1->flags; + vlib_prefetch_buffer_header (b[4], STORE); + vlib_prefetch_buffer_header (b[5], STORE); + vlib_prefetch_buffer_data (b[2], LOAD); + vlib_prefetch_buffer_data (b[3], LOAD); + } - is_tcp_udp0 = ip6_next_proto_is_tcp_udp (p0, ip0, &udp_offset0); - is_tcp_udp1 = ip6_next_proto_is_tcp_udp (p1, ip1, &udp_offset1); + u8 error[2]; + error[0] = IP6_ERROR_UNKNOWN_PROTOCOL; + error[1] = IP6_ERROR_UNKNOWN_PROTOCOL; - good_l4_csum0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT - || (flags0 & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM - || flags0 & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) - != 0; - good_l4_csum1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT - || (flags1 & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM - || flags1 & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) - != 0; - len_diff0 = 0; - len_diff1 = 0; + ip6_header_t *ip[2]; + ip[0] = vlib_buffer_get_current (b[0]); + ip[1] = vlib_buffer_get_current (b[1]); - if (PREDICT_TRUE (is_tcp_udp0)) + if (head_of_feature_arc) + { + vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data; + vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data; + + u8 type[2]; + type[0] = lm->builtin_protocol_by_ip_protocol[ip[0]->protocol]; + type[1] = lm->builtin_protocol_by_ip_protocol[ip[1]->protocol]; + + u32 flags[2]; + flags[0] = b[0]->flags; + flags[1] = b[1]->flags; + + u32 good_l4_csum[2]; + good_l4_csum[0] = + flags[0] & (VNET_BUFFER_F_L4_CHECKSUM_CORRECT | + VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | + VNET_BUFFER_F_OFFLOAD_UDP_CKSUM); + good_l4_csum[1] = + flags[1] & (VNET_BUFFER_F_L4_CHECKSUM_CORRECT | + VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | + VNET_BUFFER_F_OFFLOAD_UDP_CKSUM); + + u32 udp_offset[2] = { }; + u8 is_tcp_udp[2]; + is_tcp_udp[0] = + ip6_next_proto_is_tcp_udp (b[0], ip[0], &udp_offset[0]); + is_tcp_udp[1] = + ip6_next_proto_is_tcp_udp (b[1], ip[1], &udp_offset[1]); + i16 len_diff[2] = { 0 }; + if (PREDICT_TRUE (is_tcp_udp[0])) { - udp0 = (udp_header_t *) ((u8 *) ip0 + udp_offset0); - /* Don't verify UDP checksum for packets with explicit zero checksum. */ - good_l4_csum0 |= type0 == IP_BUILTIN_PROTOCOL_UDP - && udp0->checksum == 0; - /* Verify UDP length. */ - if (is_tcp_udp0 == IP_PROTOCOL_UDP) - { - ip_len0 = clib_net_to_host_u16 (ip0->payload_length); - udp_len0 = clib_net_to_host_u16 (udp0->length); - len_diff0 = ip_len0 - udp_len0; - } + udp_header_t *udp = + (udp_header_t *) ((u8 *) ip[0] + udp_offset[0]); + good_l4_csum[0] |= type[0] == IP_BUILTIN_PROTOCOL_UDP + && udp->checksum == 0; + /* optimistically verify UDP length. 
*/ + u16 ip_len, udp_len; + ip_len = clib_net_to_host_u16 (ip[0]->payload_length); + udp_len = clib_net_to_host_u16 (udp->length); + len_diff[0] = ip_len - udp_len; } - if (PREDICT_TRUE (is_tcp_udp1)) + if (PREDICT_TRUE (is_tcp_udp[1])) { - udp1 = (udp_header_t *) ((u8 *) ip1 + udp_offset1); - /* Don't verify UDP checksum for packets with explicit zero checksum. */ - good_l4_csum1 |= type1 == IP_BUILTIN_PROTOCOL_UDP - && udp1->checksum == 0; - /* Verify UDP length. */ - if (is_tcp_udp1 == IP_PROTOCOL_UDP) - { - ip_len1 = clib_net_to_host_u16 (ip1->payload_length); - udp_len1 = clib_net_to_host_u16 (udp1->length); - len_diff1 = ip_len1 - udp_len1; - } + udp_header_t *udp = + (udp_header_t *) ((u8 *) ip[1] + udp_offset[1]); + good_l4_csum[1] |= type[1] == IP_BUILTIN_PROTOCOL_UDP + && udp->checksum == 0; + /* optimistically verify UDP length. */ + u16 ip_len, udp_len; + ip_len = clib_net_to_host_u16 (ip[1]->payload_length); + udp_len = clib_net_to_host_u16 (udp->length); + len_diff[1] = ip_len - udp_len; } - good_l4_csum0 |= type0 == IP_BUILTIN_PROTOCOL_UNKNOWN; - good_l4_csum1 |= type1 == IP_BUILTIN_PROTOCOL_UNKNOWN; + good_l4_csum[0] |= type[0] == IP_BUILTIN_PROTOCOL_UNKNOWN; + good_l4_csum[1] |= type[1] == IP_BUILTIN_PROTOCOL_UNKNOWN; - len_diff0 = type0 == IP_BUILTIN_PROTOCOL_UDP ? len_diff0 : 0; - len_diff1 = type1 == IP_BUILTIN_PROTOCOL_UDP ? len_diff1 : 0; + len_diff[0] = type[0] == IP_BUILTIN_PROTOCOL_UDP ? len_diff[0] : 0; + len_diff[1] = type[1] == IP_BUILTIN_PROTOCOL_UDP ? len_diff[1] : 0; - if (PREDICT_FALSE (type0 != IP_BUILTIN_PROTOCOL_UNKNOWN - && !good_l4_csum0 - && !(flags0 & - VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))) + u8 need_csum[2]; + need_csum[0] = type[0] != IP_BUILTIN_PROTOCOL_UNKNOWN + && !good_l4_csum[0] + && !(flags[0] & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED); + need_csum[1] = type[1] != IP_BUILTIN_PROTOCOL_UNKNOWN + && !good_l4_csum[1] + && !(flags[1] & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED); + if (PREDICT_FALSE (need_csum[0])) { - flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, p0); - good_l4_csum0 = - (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0; + flags[0] = ip6_tcp_udp_icmp_validate_checksum (vm, b[0]); + good_l4_csum[0] = flags[0] & VNET_BUFFER_F_L4_CHECKSUM_CORRECT; } - if (PREDICT_FALSE (type1 != IP_BUILTIN_PROTOCOL_UNKNOWN - && !good_l4_csum1 - && !(flags1 & - VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))) + if (PREDICT_FALSE (need_csum[1])) { - flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, p1); - good_l4_csum1 = - (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0; + flags[1] = ip6_tcp_udp_icmp_validate_checksum (vm, b[1]); + good_l4_csum[1] = flags[1] & VNET_BUFFER_F_L4_CHECKSUM_CORRECT; } - error0 = error1 = IP6_ERROR_UNKNOWN_PROTOCOL; - error0 = len_diff0 < 0 ? IP6_ERROR_UDP_LENGTH : error0; - error1 = len_diff1 < 0 ? IP6_ERROR_UDP_LENGTH : error1; + error[0] = IP6_ERROR_UNKNOWN_PROTOCOL; + error[0] = len_diff[0] < 0 ? IP6_ERROR_UDP_LENGTH : error[0]; + error[1] = IP6_ERROR_UNKNOWN_PROTOCOL; + error[1] = len_diff[1] < 0 ? IP6_ERROR_UDP_LENGTH : error[1]; + + STATIC_ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP == + IP6_ERROR_UDP_CHECKSUM, + "Wrong IP6 errors constants"); + STATIC_ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP == + IP6_ERROR_ICMP_CHECKSUM, + "Wrong IP6 errors constants"); - ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP == - IP6_ERROR_UDP_CHECKSUM); - ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP == - IP6_ERROR_ICMP_CHECKSUM); - error0 = (!good_l4_csum0 ? 
IP6_ERROR_UDP_CHECKSUM + type0 : error0); - error1 = (!good_l4_csum1 ? IP6_ERROR_UDP_CHECKSUM + type1 : error1); + error[0] = + !good_l4_csum[0] ? IP6_ERROR_UDP_CHECKSUM + type[0] : error[0]; + error[1] = + !good_l4_csum[1] ? IP6_ERROR_UDP_CHECKSUM + type[1] : error[1]; /* Drop packets from unroutable hosts. */ /* If this is a neighbor solicitation (ICMP), skip source RPF check */ - if (error0 == IP6_ERROR_UNKNOWN_PROTOCOL && - type0 != IP_BUILTIN_PROTOCOL_ICMP && - !ip6_address_is_link_local_unicast (&ip0->src_address)) + u8 unroutable[2]; + unroutable[0] = error[0] == IP6_ERROR_UNKNOWN_PROTOCOL + && type[0] != IP_BUILTIN_PROTOCOL_ICMP + && !ip6_address_is_link_local_unicast (&ip[0]->src_address); + unroutable[1] = error[1] == IP6_ERROR_UNKNOWN_PROTOCOL + && type[1] != IP_BUILTIN_PROTOCOL_ICMP + && !ip6_address_is_link_local_unicast (&ip[1]->src_address); + if (PREDICT_FALSE (unroutable[0])) { - error0 = (!ip6_urpf_loose_check (im, p0, ip0) - ? IP6_ERROR_SRC_LOOKUP_MISS : error0); + error[0] = + !ip6_urpf_loose_check (im, b[0], + ip[0]) ? IP6_ERROR_SRC_LOOKUP_MISS + : error[0]; } - if (error1 == IP6_ERROR_UNKNOWN_PROTOCOL && - type1 != IP_BUILTIN_PROTOCOL_ICMP && - !ip6_address_is_link_local_unicast (&ip1->src_address)) + if (PREDICT_FALSE (unroutable[1])) { - error1 = (!ip6_urpf_loose_check (im, p1, ip1) - ? IP6_ERROR_SRC_LOOKUP_MISS : error1); + error[1] = + !ip6_urpf_loose_check (im, b[1], + ip[1]) ? IP6_ERROR_SRC_LOOKUP_MISS + : error[1]; } - vnet_buffer (p0)->ip.fib_index = - vnet_buffer (p0)->sw_if_index[VLIB_TX] != ~0 ? - vnet_buffer (p0)->sw_if_index[VLIB_TX] : - vnet_buffer (p0)->ip.fib_index; - - vnet_buffer (p1)->ip.fib_index = - vnet_buffer (p1)->sw_if_index[VLIB_TX] != ~0 ? - vnet_buffer (p1)->sw_if_index[VLIB_TX] : - vnet_buffer (p1)->ip.fib_index; - - skip_checks: - - next0 = lm->local_next_by_ip_protocol[ip0->protocol]; - next1 = lm->local_next_by_ip_protocol[ip1->protocol]; - - next0 = - error0 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0; - next1 = - error1 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next1; - - p0->error = error_node->errors[error0]; - p1->error = error_node->errors[error1]; - - if (head_of_feature_arc) + vnet_buffer (b[0])->ip.fib_index = + vnet_buffer (b[0])->sw_if_index[VLIB_TX] != ~0 ? + vnet_buffer (b[0])->sw_if_index[VLIB_TX] : + vnet_buffer (b[0])->ip.fib_index; + vnet_buffer (b[1])->ip.fib_index = + vnet_buffer (b[1])->sw_if_index[VLIB_TX] != ~0 ? + vnet_buffer (b[1])->sw_if_index[VLIB_TX] : + vnet_buffer (b[1])->ip.fib_index; + } /* head_of_feature_arc */ + + next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol]; + next[0] = + error[0] != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next[0]; + next[1] = lm->local_next_by_ip_protocol[ip[1]->protocol]; + next[1] = + error[1] != IP6_ERROR_UNKNOWN_PROTOCOL ? 
IP_LOCAL_NEXT_DROP : next[1]; + + b[0]->error = error_node->errors[0]; + b[1]->error = error_node->errors[1]; + + if (head_of_feature_arc) + { + u8 ip6_unknown[2]; + ip6_unknown[0] = error[0] == (u8) IP6_ERROR_UNKNOWN_PROTOCOL; + ip6_unknown[1] = error[1] == (u8) IP6_ERROR_UNKNOWN_PROTOCOL; + if (PREDICT_TRUE (ip6_unknown[0])) { - if (PREDICT_TRUE (error0 == (u8) IP6_ERROR_UNKNOWN_PROTOCOL)) - vnet_feature_arc_start (arc_index, - vnet_buffer (p0)->sw_if_index - [VLIB_RX], &next0, p0); - if (PREDICT_TRUE (error1 == (u8) IP6_ERROR_UNKNOWN_PROTOCOL)) - vnet_feature_arc_start (arc_index, - vnet_buffer (p1)->sw_if_index - [VLIB_RX], &next1, p1); + u32 next32 = next[0]; + vnet_feature_arc_start (arc_index, + vnet_buffer (b[0])->sw_if_index + [VLIB_RX], &next32, b[0]); + next[0] = next32; + } + if (PREDICT_TRUE (ip6_unknown[1])) + { + u32 next32 = next[1]; + vnet_feature_arc_start (arc_index, + vnet_buffer (b[1])->sw_if_index + [VLIB_RX], &next32, b[1]); + next[1] = next32; } - - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - pi0, pi1, next0, next1); } - while (n_left_from > 0 && n_left_to_next > 0) - { - vlib_buffer_t *p0; - ip6_header_t *ip0; - udp_header_t *udp0; - u32 pi0, ip_len0, udp_len0, flags0, next0; - i32 len_diff0; - u8 error0, type0, good_l4_csum0; - u32 udp_offset0; - u8 is_tcp_udp0; - - pi0 = to_next[0] = from[0]; - from += 1; - n_left_from -= 1; - to_next += 1; - n_left_to_next -= 1; - - error0 = IP6_ERROR_UNKNOWN_PROTOCOL; - - p0 = vlib_get_buffer (vm, pi0); - ip0 = vlib_buffer_get_current (p0); - - if (head_of_feature_arc == 0) - goto skip_check; + /* next */ + b += 2; + next += 2; + n_left_from -= 2; + } - vnet_buffer (p0)->l3_hdr_offset = p0->current_data; + while (n_left_from) + { + u8 error; + error = IP6_ERROR_UNKNOWN_PROTOCOL; - type0 = lm->builtin_protocol_by_ip_protocol[ip0->protocol]; - flags0 = p0->flags; - is_tcp_udp0 = ip6_next_proto_is_tcp_udp (p0, ip0, &udp_offset0); - good_l4_csum0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT - || (flags0 & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM - || flags0 & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) - != 0; + ip6_header_t *ip; + ip = vlib_buffer_get_current (b[0]); - len_diff0 = 0; - if (PREDICT_TRUE (is_tcp_udp0)) + if (head_of_feature_arc) + { + vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data; + u8 type = lm->builtin_protocol_by_ip_protocol[ip->protocol]; + + u32 flags = b[0]->flags; + u32 good_l4_csum = + flags & (VNET_BUFFER_F_L4_CHECKSUM_CORRECT | + VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | + VNET_BUFFER_F_OFFLOAD_UDP_CKSUM); + + u32 udp_offset; + i16 len_diff = 0; + u8 is_tcp_udp = ip6_next_proto_is_tcp_udp (b[0], ip, &udp_offset); + if (PREDICT_TRUE (is_tcp_udp)) { - udp0 = (udp_header_t *) ((u8 *) ip0 + udp_offset0); - /* Don't verify UDP checksum for packets with explicit zero - * checksum. */ - good_l4_csum0 |= type0 == IP_BUILTIN_PROTOCOL_UDP - && udp0->checksum == 0; - /* Verify UDP length. */ - if (is_tcp_udp0 == IP_PROTOCOL_UDP) - { - ip_len0 = clib_net_to_host_u16 (ip0->payload_length); - udp_len0 = clib_net_to_host_u16 (udp0->length); - len_diff0 = ip_len0 - udp_len0; - } + udp_header_t *udp = (udp_header_t *) ((u8 *) ip + udp_offset); + /* Don't verify UDP checksum for packets with explicit zero checksum. */ + good_l4_csum |= type == IP_BUILTIN_PROTOCOL_UDP + && udp->checksum == 0; + /* optimistically verify UDP length. 
*/ + u16 ip_len, udp_len; + ip_len = clib_net_to_host_u16 (ip->payload_length); + udp_len = clib_net_to_host_u16 (udp->length); + len_diff = ip_len - udp_len; } - good_l4_csum0 |= type0 == IP_BUILTIN_PROTOCOL_UNKNOWN; - len_diff0 = type0 == IP_BUILTIN_PROTOCOL_UDP ? len_diff0 : 0; + good_l4_csum |= type == IP_BUILTIN_PROTOCOL_UNKNOWN; + len_diff = type == IP_BUILTIN_PROTOCOL_UDP ? len_diff : 0; - if (PREDICT_FALSE (type0 != IP_BUILTIN_PROTOCOL_UNKNOWN - && !good_l4_csum0 - && !(flags0 & - VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))) + u8 need_csum = type != IP_BUILTIN_PROTOCOL_UNKNOWN && !good_l4_csum + && !(flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED); + if (PREDICT_FALSE (need_csum)) { - flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, p0); - good_l4_csum0 = - (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0; + flags = ip6_tcp_udp_icmp_validate_checksum (vm, b[0]); + good_l4_csum = flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT; } - error0 = IP6_ERROR_UNKNOWN_PROTOCOL; - error0 = len_diff0 < 0 ? IP6_ERROR_UDP_LENGTH : error0; + error = IP6_ERROR_UNKNOWN_PROTOCOL; + error = len_diff < 0 ? IP6_ERROR_UDP_LENGTH : error; - ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP == - IP6_ERROR_UDP_CHECKSUM); - ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP == - IP6_ERROR_ICMP_CHECKSUM); - error0 = (!good_l4_csum0 ? IP6_ERROR_UDP_CHECKSUM + type0 : error0); + STATIC_ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_UDP == + IP6_ERROR_UDP_CHECKSUM, + "Wrong IP6 errors constants"); + STATIC_ASSERT (IP6_ERROR_UDP_CHECKSUM + IP_BUILTIN_PROTOCOL_ICMP == + IP6_ERROR_ICMP_CHECKSUM, + "Wrong IP6 errors constants"); - /* If this is a neighbor solicitation (ICMP), skip src RPF check */ - if (error0 == IP6_ERROR_UNKNOWN_PROTOCOL && - type0 != IP_BUILTIN_PROTOCOL_ICMP && - !ip6_address_is_link_local_unicast (&ip0->src_address)) + error = !good_l4_csum ? IP6_ERROR_UDP_CHECKSUM + type : error; + + /* Drop packets from unroutable hosts. */ + /* If this is a neighbor solicitation (ICMP), skip source RPF check */ + u8 unroutable = error == IP6_ERROR_UNKNOWN_PROTOCOL + && type != IP_BUILTIN_PROTOCOL_ICMP + && !ip6_address_is_link_local_unicast (&ip->src_address); + if (PREDICT_FALSE (unroutable)) { - error0 = (!ip6_urpf_loose_check (im, p0, ip0) - ? IP6_ERROR_SRC_LOOKUP_MISS : error0); + error = + !ip6_urpf_loose_check (im, b[0], + ip) ? IP6_ERROR_SRC_LOOKUP_MISS : + error; } - vnet_buffer (p0)->ip.fib_index = - vnet_buffer (p0)->sw_if_index[VLIB_TX] != ~0 ? - vnet_buffer (p0)->sw_if_index[VLIB_TX] : - vnet_buffer (p0)->ip.fib_index; - - skip_check: + vnet_buffer (b[0])->ip.fib_index = + vnet_buffer (b[0])->sw_if_index[VLIB_TX] != ~0 ? + vnet_buffer (b[0])->sw_if_index[VLIB_TX] : + vnet_buffer (b[0])->ip.fib_index; + } /* head_of_feature_arc */ - next0 = lm->local_next_by_ip_protocol[ip0->protocol]; - next0 = - error0 != IP6_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0; + next[0] = lm->local_next_by_ip_protocol[ip->protocol]; + next[0] = + error != IP6_ERROR_UNKNOWN_PROTOCOL ? 
IP_LOCAL_NEXT_DROP : next[0]; - p0->error = error_node->errors[error0]; + b[0]->error = error_node->errors[0]; - if (head_of_feature_arc) + if (head_of_feature_arc) + { + if (PREDICT_TRUE (error == (u8) IP6_ERROR_UNKNOWN_PROTOCOL)) { - if (PREDICT_TRUE (error0 == (u8) IP6_ERROR_UNKNOWN_PROTOCOL)) - vnet_feature_arc_start (arc_index, - vnet_buffer (p0)->sw_if_index - [VLIB_RX], &next0, p0); + u32 next32 = next[0]; + vnet_feature_arc_start (arc_index, + vnet_buffer (b[0])->sw_if_index + [VLIB_RX], &next32, b[0]); + next[0] = next32; } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - pi0, next0); } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + /* next */ + b += 1; + next += 1; + n_left_from -= 1; } + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } @@ -1470,6 +1433,16 @@ ip6_register_protocol (u32 protocol, u32 node_index) vlib_node_add_next (vm, ip6_local_node.index, node_index); } +void +ip6_unregister_protocol (u32 protocol) +{ + ip6_main_t *im = &ip6_main; + ip_lookup_main_t *lm = &im->lookup_main; + + ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol)); + lm->local_next_by_ip_protocol[protocol] = IP_LOCAL_NEXT_PUNT; +} + clib_error_t * ip6_probe_neighbor (vlib_main_t * vm, ip6_address_t * dst, u32 sw_if_index, u8 refresh) @@ -1619,10 +1592,11 @@ ip6_mtu_check (vlib_buffer_t * b, u16 packet_bytes, } always_inline uword -ip6_rewrite_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, - int do_counters, int is_midchain, int is_mcast) +ip6_rewrite_inline_with_gso (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int do_counters, int is_midchain, int is_mcast, + int do_gso) { ip_lookup_main_t *lm = &ip6_main.lookup_main; u32 *from = vlib_frame_vector_args (frame); @@ -1768,17 +1742,28 @@ ip6_rewrite_inline (vlib_main_t * vm, } /* Check MTU of outgoing interface. 
*/ - ip6_mtu_check (p0, clib_net_to_host_u16 (ip0->payload_length) + - sizeof (ip6_header_t), + u16 ip0_len = + clib_net_to_host_u16 (ip0->payload_length) + + sizeof (ip6_header_t); + u16 ip1_len = + clib_net_to_host_u16 (ip1->payload_length) + + sizeof (ip6_header_t); + if (do_gso && (p0->flags & VNET_BUFFER_F_GSO)) + ip0_len = gso_mtu_sz (p0); + if (do_gso && (p1->flags & VNET_BUFFER_F_GSO)) + ip1_len = gso_mtu_sz (p1); + + + + ip6_mtu_check (p0, ip0_len, adj0[0].rewrite_header.max_l3_packet_bytes, is_locally_originated0, &next0, &error0); - ip6_mtu_check (p1, clib_net_to_host_u16 (ip1->payload_length) + - sizeof (ip6_header_t), + ip6_mtu_check (p1, ip1_len, adj1[0].rewrite_header.max_l3_packet_bytes, is_locally_originated1, &next1, &error1); /* Don't adjust the buffer for hop count issue; icmp-error node - * wants to see the IP headerr */ + * wants to see the IP header */ if (PREDICT_TRUE (error0 == IP6_ERROR_NONE)) { p0->current_data -= rw_len0; @@ -1793,6 +1778,10 @@ ip6_rewrite_inline (vlib_main_t * vm, vnet_feature_arc_start (lm->output_feature_arc_index, tx_sw_if_index0, &next0, p0); } + else + { + p0->error = error_node->errors[error0]; + } if (PREDICT_TRUE (error1 == IP6_ERROR_NONE)) { p1->current_data -= rw_len1; @@ -1807,6 +1796,18 @@ ip6_rewrite_inline (vlib_main_t * vm, vnet_feature_arc_start (lm->output_feature_arc_index, tx_sw_if_index1, &next1, p1); } + else + { + p1->error = error_node->errors[error1]; + } + + if (is_midchain) + { + /* before we paint on the next header, update the L4 + * checksums if required, since there's no offload on a tunnel */ + calc_checksums (vm, p0); + calc_checksums (vm, p1); + } /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_two_headers (adj0[0], adj1[0], @@ -1814,10 +1815,12 @@ ip6_rewrite_inline (vlib_main_t * vm, if (is_midchain) { - adj0->sub_type.midchain.fixup_func - (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); - adj1->sub_type.midchain.fixup_func - (vm, adj1, p1, adj1->sub_type.midchain.fixup_data); + if (adj0->sub_type.midchain.fixup_func) + adj0->sub_type.midchain.fixup_func + (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); + if (adj1->sub_type.midchain.fixup_func) + adj1->sub_type.midchain.fixup_func + (vm, adj1, p1, adj1->sub_type.midchain.fixup_data); } if (is_mcast) { @@ -1896,6 +1899,11 @@ ip6_rewrite_inline (vlib_main_t * vm, p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED; } + if (is_midchain) + { + calc_checksums (vm, p0); + } + /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -1912,8 +1920,13 @@ ip6_rewrite_inline (vlib_main_t * vm, } /* Check MTU of outgoing interface. 
*/ - ip6_mtu_check (p0, clib_net_to_host_u16 (ip0->payload_length) + - sizeof (ip6_header_t), + u16 ip0_len = + clib_net_to_host_u16 (ip0->payload_length) + + sizeof (ip6_header_t); + if (do_gso && (p0->flags & VNET_BUFFER_F_GSO)) + ip0_len = gso_mtu_sz (p0); + + ip6_mtu_check (p0, ip0_len, adj0[0].rewrite_header.max_l3_packet_bytes, is_locally_originated0, &next0, &error0); @@ -1934,11 +1947,16 @@ ip6_rewrite_inline (vlib_main_t * vm, vnet_feature_arc_start (lm->output_feature_arc_index, tx_sw_if_index0, &next0, p0); } + else + { + p0->error = error_node->errors[error0]; + } if (is_midchain) { - adj0->sub_type.midchain.fixup_func - (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); + if (adj0->sub_type.midchain.fixup_func) + adj0->sub_type.midchain.fixup_func + (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); } if (is_mcast) { @@ -1949,8 +1967,6 @@ ip6_rewrite_inline (vlib_main_t * vm, (u8 *) ip0); } - p0->error = error_node->errors[error0]; - from += 1; n_left_from -= 1; to_next += 1; @@ -1971,6 +1987,23 @@ ip6_rewrite_inline (vlib_main_t * vm, return frame->n_vectors; } +always_inline uword +ip6_rewrite_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int do_counters, int is_midchain, int is_mcast) +{ + vnet_main_t *vnm = vnet_get_main (); + if (PREDICT_FALSE (vnm->interface_main.gso_interface_count > 0)) + return ip6_rewrite_inline_with_gso (vm, node, frame, do_counters, + is_midchain, is_mcast, + 1 /* do_gso */ ); + else + return ip6_rewrite_inline_with_gso (vm, node, frame, do_counters, + is_midchain, is_mcast, + 0 /* no do_gso */ ); +} + VLIB_NODE_FN (ip6_rewrite_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
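
The standalone sketch below is not part of the patch above and is not VPP code; it only illustrates the "polarisation" remark carried over in the ip6_load_balance comment. That node reuses the saved vnet_buffer (b)->ip.flow_hash when a recursive FIB resolves through stacked load-balance objects, but stores it back shifted right by one bit each time, so successive levels do not all derive correlated bucket indices from the identical hash. The hash value and bucket count used here are invented for illustration.

/*
 * Minimal sketch, assuming a made-up flow hash and a power-of-two bucket
 * count.  It compares reusing one fixed hash at every recursion level
 * against the shift-per-level scheme used in ip6_load_balance.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
pick_bucket (uint32_t hc, uint32_t n_buckets)
{
  /* mirrors (hc & lb->lb_n_buckets_minus_1); n_buckets is a power of two */
  return hc & (n_buckets - 1);
}

int
main (void)
{
  uint32_t flow_hash = 0x5bd1e995;	/* stand-in for ip6_compute_flow_hash () */
  uint32_t n_buckets = 4;		/* same fan-out assumed at every level */
  uint32_t hc = flow_hash;
  int level;

  for (level = 0; level < 4; level++)
    {
      printf ("level %d: fixed hash -> bucket %u, shifted hash -> bucket %u\n",
	      level,
	      pick_bucket (flow_hash, n_buckets), pick_bucket (hc, n_buckets));
      hc = hc >> 1;		/* what the node stores back for the next level */
    }
  return 0;
}

With a fixed hash every level reports the same bucket, while the shifted hash walks through different buckets as bits are consumed, which is the behaviour the original comment describes.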