X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Fip4_forward.c;h=332c483aa9df94648619f8c8f6986813dafe9d12;hb=f2984bbb0;hp=fd8559d99869c937d095f11019de75004e9afda6;hpb=067cd6229a47ea3ba8b59a2a04090e80afb5bd2c;p=vpp.git diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index fd8559d9986..332c483aa9d 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -49,12 +49,16 @@ #include /* for FIB table and entry creation */ #include /* for FIB uRPF check */ #include +#include #include #include #include #include /* for mFIB table and entry creation */ +#include #include +#include +#include /** @brief IPv4 lookup node. @node ip4-lookup @@ -88,14 +92,10 @@ ip_adjacency_t @c adj->lookup_next_index (where @c adj is the lookup result adjacency). */ -static uword -ip4_lookup (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_lookup_node) (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) { - return ip4_lookup_inline (vm, node, frame, - /* lookup_for_responses_to_locally_received_packets */ - 0); - + return ip4_lookup_inline (vm, node, frame); } static u8 *format_ip4_lookup_trace (u8 * s, va_list * args); @@ -103,7 +103,6 @@ static u8 *format_ip4_lookup_trace (u8 * s, va_list * args); /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_lookup_node) = { - .function = ip4_lookup, .name = "ip4-lookup", .vector_size = sizeof (u32), .format_trace = format_ip4_lookup_trace, @@ -112,209 +111,171 @@ VLIB_REGISTER_NODE (ip4_lookup_node) = }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_node, ip4_lookup); - -static uword -ip4_load_balance (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_load_balance_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; - u32 n_left_from, n_left_to_next, *from, *to_next; - ip_lookup_next_t next; + u32 n_left, *from; u32 thread_index = vm->thread_index; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; + u16 nexts[VLIB_FRAME_SIZE], *next; from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next = node->cached_next_index; + n_left = frame->n_vectors; + next = nexts; - if (node->flags & VLIB_NODE_FLAG_TRACE) - ip4_forward_next_trace (vm, node, frame, VLIB_TX); + vlib_get_buffers (vm, from, bufs, n_left); - while (n_left_from > 0) + while (n_left >= 4) { - vlib_get_next_frame (vm, node, next, to_next, n_left_to_next); - - - while (n_left_from >= 4 && n_left_to_next >= 2) - { - ip_lookup_next_t next0, next1; - const load_balance_t *lb0, *lb1; - vlib_buffer_t *p0, *p1; - u32 pi0, lbi0, hc0, pi1, lbi1, hc1; - const ip4_header_t *ip0, *ip1; - const dpo_id_t *dpo0, *dpo1; - - /* Prefetch next iteration. */ - { - vlib_buffer_t *p2, *p3; - - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); + const load_balance_t *lb0, *lb1; + const ip4_header_t *ip0, *ip1; + u32 lbi0, hc0, lbi1, hc1; + const dpo_id_t *dpo0, *dpo1; - vlib_prefetch_buffer_header (p2, STORE); - vlib_prefetch_buffer_header (p3, STORE); - - CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE); - CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE); - } - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; + /* Prefetch next iteration. 
*/ + { + vlib_prefetch_buffer_header (b[2], LOAD); + vlib_prefetch_buffer_header (b[3], LOAD); - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); + CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD); + CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD); + } - ip0 = vlib_buffer_get_current (p0); - ip1 = vlib_buffer_get_current (p1); - lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; - lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + ip0 = vlib_buffer_get_current (b[0]); + ip1 = vlib_buffer_get_current (b[1]); + lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; + lbi1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX]; - lb0 = load_balance_get (lbi0); - lb1 = load_balance_get (lbi1); + lb0 = load_balance_get (lbi0); + lb1 = load_balance_get (lbi1); - /* - * this node is for via FIBs we can re-use the hash value from the - * to node if present. - * We don't want to use the same hash value at each level in the recursion - * graph as that would lead to polarisation - */ - hc0 = hc1 = 0; + /* + * this node is for via FIBs we can re-use the hash value from the + * to node if present. + * We don't want to use the same hash value at each level in the recursion + * graph as that would lead to polarisation + */ + hc0 = hc1 = 0; - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + { + if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash)) - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - vnet_buffer (p0)->ip.flow_hash >> 1; - } - else - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - ip4_compute_flow_hash (ip0, lb0->lb_hash_config); - } - dpo0 = load_balance_get_fwd_bucket - (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + vnet_buffer (b[0])->ip.flow_hash >> 1; } else { - dpo0 = load_balance_get_bucket_i (lb0, 0); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + ip4_compute_flow_hash (ip0, lb0->lb_hash_config); } - if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); + } + if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + { + if (PREDICT_TRUE (vnet_buffer (b[1])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash)) - { - hc1 = vnet_buffer (p1)->ip.flow_hash = - vnet_buffer (p1)->ip.flow_hash >> 1; - } - else - { - hc1 = vnet_buffer (p1)->ip.flow_hash = - ip4_compute_flow_hash (ip1, lb1->lb_hash_config); - } - dpo1 = load_balance_get_fwd_bucket - (lb1, (hc1 & (lb1->lb_n_buckets_minus_1))); + hc1 = vnet_buffer (b[1])->ip.flow_hash = + vnet_buffer (b[1])->ip.flow_hash >> 1; } else { - dpo1 = load_balance_get_bucket_i (lb1, 0); + hc1 = vnet_buffer (b[1])->ip.flow_hash = + ip4_compute_flow_hash (ip1, lb1->lb_hash_config); } + dpo1 = load_balance_get_fwd_bucket + (lb1, (hc1 & (lb1->lb_n_buckets_minus_1))); + } + else + { + dpo1 = load_balance_get_bucket_i (lb1, 0); + } - next0 = dpo0->dpoi_next_node; - next1 = dpo1->dpoi_next_node; - - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; - - vlib_increment_combined_counter - (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); - vlib_increment_combined_counter - (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + next[0] = dpo0->dpoi_next_node; + next[1] = dpo1->dpoi_next_node; - vlib_validate_buffer_enqueue_x2 (vm, node, next, - to_next, n_left_to_next, - pi0, pi1, next0, next1); - } + 
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; - while (n_left_from > 0 && n_left_to_next > 0) - { - ip_lookup_next_t next0; - const load_balance_t *lb0; - vlib_buffer_t *p0; - u32 pi0, lbi0, hc0; - const ip4_header_t *ip0; - const dpo_id_t *dpo0; + vlib_increment_combined_counter + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0])); + vlib_increment_combined_counter + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b[1])); - pi0 = from[0]; - to_next[0] = pi0; - from += 1; - to_next += 1; - n_left_to_next -= 1; - n_left_from -= 1; + b += 2; + next += 2; + n_left -= 2; + } - p0 = vlib_get_buffer (vm, pi0); + while (n_left > 0) + { + const load_balance_t *lb0; + const ip4_header_t *ip0; + const dpo_id_t *dpo0; + u32 lbi0, hc0; - ip0 = vlib_buffer_get_current (p0); - lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + ip0 = vlib_buffer_get_current (b[0]); + lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; - lb0 = load_balance_get (lbi0); + lb0 = load_balance_get (lbi0); - hc0 = 0; - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + hc0 = 0; + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + { + if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash)) { - if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash)) - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - vnet_buffer (p0)->ip.flow_hash >> 1; - } - else - { - hc0 = vnet_buffer (p0)->ip.flow_hash = - ip4_compute_flow_hash (ip0, lb0->lb_hash_config); - } - dpo0 = load_balance_get_fwd_bucket - (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + vnet_buffer (b[0])->ip.flow_hash >> 1; } else { - dpo0 = load_balance_get_bucket_i (lb0, 0); + hc0 = vnet_buffer (b[0])->ip.flow_hash = + ip4_compute_flow_hash (ip0, lb0->lb_hash_config); } + dpo0 = load_balance_get_fwd_bucket + (lb0, (hc0 & (lb0->lb_n_buckets_minus_1))); + } + else + { + dpo0 = load_balance_get_bucket_i (lb0, 0); + } - next0 = dpo0->dpoi_next_node; - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - - vlib_increment_combined_counter - (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + next[0] = dpo0->dpoi_next_node; + vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - vlib_validate_buffer_enqueue_x1 (vm, node, next, - to_next, n_left_to_next, - pi0, next0); - } + vlib_increment_combined_counter + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0])); - vlib_put_next_frame (vm, node, next, n_left_to_next); + b += 1; + next += 1; + n_left -= 1; } + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + if (node->flags & VLIB_NODE_FLAG_TRACE) + ip4_forward_next_trace (vm, node, frame, VLIB_TX); + return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_load_balance_node) = { - .function = ip4_load_balance, .name = "ip4-load-balance", .vector_size = sizeof (u32), .sibling_of = "ip4-lookup", - .format_trace = - format_ip4_lookup_trace, + .format_trace = format_ip4_lookup_trace, }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ip4_load_balance_node, ip4_load_balance); - +#ifndef CLIB_MARCH_VARIANT /* get first interface address */ ip4_address_t * ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index, @@ -339,30 +300,29 @@ ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index, *result_ia = result ? 
ia : 0; return result; } +#endif static void -ip4_add_interface_routes (u32 sw_if_index, - ip4_main_t * im, u32 fib_index, - ip_interface_address_t * a) +ip4_add_subnet_bcast_route (u32 fib_index, + fib_prefix_t *pfx, + u32 sw_if_index) { - ip_lookup_main_t *lm = &im->lookup_main; - ip4_address_t *address = ip_interface_address_get_address (lm, a); - fib_prefix_t pfx = { - .fp_len = a->address_length, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4 = *address, - }; + vnet_sw_interface_flags_t iflags; - if (pfx.fp_len <= 30) + iflags = vnet_sw_interface_get_flags(vnet_get_main(), sw_if_index); + + fib_table_entry_special_remove(fib_index, + pfx, + FIB_SOURCE_INTERFACE); + + if (iflags & VNET_SW_INTERFACE_FLAG_DIRECTED_BCAST) { - /* a /30 or shorter - add a glean for the network address */ - fib_table_entry_update_one_path (fib_index, &pfx, + fib_table_entry_update_one_path (fib_index, pfx, FIB_SOURCE_INTERFACE, - (FIB_ENTRY_FLAG_CONNECTED | - FIB_ENTRY_FLAG_ATTACHED), + FIB_ENTRY_FLAG_NONE, DPO_PROTO_IP4, /* No next-hop address */ - NULL, + &ADJ_BCAST_ADDR, sw_if_index, // invalid FIB index ~0, @@ -370,49 +330,134 @@ ip4_add_interface_routes (u32 sw_if_index, // no out-label stack NULL, FIB_ROUTE_PATH_FLAG_NONE); - - /* Add the two broadcast addresses as drop */ - fib_prefix_t net_pfx = { - .fp_len = 32, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[pfx.fp_len], - }; - if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32) - fib_table_entry_special_add(fib_index, - &net_pfx, - FIB_SOURCE_INTERFACE, - (FIB_ENTRY_FLAG_DROP | - FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT)); - net_pfx.fp_addr.ip4.as_u32 |= ~im->fib_masks[pfx.fp_len]; - if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32) + } + else + { fib_table_entry_special_add(fib_index, - &net_pfx, + pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_DROP | FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT)); } - else if (pfx.fp_len == 31) +} + +static void +ip4_add_interface_prefix_routes (ip4_main_t *im, + u32 sw_if_index, + u32 fib_index, + ip_interface_address_t * a) +{ + ip_lookup_main_t *lm = &im->lookup_main; + ip_interface_prefix_t *if_prefix; + ip4_address_t *address = ip_interface_address_get_address (lm, a); + + ip_interface_prefix_key_t key = { + .prefix = { + .fp_len = a->address_length, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[a->address_length], + }, + .sw_if_index = sw_if_index, + }; + + fib_prefix_t pfx_special = { + .fp_proto = FIB_PROTOCOL_IP4, + }; + + /* If prefix already set on interface, just increment ref count & return */ + if_prefix = ip_get_interface_prefix (lm, &key); + if (if_prefix) { - u32 mask = clib_host_to_net_u32(1); - fib_prefix_t net_pfx = pfx; + if_prefix->ref_count += 1; + return; + } - net_pfx.fp_len = 32; - net_pfx.fp_addr.ip4.as_u32 ^= mask; + /* New prefix - allocate a pool entry, initialize it, add to the hash */ + pool_get (lm->if_prefix_pool, if_prefix); + if_prefix->ref_count = 1; + if_prefix->src_ia_index = a - lm->if_address_pool; + clib_memcpy (&if_prefix->key, &key, sizeof (key)); + mhash_set (&lm->prefix_to_if_prefix_index, &key, + if_prefix - lm->if_prefix_pool, 0 /* old value */); - /* a /31 - add the other end as an attached host */ - fib_table_entry_update_one_path (fib_index, &net_pfx, - FIB_SOURCE_INTERFACE, - (FIB_ENTRY_FLAG_ATTACHED), - DPO_PROTO_IP4, - &net_pfx.fp_addr, - sw_if_index, - // invalid FIB index + pfx_special.fp_len = a->address_length; + pfx_special.fp_addr.ip4.as_u32 = address->as_u32; + + /* set the glean 
route for the prefix */ + fib_table_entry_update_one_path (fib_index, &pfx_special, + FIB_SOURCE_INTERFACE, + (FIB_ENTRY_FLAG_CONNECTED | + FIB_ENTRY_FLAG_ATTACHED), + DPO_PROTO_IP4, + /* No next-hop address */ + NULL, + sw_if_index, + /* invalid FIB index */ + ~0, + 1, + /* no out-label stack */ + NULL, + FIB_ROUTE_PATH_FLAG_NONE); + + /* length <= 30 - add glean, drop first address, maybe drop bcast address */ + if (a->address_length <= 30) + { + /* set a drop route for the base address of the prefix */ + pfx_special.fp_len = 32; + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 & im->fib_masks[a->address_length]; + + if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32) + fib_table_entry_special_add (fib_index, &pfx_special, + FIB_SOURCE_INTERFACE, + (FIB_ENTRY_FLAG_DROP | + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT)); + + /* set a route for the broadcast address of the prefix */ + pfx_special.fp_len = 32; + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 | ~im->fib_masks[a->address_length]; + if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32) + ip4_add_subnet_bcast_route (fib_index, &pfx_special, sw_if_index); + + + } + /* length == 31 - add an attached route for the other address */ + else if (a->address_length == 31) + { + pfx_special.fp_len = 32; + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 ^ clib_host_to_net_u32(1); + + fib_table_entry_update_one_path (fib_index, &pfx_special, + FIB_SOURCE_INTERFACE, + (FIB_ENTRY_FLAG_ATTACHED), + DPO_PROTO_IP4, + &pfx_special.fp_addr, + sw_if_index, + /* invalid FIB index */ ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } - pfx.fp_len = 32; +} + +static void +ip4_add_interface_routes (u32 sw_if_index, + ip4_main_t * im, u32 fib_index, + ip_interface_address_t * a) +{ + ip_lookup_main_t *lm = &im->lookup_main; + ip4_address_t *address = ip_interface_address_get_address (lm, a); + fib_prefix_t pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4 = *address, + }; + + /* set special routes for the prefix if needed */ + ip4_add_interface_prefix_routes (im, sw_if_index, fib_index, a); if (sw_if_index < vec_len (lm->classify_table_index_by_sw_if_index)) { @@ -449,53 +494,115 @@ ip4_add_interface_routes (u32 sw_if_index, } static void -ip4_del_interface_routes (ip4_main_t * im, - u32 fib_index, - ip4_address_t * address, u32 address_length) +ip4_del_interface_prefix_routes (ip4_main_t * im, + u32 sw_if_index, + u32 fib_index, + ip4_address_t * address, + u32 address_length) { - fib_prefix_t pfx = { - .fp_len = address_length, + ip_lookup_main_t *lm = &im->lookup_main; + ip_interface_prefix_t *if_prefix; + + ip_interface_prefix_key_t key = { + .prefix = { + .fp_len = address_length, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[address_length], + }, + .sw_if_index = sw_if_index, + }; + + fib_prefix_t pfx_special = { + .fp_len = 32, .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4 = *address, }; - if (pfx.fp_len <= 30) + if_prefix = ip_get_interface_prefix (lm, &key); + if (!if_prefix) { - fib_prefix_t net_pfx = { - .fp_len = 32, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[pfx.fp_len], - }; - if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32) - fib_table_entry_special_remove(fib_index, - &net_pfx, - FIB_SOURCE_INTERFACE); - net_pfx.fp_addr.ip4.as_u32 |= ~im->fib_masks[pfx.fp_len]; - if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32) - fib_table_entry_special_remove(fib_index, - &net_pfx, - FIB_SOURCE_INTERFACE); - fib_table_entry_delete 
(fib_index, &pfx, FIB_SOURCE_INTERFACE); + clib_warning ("Prefix not found while deleting %U", + format_ip4_address_and_length, address, address_length); + return; } - else if (pfx.fp_len == 31) - { - u32 mask = clib_host_to_net_u32(1); - fib_prefix_t net_pfx = pfx; - net_pfx.fp_len = 32; - net_pfx.fp_addr.ip4.as_u32 ^= mask; + if_prefix->ref_count -= 1; + + /* + * Routes need to be adjusted if deleting last intf addr in prefix + * + * We're done now otherwise + */ + if (if_prefix->ref_count > 0) + return; + + /* length <= 30, delete glean route, first address, last address */ + if (address_length <= 30) + { + /* Less work to do in FIB if we remove the covered /32s first */ + + /* first address in prefix */ + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 & im->fib_masks[address_length]; + pfx_special.fp_len = 32; + + if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32) + fib_table_entry_special_remove (fib_index, + &pfx_special, + FIB_SOURCE_INTERFACE); + + /* prefix broadcast address */ + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 | ~im->fib_masks[address_length]; + pfx_special.fp_len = 32; + + if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32) + fib_table_entry_special_remove (fib_index, + &pfx_special, + FIB_SOURCE_INTERFACE); + } + else if (address_length == 31) + { + /* length == 31, delete attached route for the other address */ + pfx_special.fp_addr.ip4.as_u32 = + address->as_u32 ^ clib_host_to_net_u32(1); - fib_table_entry_delete (fib_index, &net_pfx, FIB_SOURCE_INTERFACE); + fib_table_entry_delete (fib_index, &pfx_special, FIB_SOURCE_INTERFACE); } - pfx.fp_len = 32; + /* remove glean route for prefix */ + pfx_special.fp_addr.ip4 = *address; + pfx_special.fp_len = address_length; + fib_table_entry_delete (fib_index, &pfx_special, FIB_SOURCE_INTERFACE); + + mhash_unset (&lm->prefix_to_if_prefix_index, &key, 0 /* old_value */); + pool_put (lm->if_prefix_pool, if_prefix); +} + +static void +ip4_del_interface_routes (u32 sw_if_index, + ip4_main_t * im, + u32 fib_index, + ip4_address_t * address, u32 address_length) +{ + fib_prefix_t pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4 = *address, + }; + fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE); + + ip4_del_interface_prefix_routes (im, sw_if_index, fib_index, + address, address_length); } +#ifndef CLIB_MARCH_VARIANT void ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable) { ip4_main_t *im = &ip4_main; + vnet_main_t *vnm = vnet_get_main (); + vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index); vec_validate_init_empty (im->ip_enabled_by_sw_if_index, sw_if_index, 0); @@ -519,6 +626,17 @@ ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable) vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled", sw_if_index, !is_enable, 0, 0); + + if (is_enable) + hi->l3_if_count++; + else if (hi->l3_if_count) + hi->l3_if_count--; + + { + ip4_enable_disable_interface_callback_t *cb; + vec_foreach (cb, im->enable_disable_interface_callbacks) + cb->function (im, cb->function_opaque, sw_if_index, is_enable); + } } static clib_error_t * @@ -531,7 +649,7 @@ ip4_add_del_interface_address_internal (vlib_main_t * vm, ip4_main_t *im = &ip4_main; ip_lookup_main_t *lm = &im->lookup_main; clib_error_t *error = 0; - u32 if_address_index, elts_before; + u32 if_address_index; ip4_address_fib_t ip4_af, *addr_fib = 0; /* local0 interface doesn't support IP addressing */ @@ -559,8 +677,8 @@ ip4_add_del_interface_address_internal (vlib_main_t * vm, 
ip_interface_address_t *ia; vnet_sw_interface_t *sif; - pool_foreach(sif, vnm->interface_main.sw_interfaces, - ({ + pool_foreach (sif, vnm->interface_main.sw_interfaces) + { if (im->fib_index_by_sw_if_index[sw_if_index] == im->fib_index_by_sw_if_index[sif->sw_if_index]) { @@ -571,6 +689,7 @@ ip4_add_del_interface_address_internal (vlib_main_t * vm, ip4_address_t * x = ip_interface_address_get_address (&im->lookup_main, ia); + if (ip4_destination_matches_route (im, address, x, ia->address_length) || ip4_destination_matches_route (im, @@ -578,50 +697,133 @@ ip4_add_del_interface_address_internal (vlib_main_t * vm, address, address_length)) { - vnm->api_errno = VNET_API_ERROR_DUPLICATE_IF_ADDRESS; - - return - clib_error_create - ("failed to add %U which conflicts with %U for interface %U", + /* an intf may have >1 addr from the same prefix */ + if ((sw_if_index == sif->sw_if_index) && + (ia->address_length == address_length) && + (x->as_u32 != address->as_u32)) + continue; + + if (ia->flags & IP_INTERFACE_ADDRESS_FLAG_STALE) + /* if the address we're comparing against is stale + * then the CP has not added this one back yet, maybe + * it never will, so we have to assume it won't and + * ignore it. if it does add it back, then it will fail + * because this one is now present */ + continue; + + /* error if the length or intf was different */ + vnm->api_errno = VNET_API_ERROR_ADDRESS_IN_USE; + + error = clib_error_create + ("failed to add %U on %U which conflicts with %U for interface %U", format_ip4_address_and_length, address, address_length, + format_vnet_sw_if_index_name, vnm, + sw_if_index, format_ip4_address_and_length, x, ia->address_length, format_vnet_sw_if_index_name, vnm, sif->sw_if_index); + goto done; } })); } - })); + } } /* *INDENT-ON* */ - elts_before = pool_elts (lm->if_address_pool); + if_address_index = ip_interface_address_find (lm, addr_fib, address_length); + + if (is_del) + { + if (~0 == if_address_index) + { + vnm->api_errno = VNET_API_ERROR_ADDRESS_NOT_FOUND_FOR_INTERFACE; + error = clib_error_create ("%U not found for interface %U", + lm->format_address_and_length, + addr_fib, address_length, + format_vnet_sw_if_index_name, vnm, + sw_if_index); + goto done; + } + + error = ip_interface_address_del (lm, vnm, if_address_index, addr_fib, + address_length, sw_if_index); + if (error) + goto done; + } + else + { + if (~0 != if_address_index) + { + ip_interface_address_t *ia; + + ia = pool_elt_at_index (lm->if_address_pool, if_address_index); + + if (ia->flags & IP_INTERFACE_ADDRESS_FLAG_STALE) + { + if (ia->sw_if_index == sw_if_index) + { + /* re-adding an address during the replace action. + * consdier this the update. clear the flag and + * we're done */ + ia->flags &= ~IP_INTERFACE_ADDRESS_FLAG_STALE; + goto done; + } + else + { + /* The prefix is moving from one interface to another. 
+ * delete the stale and add the new */ + ip4_add_del_interface_address_internal (vm, + ia->sw_if_index, + address, + address_length, 1); + ia = NULL; + error = ip_interface_address_add (lm, sw_if_index, + addr_fib, address_length, + &if_address_index); + } + } + else + { + vnm->api_errno = VNET_API_ERROR_DUPLICATE_IF_ADDRESS; + error = clib_error_create + ("Prefix %U already found on interface %U", + lm->format_address_and_length, addr_fib, address_length, + format_vnet_sw_if_index_name, vnm, ia->sw_if_index); + } + } + else + error = ip_interface_address_add (lm, sw_if_index, + addr_fib, address_length, + &if_address_index); + } - error = ip_interface_address_add_del - (lm, sw_if_index, addr_fib, address_length, is_del, &if_address_index); if (error) goto done; ip4_sw_interface_enable_disable (sw_if_index, !is_del); + ip4_mfib_interface_enable_disable (sw_if_index, !is_del); - if (is_del) - ip4_del_interface_routes (im, ip4_af.fib_index, address, address_length); - else - ip4_add_interface_routes (sw_if_index, - im, ip4_af.fib_index, - pool_elt_at_index - (lm->if_address_pool, if_address_index)); - - /* If pool did not grow/shrink: add duplicate address. */ - if (elts_before != pool_elts (lm->if_address_pool)) + /* intf addr routes are added/deleted on admin up/down */ + if (vnet_sw_interface_is_admin_up (vnm, sw_if_index)) { - ip4_add_del_interface_address_callback_t *cb; - vec_foreach (cb, im->add_del_interface_address_callbacks) - cb->function (im, cb->function_opaque, sw_if_index, - address, address_length, if_address_index, is_del); + if (is_del) + ip4_del_interface_routes (sw_if_index, + im, ip4_af.fib_index, address, + address_length); + else + ip4_add_interface_routes (sw_if_index, + im, ip4_af.fib_index, + pool_elt_at_index + (lm->if_address_pool, if_address_index)); } + ip4_add_del_interface_address_callback_t *cb; + vec_foreach (cb, im->add_del_interface_address_callbacks) + cb->function (im, cb->function_opaque, sw_if_index, + address, address_length, if_address_index, is_del); + done: vec_free (addr_fib); return error; @@ -637,12 +839,93 @@ ip4_add_del_interface_address (vlib_main_t * vm, (vm, sw_if_index, address, address_length, is_del); } +void +ip4_directed_broadcast (u32 sw_if_index, u8 enable) +{ + ip_interface_address_t *ia; + ip4_main_t *im; + + im = &ip4_main; + + /* + * when directed broadcast is enabled, the subnet braodcast route will forward + * packets using an adjacency with a broadcast MAC. otherwise it drops + */ + /* *INDENT-OFF* */ + foreach_ip_interface_address(&im->lookup_main, ia, + sw_if_index, 0, + ({ + if (ia->address_length <= 30) + { + ip4_address_t *ipa; + + ipa = ip_interface_address_get_address (&im->lookup_main, ia); + + fib_prefix_t pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr = { + .ip4.as_u32 = (ipa->as_u32 | ~im->fib_masks[ia->address_length]), + }, + }; + + ip4_add_subnet_bcast_route + (fib_table_get_index_for_sw_if_index(FIB_PROTOCOL_IP4, + sw_if_index), + &pfx, sw_if_index); + } + })); + /* *INDENT-ON* */ +} +#endif + +static clib_error_t * +ip4_sw_interface_admin_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags) +{ + ip4_main_t *im = &ip4_main; + ip_interface_address_t *ia; + ip4_address_t *a; + u32 is_admin_up, fib_index; + + /* Fill in lookup tables with default table (0). 
*/ + vec_validate (im->fib_index_by_sw_if_index, sw_if_index); + + vec_validate_init_empty (im-> + lookup_main.if_address_pool_index_by_sw_if_index, + sw_if_index, ~0); + + is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0; + + fib_index = vec_elt (im->fib_index_by_sw_if_index, sw_if_index); + + /* *INDENT-OFF* */ + foreach_ip_interface_address (&im->lookup_main, ia, sw_if_index, + 0 /* honor unnumbered */, + ({ + a = ip_interface_address_get_address (&im->lookup_main, ia); + if (is_admin_up) + ip4_add_interface_routes (sw_if_index, + im, fib_index, + ia); + else + ip4_del_interface_routes (sw_if_index, + im, fib_index, + a, ia->address_length); + })); + /* *INDENT-ON* */ + + return 0; +} + +VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ip4_sw_interface_admin_up_down); + /* Built-in ip4 unicast rx feature path definition */ /* *INDENT-OFF* */ VNET_FEATURE_ARC_INIT (ip4_unicast, static) = { .arc_name = "ip4-unicast", .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"), + .last_in_arc = "ip4-lookup", .arc_index_ptr = &ip4_main.lookup_main.ucast_feature_arc_index, }; @@ -657,20 +940,6 @@ VNET_FEATURE_INIT (ip4_inacl, static) = { .arc_name = "ip4-unicast", .node_name = "ip4-inacl", - .runs_before = VNET_FEATURES ("ip4-source-check-via-rx"), -}; - -VNET_FEATURE_INIT (ip4_source_check_1, static) = -{ - .arc_name = "ip4-unicast", - .node_name = "ip4-source-check-via-rx", - .runs_before = VNET_FEATURES ("ip4-source-check-via-any"), -}; - -VNET_FEATURE_INIT (ip4_source_check_2, static) = -{ - .arc_name = "ip4-unicast", - .node_name = "ip4-source-check-via-any", .runs_before = VNET_FEATURES ("ip4-policer-classify"), }; @@ -685,13 +954,13 @@ VNET_FEATURE_INIT (ip4_policer_classify, static) = { .arc_name = "ip4-unicast", .node_name = "ip4-policer-classify", - .runs_before = VNET_FEATURES ("ipsec-input-ip4"), + .runs_before = VNET_FEATURES ("ipsec4-input-feature"), }; VNET_FEATURE_INIT (ip4_ipsec, static) = { .arc_name = "ip4-unicast", - .node_name = "ipsec-input-ip4", + .node_name = "ipsec4-input-feature", .runs_before = VNET_FEATURES ("vpath-input-ip4"), }; @@ -728,6 +997,7 @@ VNET_FEATURE_ARC_INIT (ip4_multicast, static) = { .arc_name = "ip4-multicast", .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"), + .last_in_arc = "ip4-mfib-forward-lookup", .arc_index_ptr = &ip4_main.lookup_main.mcast_feature_arc_index, }; @@ -757,6 +1027,7 @@ VNET_FEATURE_ARC_INIT (ip4_output, static) = { .arc_name = "ip4-output", .start_nodes = VNET_FEATURES ("ip4-rewrite", "ip4-midchain", "ip4-dvr-dpo"), + .last_in_arc = "interface-output", .arc_index_ptr = &ip4_main.lookup_main.output_feature_arc_index, }; @@ -771,13 +1042,13 @@ VNET_FEATURE_INIT (ip4_outacl, static) = { .arc_name = "ip4-output", .node_name = "ip4-outacl", - .runs_before = VNET_FEATURES ("ipsec-output-ip4"), + .runs_before = VNET_FEATURES ("ipsec4-output-feature"), }; VNET_FEATURE_INIT (ip4_ipsec_output, static) = { .arc_name = "ip4-output", - .node_name = "ipsec-output-ip4", + .node_name = "ipsec4-output-feature", .runs_before = VNET_FEATURES ("interface-output"), }; @@ -815,6 +1086,7 @@ ip4_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add) ip4_add_del_interface_address(vm, sw_if_index, address, ia->address_length, 1); })); /* *INDENT-ON* */ + ip4_mfib_interface_enable_disable (sw_if_index, 0); } vnet_feature_enable_disable ("ip4-unicast", "ip4-not-enabled", sw_if_index, @@ -829,9 +1101,11 @@ ip4_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add) 
VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ip4_sw_interface_add_del); /* Global IP4 main. */ +#ifndef CLIB_MARCH_VARIANT ip4_main_t ip4_main; +#endif /* CLIB_MARCH_VARIANT */ -clib_error_t * +static clib_error_t * ip4_lookup_init (vlib_main_t * vm) { ip4_main_t *im = &ip4_main; @@ -875,11 +1149,7 @@ ip4_lookup_init (vlib_main_t * vm) { ethernet_arp_header_t h; - memset (&h, 0, sizeof (h)); - - /* Set target ethernet address to all zeros. */ - memset (h.ip4_over_ethernet[1].ethernet, 0, - sizeof (h.ip4_over_ethernet[1].ethernet)); + clib_memset (&h, 0, sizeof (h)); #define _16(f,v) h.f = clib_host_to_net_u16 (v); #define _8(f,v) h.f = v; @@ -915,6 +1185,7 @@ typedef struct } ip4_forward_next_trace_t; +#ifndef CLIB_MARCH_VARIANT u8 * format_ip4_forward_next_trace (u8 * s, va_list * args) { @@ -927,6 +1198,7 @@ format_ip4_forward_next_trace (u8 * s, va_list * args) format_ip4_header, t->packet_data, sizeof (t->packet_data)); return s; } +#endif static u8 * format_ip4_lookup_trace (u8 * s, va_list * args) @@ -958,10 +1230,11 @@ format_ip4_rewrite_trace (u8 * s, va_list * args) s = format (s, "\n%U%U", format_white_space, indent, format_ip_adjacency_packet_data, - t->dpo_index, t->packet_data, sizeof (t->packet_data)); + t->packet_data, sizeof (t->packet_data)); return s; } +#ifndef CLIB_MARCH_VARIANT /* Common trace function for all ip4-forward next nodes. */ void ip4_forward_next_trace (vlib_main_t * vm, @@ -1001,9 +1274,9 @@ ip4_forward_next_trace (vlib_main_t * vm, vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (b0)->sw_if_index[VLIB_RX]); - clib_memcpy (t0->packet_data, - vlib_buffer_get_current (b0), - sizeof (t0->packet_data)); + clib_memcpy_fast (t0->packet_data, + vlib_buffer_get_current (b0), + sizeof (t0->packet_data)); } if (b1->flags & VLIB_BUFFER_IS_TRACED) { @@ -1015,8 +1288,8 @@ ip4_forward_next_trace (vlib_main_t * vm, (u32) ~ 0) ? vnet_buffer (b1)->sw_if_index[VLIB_TX] : vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (b1)->sw_if_index[VLIB_RX]); - clib_memcpy (t1->packet_data, vlib_buffer_get_current (b1), - sizeof (t1->packet_data)); + clib_memcpy_fast (t1->packet_data, vlib_buffer_get_current (b1), + sizeof (t1->packet_data)); } from += 2; n_left -= 2; @@ -1042,8 +1315,8 @@ ip4_forward_next_trace (vlib_main_t * vm, (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] : vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (b0)->sw_if_index[VLIB_RX]); - clib_memcpy (t0->packet_data, vlib_buffer_get_current (b0), - sizeof (t0->packet_data)); + clib_memcpy_fast (t0->packet_data, vlib_buffer_get_current (b0), + sizeof (t0->packet_data)); } from += 1; n_left -= 1; @@ -1057,9 +1330,6 @@ ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0, { ip_csum_t sum0; u32 ip_header_length, payload_length_host_byte_order; - u32 n_this_buffer, n_bytes_left, n_ip_bytes_this_buffer; - u16 sum16; - void *data_this_buffer; /* Initialize checksum with ip header. */ ip_header_length = ip4_header_bytes (ip0); @@ -1082,31 +1352,9 @@ ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0, sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->src_address, u64)); - n_bytes_left = n_this_buffer = payload_length_host_byte_order; - data_this_buffer = (void *) ip0 + ip_header_length; - n_ip_bytes_this_buffer = - p0->current_length - (((u8 *) ip0 - p0->data) - p0->current_data); - if (n_this_buffer + ip_header_length > n_ip_bytes_this_buffer) - { - n_this_buffer = n_ip_bytes_this_buffer > ip_header_length ? 
- n_ip_bytes_this_buffer - ip_header_length : 0; - } - while (1) - { - sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer); - n_bytes_left -= n_this_buffer; - if (n_bytes_left == 0) - break; - - ASSERT (p0->flags & VLIB_BUFFER_NEXT_PRESENT); - p0 = vlib_get_buffer (vm, p0->next_buffer); - data_this_buffer = vlib_buffer_get_current (p0); - n_this_buffer = p0->current_length; - } - - sum16 = ~ip_csum_fold (sum0); - - return sum16; + return ip_calculate_l4_checksum (vm, p0, sum0, + payload_length_host_byte_order, (u8 *) ip0, + ip_header_length, NULL); } u32 @@ -1134,12 +1382,14 @@ ip4_tcp_udp_validate_checksum (vlib_main_t * vm, vlib_buffer_t * p0) return p0->flags; } +#endif /* *INDENT-OFF* */ VNET_FEATURE_ARC_INIT (ip4_local) = { .arc_name = "ip4-local", .start_nodes = VNET_FEATURES ("ip4-local"), + .last_in_arc = "ip4-local-end-of-arc", }; /* *INDENT-ON* */ @@ -1258,6 +1508,7 @@ typedef struct ip4_address_t src; u32 lbi; u8 error; + u8 first; } ip4_local_last_check_t; static inline void @@ -1274,7 +1525,14 @@ ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0, vnet_buffer (b)->sw_if_index[VLIB_TX] != ~0 ? vnet_buffer (b)->sw_if_index[VLIB_TX] : vnet_buffer (b)->ip.fib_index; - if (PREDICT_FALSE (last_check->src.as_u32 != ip0->src_address.as_u32)) + /* + * vnet_buffer()->ip.adj_index[VLIB_RX] will be set to the index of the + * adjacency for the destination address (the local interface address). + * vnet_buffer()->ip.adj_index[VLIB_TX] will be set to the index of the + * adjacency for the source address (the remote sender's address) + */ + if (PREDICT_TRUE (last_check->src.as_u32 != ip0->src_address.as_u32) || + last_check->first) { mtrie0 = &ip4_fib_get (vnet_buffer (b)->ip.fib_index)->mtrie; leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address); @@ -1282,8 +1540,9 @@ ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0, leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3); lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0); + vnet_buffer (b)->ip.adj_index[VLIB_RX] = + vnet_buffer (b)->ip.adj_index[VLIB_TX]; vnet_buffer (b)->ip.adj_index[VLIB_TX] = lbi0; - vnet_buffer (b)->ip.adj_index[VLIB_RX] = lbi0; lb0 = load_balance_get (lbi0); dpo0 = load_balance_get_bucket_i (lb0, 0); @@ -1310,11 +1569,13 @@ ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0, last_check->src.as_u32 = ip0->src_address.as_u32; last_check->lbi = lbi0; last_check->error = *error0; + last_check->first = 0; } else { + vnet_buffer (b)->ip.adj_index[VLIB_RX] = + vnet_buffer (b)->ip.adj_index[VLIB_TX]; vnet_buffer (b)->ip.adj_index[VLIB_TX] = last_check->lbi; - vnet_buffer (b)->ip.adj_index[VLIB_RX] = last_check->lbi; *error0 = last_check->error; } } @@ -1327,9 +1588,10 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip, ip4_fib_mtrie_t *mtrie[2]; const dpo_id_t *dpo[2]; load_balance_t *lb[2]; - u32 not_last_hit = 0; + u32 not_last_hit; u32 lbi[2]; + not_last_hit = last_check->first; not_last_hit |= ip[0]->src_address.as_u32 ^ last_check->src.as_u32; not_last_hit |= ip[1]->src_address.as_u32 ^ last_check->src.as_u32; @@ -1343,7 +1605,13 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip, vnet_buffer (b[1])->sw_if_index[VLIB_TX] : vnet_buffer (b[1])->ip.fib_index; - if (PREDICT_FALSE (not_last_hit)) + /* + * vnet_buffer()->ip.adj_index[VLIB_RX] will be set to the index of the + * adjacency for the destination address (the local interface address). 
+ * vnet_buffer()->ip.adj_index[VLIB_TX] will be set to the index of the + * adjacency for the source address (the remote sender's address) + */ + if (PREDICT_TRUE (not_last_hit)) { mtrie[0] = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie; mtrie[1] = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie; @@ -1364,11 +1632,13 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip, lbi[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf[0]); lbi[1] = ip4_fib_mtrie_leaf_get_adj_index (leaf[1]); + vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = + vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = lbi[0]; - vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = lbi[0]; + vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = + vnet_buffer (b[1])->ip.adj_index[VLIB_TX]; vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = lbi[1]; - vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = lbi[1]; lb[0] = load_balance_get (lbi[0]); lb[1] = load_balance_get (lbi[1]); @@ -1395,39 +1665,80 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip, last_check->src.as_u32 = ip[1]->src_address.as_u32; last_check->lbi = lbi[1]; last_check->error = error[1]; + last_check->first = 0; } else { + vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = + vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = last_check->lbi; - vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = last_check->lbi; + vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = + vnet_buffer (b[1])->ip.adj_index[VLIB_TX]; vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = last_check->lbi; - vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = last_check->lbi; error[0] = last_check->error; error[1] = last_check->error; } } +enum ip_local_packet_type_e +{ + IP_LOCAL_PACKET_TYPE_L4, + IP_LOCAL_PACKET_TYPE_NAT, + IP_LOCAL_PACKET_TYPE_FRAG, +}; + +/** + * Determine packet type and next node. + * + * The expectation is that all packets that are not L4 will skip + * checksums and source checks. + */ +always_inline u8 +ip4_local_classify (vlib_buffer_t * b, ip4_header_t * ip, u16 * next) +{ + ip_lookup_main_t *lm = &ip4_main.lookup_main; + + if (PREDICT_FALSE (ip4_is_fragment (ip))) + { + *next = IP_LOCAL_NEXT_REASSEMBLY; + return IP_LOCAL_PACKET_TYPE_FRAG; + } + if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_IS_NATED)) + { + *next = lm->local_next_by_ip_protocol[ip->protocol]; + return IP_LOCAL_PACKET_TYPE_NAT; + } + + *next = lm->local_next_by_ip_protocol[ip->protocol]; + return IP_LOCAL_PACKET_TYPE_L4; +} + static inline uword ip4_local_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int head_of_feature_arc) { - ip4_main_t *im = &ip4_main; - ip_lookup_main_t *lm = &im->lookup_main; u32 *from, n_left_from; vlib_node_runtime_t *error_node = - vlib_node_get_runtime (vm, ip4_input_node.index); + vlib_node_get_runtime (vm, ip4_local_node.index); u16 nexts[VLIB_FRAME_SIZE], *next; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; ip4_header_t *ip[2]; - u8 error[2]; + u8 error[2], pt[2]; ip4_local_last_check_t last_check = { + /* + * 0.0.0.0 can appear as the source address of an IP packet, + * as can any other address, hence the need to use the 'first' + * member to make sure the .lbi is initialised for the first + * packet. 
+ */ .src = {.as_u32 = 0}, .lbi = ~0, - .error = IP4_ERROR_UNKNOWN_PROTOCOL + .error = IP4_ERROR_UNKNOWN_PROTOCOL, + .first = 1, }; from = vlib_frame_vector_args (frame); @@ -1442,7 +1753,7 @@ ip4_local_inline (vlib_main_t * vm, while (n_left_from >= 6) { - u32 is_nat, not_batch = 0; + u8 not_batch = 0; /* Prefetch next iteration. */ { @@ -1461,10 +1772,12 @@ ip4_local_inline (vlib_main_t * vm, vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data; vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data; - is_nat = b[0]->flags & VNET_BUFFER_F_IS_NATED; - not_batch |= is_nat ^ (b[1]->flags & VNET_BUFFER_F_IS_NATED); + pt[0] = ip4_local_classify (b[0], ip[0], &next[0]); + pt[1] = ip4_local_classify (b[1], ip[1], &next[1]); + + not_batch = pt[0] ^ pt[1]; - if (head_of_feature_arc == 0 || (is_nat && not_batch == 0)) + if (head_of_feature_arc == 0 || (pt[0] && not_batch == 0)) goto skip_checks; if (PREDICT_TRUE (not_batch == 0)) @@ -1474,12 +1787,12 @@ ip4_local_inline (vlib_main_t * vm, } else { - if (!(b[0]->flags & VNET_BUFFER_F_IS_NATED)) + if (!pt[0]) { ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]); ip4_local_check_src (b[0], ip[0], &last_check, &error[0]); } - if (!(b[1]->flags & VNET_BUFFER_F_IS_NATED)) + if (!pt[1]) { ip4_local_check_l4_csum (vm, b[1], ip[1], &error[1]); ip4_local_check_src (b[1], ip[1], &last_check, &error[1]); @@ -1488,8 +1801,6 @@ ip4_local_inline (vlib_main_t * vm, skip_checks: - next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol]; - next[1] = lm->local_next_by_ip_protocol[ip[1]->protocol]; ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0], head_of_feature_arc); ip4_local_set_next_and_error (error_node, b[1], &next[1], error[1], @@ -1506,8 +1817,9 @@ ip4_local_inline (vlib_main_t * vm, ip[0] = vlib_buffer_get_current (b[0]); vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data; + pt[0] = ip4_local_classify (b[0], ip[0], &next[0]); - if (head_of_feature_arc == 0 || (b[0]->flags & VNET_BUFFER_F_IS_NATED)) + if (head_of_feature_arc == 0 || pt[0]) goto skip_check; ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]); @@ -1515,7 +1827,6 @@ ip4_local_inline (vlib_main_t * vm, skip_check: - next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol]; ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0], head_of_feature_arc); @@ -1528,8 +1839,8 @@ ip4_local_inline (vlib_main_t * vm, return frame->n_vectors; } -static uword -ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_local_node) (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) { return ip4_local_inline (vm, node, frame, 1 /* head of feature arc */ ); } @@ -1537,10 +1848,11 @@ ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_local_node) = { - .function = ip4_local, .name = "ip4-local", .vector_size = sizeof (u32), .format_trace = format_ip4_forward_next_trace, + .n_errors = IP4_N_ERROR, + .error_strings = ip4_error_strings, .n_next_nodes = IP_LOCAL_N_NEXT, .next_nodes = { @@ -1548,22 +1860,21 @@ VLIB_REGISTER_NODE (ip4_local_node) = [IP_LOCAL_NEXT_PUNT] = "ip4-punt", [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup", [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input", + [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-full-reassembly", }, }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local); -static uword -ip4_local_end_of_arc (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_local_end_of_arc_node) 
(vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return ip4_local_inline (vm, node, frame, 0 /* head of feature arc */ ); } /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (ip4_local_end_of_arc_node,static) = { - .function = ip4_local_end_of_arc, +VLIB_REGISTER_NODE (ip4_local_end_of_arc_node) = { .name = "ip4-local-end-of-arc", .vector_size = sizeof (u32), @@ -1571,8 +1882,6 @@ VLIB_REGISTER_NODE (ip4_local_end_of_arc_node,static) = { .sibling_of = "ip4-local", }; -VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_end_of_arc_node, ip4_local_end_of_arc) - VNET_FEATURE_INIT (ip4_local_end_of_arc, static) = { .arc_name = "ip4-local", .node_name = "ip4-local-end-of-arc", @@ -1580,6 +1889,7 @@ VNET_FEATURE_INIT (ip4_local_end_of_arc, static) = { }; /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT void ip4_register_protocol (u32 protocol, u32 node_index) { @@ -1592,438 +1902,62 @@ ip4_register_protocol (u32 protocol, u32 node_index) vlib_node_add_next (vm, ip4_local_node.index, node_index); } -static clib_error_t * -show_ip_local_command_fn (vlib_main_t * vm, - unformat_input_t * input, vlib_cli_command_t * cmd) +void +ip4_unregister_protocol (u32 protocol) { ip4_main_t *im = &ip4_main; ip_lookup_main_t *lm = &im->lookup_main; - int i; - vlib_cli_output (vm, "Protocols handled by ip4_local"); - for (i = 0; i < ARRAY_LEN (lm->local_next_by_ip_protocol); i++) - { - if (lm->local_next_by_ip_protocol[i] != IP_LOCAL_NEXT_PUNT) - { - u32 node_index = vlib_get_node (vm, - ip4_local_node.index)-> - next_nodes[lm->local_next_by_ip_protocol[i]]; - vlib_cli_output (vm, "%d: %U", i, format_vlib_node_name, vm, - node_index); - } - } - return 0; + ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol)); + lm->local_next_by_ip_protocol[protocol] = IP_LOCAL_NEXT_PUNT; } +#endif - - -/*? - * Display the set of protocols handled by the local IPv4 stack. - * - * @cliexpar - * Example of how to display local protocol table: - * @cliexstart{show ip local} - * Protocols handled by ip4_local - * 1 - * 17 - * 47 - * @cliexend -?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (show_ip_local, static) = -{ - .path = "show ip local", - .function = show_ip_local_command_fn, - .short_help = "show ip local", -}; -/* *INDENT-ON* */ - -always_inline uword -ip4_arp_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, int is_glean) +static clib_error_t * +show_ip_local_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) { - vnet_main_t *vnm = vnet_get_main (); ip4_main_t *im = &ip4_main; ip_lookup_main_t *lm = &im->lookup_main; - u32 *from, *to_next_drop; - uword n_left_from, n_left_to_next_drop, next_index; - static f64 time_last_seed_change = -1e100; - static u32 hash_seeds[3]; - static uword hash_bitmap[256 / BITS (uword)]; - f64 time_now; - - if (node->flags & VLIB_NODE_FLAG_TRACE) - ip4_forward_next_trace (vm, node, frame, VLIB_TX); - - time_now = vlib_time_now (vm); - if (time_now - time_last_seed_change > 1e-3) - { - uword i; - u32 *r = clib_random_buffer_get_data (&vm->random_buffer, - sizeof (hash_seeds)); - for (i = 0; i < ARRAY_LEN (hash_seeds); i++) - hash_seeds[i] = r[i]; - - /* Mark all hash keys as been no-seen before. 
*/ - for (i = 0; i < ARRAY_LEN (hash_bitmap); i++) - hash_bitmap[i] = 0; - - time_last_seed_change = time_now; - } - - from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next_index = node->cached_next_index; - if (next_index == IP4_ARP_NEXT_DROP) - next_index = IP4_ARP_N_NEXT; /* point to first interface */ - - while (n_left_from > 0) - { - vlib_get_next_frame (vm, node, IP4_ARP_NEXT_DROP, - to_next_drop, n_left_to_next_drop); - - while (n_left_from > 0 && n_left_to_next_drop > 0) - { - u32 pi0, adj_index0, a0, b0, c0, m0, sw_if_index0, drop0; - ip_adjacency_t *adj0; - vlib_buffer_t *p0; - ip4_header_t *ip0; - uword bm0; - - pi0 = from[0]; - - p0 = vlib_get_buffer (vm, pi0); - - adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; - adj0 = adj_get (adj_index0); - ip0 = vlib_buffer_get_current (p0); - - a0 = hash_seeds[0]; - b0 = hash_seeds[1]; - c0 = hash_seeds[2]; - - sw_if_index0 = adj0->rewrite_header.sw_if_index; - vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0; - - if (is_glean) - { - /* - * this is the Glean case, so we are ARPing for the - * packet's destination - */ - a0 ^= ip0->dst_address.data_u32; - } - else - { - a0 ^= adj0->sub_type.nbr.next_hop.ip4.data_u32; - } - b0 ^= sw_if_index0; - - hash_v3_mix32 (a0, b0, c0); - hash_v3_finalize32 (a0, b0, c0); - - c0 &= BITS (hash_bitmap) - 1; - m0 = (uword) 1 << (c0 % BITS (uword)); - c0 = c0 / BITS (uword); - - bm0 = hash_bitmap[c0]; - drop0 = (bm0 & m0) != 0; - - /* Mark it as seen. */ - hash_bitmap[c0] = bm0 | m0; - - from += 1; - n_left_from -= 1; - to_next_drop[0] = pi0; - to_next_drop += 1; - n_left_to_next_drop -= 1; - - p0->error = - node->errors[drop0 ? IP4_ARP_ERROR_DROP : - IP4_ARP_ERROR_REQUEST_SENT]; - - /* - * the adj has been updated to a rewrite but the node the DPO that got - * us here hasn't - yet. no big deal. we'll drop while we wait. - */ - if (IP_LOOKUP_NEXT_REWRITE == adj0->lookup_next_index) - continue; - - if (drop0) - continue; - - /* - * Can happen if the control-plane is programming tables - * with traffic flowing; at least that's today's lame excuse. - */ - if ((is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_GLEAN) - || (!is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_ARP)) - { - p0->error = node->errors[IP4_ARP_ERROR_NON_ARP_ADJ]; - } - else - /* Send ARP request. */ - { - u32 bi0 = 0; - vlib_buffer_t *b0; - ethernet_arp_header_t *h0; - vnet_hw_interface_t *hw_if0; - - h0 = - vlib_packet_template_get_packet (vm, - &im->ip4_arp_request_packet_template, - &bi0); - - /* Seems we're out of buffers */ - if (PREDICT_FALSE (!h0)) - continue; - - /* Add rewrite/encap string for ARP packet. */ - vnet_rewrite_one_header (adj0[0], h0, - sizeof (ethernet_header_t)); - - hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0); - - /* Src ethernet address in ARP header. */ - clib_memcpy (h0->ip4_over_ethernet[0].ethernet, - hw_if0->hw_address, - sizeof (h0->ip4_over_ethernet[0].ethernet)); - - if (is_glean) - { - /* The interface's source address is stashed in the Glean Adj */ - h0->ip4_over_ethernet[0].ip4 = - adj0->sub_type.glean.receive_addr.ip4; - - /* Copy in destination address we are requesting. This is the - * glean case, so it's the packet's destination.*/ - h0->ip4_over_ethernet[1].ip4.data_u32 = - ip0->dst_address.data_u32; - } - else - { - /* Src IP address in ARP header. 
*/ - if (ip4_src_address_for_packet (lm, sw_if_index0, - &h0-> - ip4_over_ethernet[0].ip4)) - { - /* No source address available */ - p0->error = - node->errors[IP4_ARP_ERROR_NO_SOURCE_ADDRESS]; - vlib_buffer_free (vm, &bi0, 1); - continue; - } - - /* Copy in destination address we are requesting from the - incomplete adj */ - h0->ip4_over_ethernet[1].ip4.data_u32 = - adj0->sub_type.nbr.next_hop.ip4.as_u32; - } - - vlib_buffer_copy_trace_flag (vm, p0, bi0); - b0 = vlib_get_buffer (vm, bi0); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0; - - vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes); - - vlib_set_next_frame_buffer (vm, node, - adj0->rewrite_header.next_index, - bi0); - } - } - - vlib_put_next_frame (vm, node, IP4_ARP_NEXT_DROP, n_left_to_next_drop); - } - - return frame->n_vectors; -} - -static uword -ip4_arp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) -{ - return (ip4_arp_inline (vm, node, frame, 0)); -} - -static uword -ip4_glean (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) -{ - return (ip4_arp_inline (vm, node, frame, 1)); -} - -static char *ip4_arp_error_strings[] = { - [IP4_ARP_ERROR_DROP] = "address overflow drops", - [IP4_ARP_ERROR_REQUEST_SENT] = "ARP requests sent", - [IP4_ARP_ERROR_NON_ARP_ADJ] = "ARPs to non-ARP adjacencies", - [IP4_ARP_ERROR_REPLICATE_DROP] = "ARP replication completed", - [IP4_ARP_ERROR_REPLICATE_FAIL] = "ARP replication failed", - [IP4_ARP_ERROR_NO_SOURCE_ADDRESS] = "no source address for ARP request", -}; - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (ip4_arp_node) = -{ - .function = ip4_arp, - .name = "ip4-arp", - .vector_size = sizeof (u32), - .format_trace = format_ip4_forward_next_trace, - .n_errors = ARRAY_LEN (ip4_arp_error_strings), - .error_strings = ip4_arp_error_strings, - .n_next_nodes = IP4_ARP_N_NEXT, - .next_nodes = - { - [IP4_ARP_NEXT_DROP] = "error-drop", - }, -}; - -VLIB_REGISTER_NODE (ip4_glean_node) = -{ - .function = ip4_glean, - .name = "ip4-glean", - .vector_size = sizeof (u32), - .format_trace = format_ip4_forward_next_trace, - .n_errors = ARRAY_LEN (ip4_arp_error_strings), - .error_strings = ip4_arp_error_strings, - .n_next_nodes = IP4_ARP_N_NEXT, - .next_nodes = { - [IP4_ARP_NEXT_DROP] = "error-drop", - }, -}; -/* *INDENT-ON* */ - -#define foreach_notrace_ip4_arp_error \ -_(DROP) \ -_(REQUEST_SENT) \ -_(REPLICATE_DROP) \ -_(REPLICATE_FAIL) - -clib_error_t * -arp_notrace_init (vlib_main_t * vm) -{ - vlib_node_runtime_t *rt = vlib_node_get_runtime (vm, ip4_arp_node.index); - - /* don't trace ARP request packets */ -#define _(a) \ - vnet_pcap_drop_trace_filter_add_del \ - (rt->errors[IP4_ARP_ERROR_##a], \ - 1 /* is_add */); - foreach_notrace_ip4_arp_error; -#undef _ - return 0; -} - -VLIB_INIT_FUNCTION (arp_notrace_init); - - -/* Send an ARP request to see if given destination is reachable on given interface. 
*/ -clib_error_t * -ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst, u32 sw_if_index, - u8 refresh) -{ - vnet_main_t *vnm = vnet_get_main (); - ip4_main_t *im = &ip4_main; - ethernet_arp_header_t *h; - ip4_address_t *src; - ip_interface_address_t *ia; - ip_adjacency_t *adj; - vnet_hw_interface_t *hi; - vnet_sw_interface_t *si; - vlib_buffer_t *b; - adj_index_t ai; - u32 bi = 0; - u8 unicast_rewrite = 0; - - si = vnet_get_sw_interface (vnm, sw_if_index); - - if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)) - { - return clib_error_return (0, "%U: interface %U down", - format_ip4_address, dst, - format_vnet_sw_if_index_name, vnm, - sw_if_index); - } - - src = - ip4_interface_address_matching_destination (im, dst, sw_if_index, &ia); - if (!src) - { - vnm->api_errno = VNET_API_ERROR_NO_MATCHING_INTERFACE; - return clib_error_return - (0, - "no matching interface address for destination %U (interface %U)", - format_ip4_address, dst, format_vnet_sw_if_index_name, vnm, - sw_if_index); - } - - h = vlib_packet_template_get_packet (vm, - &im->ip4_arp_request_packet_template, - &bi); - - if (!h) - return clib_error_return (0, "ARP request packet allocation failed"); - - hi = vnet_get_sup_hw_interface (vnm, sw_if_index); - if (PREDICT_FALSE (!hi->hw_address)) - { - return clib_error_return (0, "%U: interface %U do not support ip probe", - format_ip4_address, dst, - format_vnet_sw_if_index_name, vnm, - sw_if_index); - } - - clib_memcpy (h->ip4_over_ethernet[0].ethernet, hi->hw_address, - sizeof (h->ip4_over_ethernet[0].ethernet)); - - h->ip4_over_ethernet[0].ip4 = src[0]; - h->ip4_over_ethernet[1].ip4 = dst[0]; - - b = vlib_get_buffer (vm, bi); - vnet_buffer (b)->sw_if_index[VLIB_RX] = - vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index; - - ip46_address_t nh = { - .ip4 = *dst, - }; - - ai = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, - VNET_LINK_IP4, &nh, sw_if_index); - adj = adj_get (ai); + int i; - /* Peer has been previously resolved, retrieve glean adj instead */ - if (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE) + vlib_cli_output (vm, "Protocols handled by ip4_local"); + for (i = 0; i < ARRAY_LEN (lm->local_next_by_ip_protocol); i++) { - if (refresh) - unicast_rewrite = 1; - else + if (lm->local_next_by_ip_protocol[i] != IP_LOCAL_NEXT_PUNT) { - adj_unlock (ai); - ai = adj_glean_add_or_lock (FIB_PROTOCOL_IP4, - VNET_LINK_IP4, sw_if_index, &nh); - adj = adj_get (ai); + u32 node_index = vlib_get_node (vm, + ip4_local_node.index)-> + next_nodes[lm->local_next_by_ip_protocol[i]]; + vlib_cli_output (vm, "%U: %U", format_ip_protocol, i, + format_vlib_node_name, vm, node_index); } } + return 0; +} - /* Add encapsulation string for software interface (e.g. ethernet header). */ - vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t)); - if (unicast_rewrite) - { - u16 *etype = vlib_buffer_get_current (b) - 2; - etype[0] = clib_host_to_net_u16 (ETHERNET_TYPE_ARP); - } - vlib_buffer_advance (b, -adj->rewrite_header.data_bytes); - { - vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index); - u32 *to_next = vlib_frame_vector_args (f); - to_next[0] = bi; - f->n_vectors = 1; - vlib_put_frame_to_node (vm, hi->output_node_index, f); - } - adj_unlock (ai); - return /* no error */ 0; -} +/*? + * Display the set of protocols handled by the local IPv4 stack. 
+ * + * @cliexpar + * Example of how to display local protocol table: + * @cliexstart{show ip local} + * Protocols handled by ip4_local + * 1 + * 17 + * 47 + * @cliexend +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_ip_local, static) = +{ + .path = "show ip local", + .function = show_ip_local_command_fn, + .short_help = "show ip local", +}; +/* *INDENT-ON* */ typedef enum { @@ -2045,7 +1979,8 @@ typedef enum always_inline void ip4_mtu_check (vlib_buffer_t * b, u16 packet_len, - u16 adj_packet_bytes, bool df, u32 * next, u32 * error) + u16 adj_packet_bytes, bool df, u16 * next, + u8 is_midchain, u32 * error) { if (packet_len > adj_packet_bytes) { @@ -2061,424 +1996,555 @@ ip4_mtu_check (vlib_buffer_t * b, u16 packet_len, else { /* IP fragmentation */ - ip_frag_set_vnet_buffer (b, 0, adj_packet_bytes, - IP4_FRAG_NEXT_IP4_LOOKUP, 0); + ip_frag_set_vnet_buffer (b, adj_packet_bytes, + (is_midchain ? + IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN : + IP_FRAG_NEXT_IP_REWRITE), 0); *next = IP4_REWRITE_NEXT_FRAGMENT; } } } +/* increment TTL & update checksum. + Works either endian, so no need for byte swap. */ +static_always_inline void +ip4_ttl_inc (vlib_buffer_t * b, ip4_header_t * ip) +{ + i32 ttl; + u32 checksum; + if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)) + return; + + ttl = ip->ttl; + + checksum = ip->checksum - clib_host_to_net_u16 (0x0100); + checksum += checksum >= 0xffff; + + ip->checksum = checksum; + ttl += 1; + ip->ttl = ttl; + + ASSERT (ip4_header_checksum_is_valid (ip)); +} + +/* Decrement TTL & update checksum. + Works either endian, so no need for byte swap. */ +static_always_inline void +ip4_ttl_and_checksum_check (vlib_buffer_t * b, ip4_header_t * ip, u16 * next, + u32 * error) +{ + i32 ttl; + u32 checksum; + if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)) + return; + + ttl = ip->ttl; + + /* Input node should have reject packets with ttl 0. */ + ASSERT (ip->ttl > 0); + + checksum = ip->checksum + clib_host_to_net_u16 (0x0100); + checksum += checksum >= 0xffff; + + ip->checksum = checksum; + ttl -= 1; + ip->ttl = ttl; + + /* + * If the ttl drops below 1 when forwarding, generate + * an ICMP response. + */ + if (PREDICT_FALSE (ttl <= 0)) + { + *error = IP4_ERROR_TIME_EXPIRED; + vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0; + icmp4_error_set_vnet_buffer (b, ICMP4_time_exceeded, + ICMP4_time_exceeded_ttl_exceeded_in_transit, + 0); + *next = IP4_REWRITE_NEXT_ICMP_ERROR; + } + + /* Verify checksum. 
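     The incremental update above should leave a header that still sums to
     zero: decrementing the TTL lowers the 16-bit word that holds it by
     0x0100 (network order), so adding 0x0100 to the stored one's-complement
     checksum and folding the end-around carry (RFC 1624) keeps the two in
     step, e.g. 0xff12 + 0x0100 = 0x10012, which folds to 0x0013.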
*/ + ASSERT (ip4_header_checksum_is_valid (ip) || + (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)); +} + + always_inline uword -ip4_rewrite_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, - int do_counters, int is_midchain, int is_mcast) +ip4_rewrite_inline_with_gso (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int do_counters, int is_midchain, int is_mcast) { ip_lookup_main_t *lm = &ip4_main.lookup_main; u32 *from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, *to_next, next_index; + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; + u16 nexts[VLIB_FRAME_SIZE], *next; + u32 n_left_from; vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip4_input_node.index); n_left_from = frame->n_vectors; - next_index = node->cached_next_index; u32 thread_index = vm->thread_index; - while (n_left_from > 0) + vlib_get_buffers (vm, from, bufs, n_left_from); + clib_memset_u16 (nexts, IP4_REWRITE_NEXT_DROP, n_left_from); + +#if (CLIB_N_PREFETCHES >= 8) + if (n_left_from >= 6) { - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + int i; + for (i = 2; i < 6; i++) + vlib_prefetch_buffer_header (bufs[i], LOAD); + } - while (n_left_from >= 4 && n_left_to_next >= 2) + next = nexts; + b = bufs; + while (n_left_from >= 8) + { + const ip_adjacency_t *adj0, *adj1; + ip4_header_t *ip0, *ip1; + u32 rw_len0, error0, adj_index0; + u32 rw_len1, error1, adj_index1; + u32 tx_sw_if_index0, tx_sw_if_index1; + u8 *p; + + if (is_midchain) { - ip_adjacency_t *adj0, *adj1; - vlib_buffer_t *p0, *p1; - ip4_header_t *ip0, *ip1; - u32 pi0, rw_len0, next0, error0, checksum0, adj_index0; - u32 pi1, rw_len1, next1, error1, checksum1, adj_index1; - u32 tx_sw_if_index0, tx_sw_if_index1; + vlib_prefetch_buffer_header (b[6], LOAD); + vlib_prefetch_buffer_header (b[7], LOAD); + } - /* Prefetch next iteration. */ - { - vlib_buffer_t *p2, *p3; + adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; + adj_index1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX]; - p2 = vlib_get_buffer (vm, from[2]); - p3 = vlib_get_buffer (vm, from[3]); + /* + * pre-fetch the per-adjacency counters + */ + if (do_counters) + { + vlib_prefetch_combined_counter (&adjacency_counters, + thread_index, adj_index0); + vlib_prefetch_combined_counter (&adjacency_counters, + thread_index, adj_index1); + } - vlib_prefetch_buffer_header (p2, STORE); - vlib_prefetch_buffer_header (p3, STORE); + ip0 = vlib_buffer_get_current (b[0]); + ip1 = vlib_buffer_get_current (b[1]); + + error0 = error1 = IP4_ERROR_NONE; + + ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0); + ip4_ttl_and_checksum_check (b[1], ip1, next + 1, &error1); + + /* Rewrite packet header and updates lengths. */ + adj0 = adj_get (adj_index0); + adj1 = adj_get (adj_index1); + + /* Worth pipelining. No guarantee that adj0,1 are hot... */ + rw_len0 = adj0[0].rewrite_header.data_bytes; + rw_len1 = adj1[0].rewrite_header.data_bytes; + vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0; + vnet_buffer (b[1])->ip.save_rewrite_length = rw_len1; + + p = vlib_buffer_get_current (b[2]); + CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + + p = vlib_buffer_get_current (b[3]); + CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); + + /* Check MTU of outgoing interface. 
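	 For a GSO super-frame the comparison below uses gso_mtu_sz (b),
	 i.e. roughly the size of the segments that will eventually be
	 produced, rather than the length of the whole super-frame, so
	 large GSO buffers are not flagged or fragmented needlessly.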
*/ + u16 ip0_len = clib_net_to_host_u16 (ip0->length); + u16 ip1_len = clib_net_to_host_u16 (ip1->length); + + if (b[0]->flags & VNET_BUFFER_F_GSO) + ip0_len = gso_mtu_sz (b[0]); + if (b[1]->flags & VNET_BUFFER_F_GSO) + ip1_len = gso_mtu_sz (b[1]); + + ip4_mtu_check (b[0], ip0_len, + adj0[0].rewrite_header.max_l3_packet_bytes, + ip0->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), + next + 0, is_midchain, &error0); + ip4_mtu_check (b[1], ip1_len, + adj1[0].rewrite_header.max_l3_packet_bytes, + ip1->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), + next + 1, is_midchain, &error1); + + if (is_mcast) + { + error0 = ((adj0[0].rewrite_header.sw_if_index == + vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ? + IP4_ERROR_SAME_INTERFACE : error0); + error1 = ((adj1[0].rewrite_header.sw_if_index == + vnet_buffer (b[1])->sw_if_index[VLIB_RX]) ? + IP4_ERROR_SAME_INTERFACE : error1); + } - CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE); - CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE); - } + /* Don't adjust the buffer for ttl issue; icmp-error node wants + * to see the IP header */ + if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) + { + u32 next_index = adj0[0].rewrite_header.next_index; + vlib_buffer_advance (b[0], -(word) rw_len0); - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; + tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index; + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0; - from += 2; - n_left_from -= 2; - to_next += 2; - n_left_to_next -= 2; + if (PREDICT_FALSE + (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) + vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index, + tx_sw_if_index0, + &next_index, b[0], + adj0->ia_cfg_index); - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); + next[0] = next_index; + if (is_midchain) + vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ , + 0 /* is_ip6 */ ); + } + else + { + b[0]->error = error_node->errors[error0]; + if (error0 == IP4_ERROR_MTU_EXCEEDED) + ip4_ttl_inc (b[0], ip0); + } + if (PREDICT_TRUE (error1 == IP4_ERROR_NONE)) + { + u32 next_index = adj1[0].rewrite_header.next_index; + vlib_buffer_advance (b[1], -(word) rw_len1); + + tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index; + vnet_buffer (b[1])->sw_if_index[VLIB_TX] = tx_sw_if_index1; + + if (PREDICT_FALSE + (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) + vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index, + tx_sw_if_index1, + &next_index, b[1], + adj1->ia_cfg_index); + next[1] = next_index; + if (is_midchain) + vnet_calc_checksums_inline (vm, b[1], 1 /* is_ip4 */ , + 0 /* is_ip6 */ ); + } + else + { + b[1]->error = error_node->errors[error1]; + if (error1 == IP4_ERROR_MTU_EXCEEDED) + ip4_ttl_inc (b[1], ip1); + } - adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; - adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + if (is_midchain) + /* Guess we are only writing on ipv4 header. */ + vnet_rewrite_two_headers (adj0[0], adj1[0], + ip0, ip1, sizeof (ip4_header_t)); + else + /* Guess we are only writing on simple Ethernet header. 
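	 The sizeof () argument is only a hint of the most likely rewrite
	 length used to pick a fast copy path; the number of bytes actually
	 written is adj->rewrite_header.data_bytes.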
*/ + vnet_rewrite_two_headers (adj0[0], adj1[0], + ip0, ip1, sizeof (ethernet_header_t)); - /* - * pre-fetch the per-adjacency counters - */ - if (do_counters) - { - vlib_prefetch_combined_counter (&adjacency_counters, - thread_index, adj_index0); - vlib_prefetch_combined_counter (&adjacency_counters, - thread_index, adj_index1); - } + if (do_counters) + { + if (error0 == IP4_ERROR_NONE) + vlib_increment_combined_counter + (&adjacency_counters, + thread_index, + adj_index0, 1, + vlib_buffer_length_in_chain (vm, b[0]) + rw_len0); - ip0 = vlib_buffer_get_current (p0); - ip1 = vlib_buffer_get_current (p1); + if (error1 == IP4_ERROR_NONE) + vlib_increment_combined_counter + (&adjacency_counters, + thread_index, + adj_index1, 1, + vlib_buffer_length_in_chain (vm, b[1]) + rw_len1); + } - error0 = error1 = IP4_ERROR_NONE; - next0 = next1 = IP4_REWRITE_NEXT_DROP; + if (is_midchain) + { + if (error0 == IP4_ERROR_NONE) + adj_midchain_fixup (vm, adj0, b[0], VNET_LINK_IP4); + if (error1 == IP4_ERROR_NONE) + adj_midchain_fixup (vm, adj1, b[1], VNET_LINK_IP4); + } - /* Decrement TTL & update checksum. - Works either endian, so no need for byte swap. */ - if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))) - { - i32 ttl0 = ip0->ttl; + if (is_mcast) + { + /* copy bytes from the IP address into the MAC rewrite */ + if (error0 == IP4_ERROR_NONE) + vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, + adj0->rewrite_header.dst_mcast_offset, + &ip0->dst_address.as_u32, (u8 *) ip0); + if (error1 == IP4_ERROR_NONE) + vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, + adj1->rewrite_header.dst_mcast_offset, + &ip1->dst_address.as_u32, (u8 *) ip1); + } - /* Input node should have reject packets with ttl 0. */ - ASSERT (ip0->ttl > 0); + next += 2; + b += 2; + n_left_from -= 2; + } +#elif (CLIB_N_PREFETCHES >= 4) + next = nexts; + b = bufs; + while (n_left_from >= 1) + { + ip_adjacency_t *adj0; + ip4_header_t *ip0; + u32 rw_len0, error0, adj_index0; + u32 tx_sw_if_index0; + u8 *p; + + /* Prefetch next iteration */ + if (PREDICT_TRUE (n_left_from >= 4)) + { + ip_adjacency_t *adj2; + u32 adj_index2; + + vlib_prefetch_buffer_header (b[3], LOAD); + vlib_prefetch_buffer_data (b[2], LOAD); + + /* Prefetch adj->rewrite_header */ + adj_index2 = vnet_buffer (b[2])->ip.adj_index[VLIB_TX]; + adj2 = adj_get (adj_index2); + p = (u8 *) adj2; + CLIB_PREFETCH (p + CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, + LOAD); + } - checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100); - checksum0 += checksum0 >= 0xffff; + adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; - ip0->checksum = checksum0; - ttl0 -= 1; - ip0->ttl = ttl0; + /* + * Prefetch the per-adjacency counters + */ + if (do_counters) + { + vlib_prefetch_combined_counter (&adjacency_counters, + thread_index, adj_index0); + } - /* - * If the ttl drops below 1 when forwarding, generate - * an ICMP response. - */ - if (PREDICT_FALSE (ttl0 <= 0)) - { - error0 = IP4_ERROR_TIME_EXPIRED; - vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded, - ICMP4_time_exceeded_ttl_exceeded_in_transit, - 0); - next0 = IP4_REWRITE_NEXT_ICMP_ERROR; - } + ip0 = vlib_buffer_get_current (b[0]); - /* Verify checksum. 
*/ - ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) || - (p0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)); - } - else - { - p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED; - } - if (PREDICT_TRUE (!(p1->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))) - { - i32 ttl1 = ip1->ttl; + error0 = IP4_ERROR_NONE; - /* Input node should have reject packets with ttl 0. */ - ASSERT (ip1->ttl > 0); + ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0); - checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100); - checksum1 += checksum1 >= 0xffff; + /* Rewrite packet header and updates lengths. */ + adj0 = adj_get (adj_index0); - ip1->checksum = checksum1; - ttl1 -= 1; - ip1->ttl = ttl1; + /* Rewrite header was prefetched. */ + rw_len0 = adj0[0].rewrite_header.data_bytes; + vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0; - /* - * If the ttl drops below 1 when forwarding, generate - * an ICMP response. - */ - if (PREDICT_FALSE (ttl1 <= 0)) - { - error1 = IP4_ERROR_TIME_EXPIRED; - vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0; - icmp4_error_set_vnet_buffer (p1, ICMP4_time_exceeded, - ICMP4_time_exceeded_ttl_exceeded_in_transit, - 0); - next1 = IP4_REWRITE_NEXT_ICMP_ERROR; - } + /* Check MTU of outgoing interface. */ + u16 ip0_len = clib_net_to_host_u16 (ip0->length); - /* Verify checksum. */ - ASSERT ((ip1->checksum == ip4_header_checksum (ip1)) || - (p1->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)); - } - else - { - p1->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED; - } + if (b[0]->flags & VNET_BUFFER_F_GSO) + ip0_len = gso_mtu_sz (b[0]); - /* Rewrite packet header and updates lengths. */ - adj0 = adj_get (adj_index0); - adj1 = adj_get (adj_index1); - - /* Worth pipelining. No guarantee that adj0,1 are hot... */ - rw_len0 = adj0[0].rewrite_header.data_bytes; - rw_len1 = adj1[0].rewrite_header.data_bytes; - vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; - vnet_buffer (p1)->ip.save_rewrite_length = rw_len1; - - /* Check MTU of outgoing interface. */ - ip4_mtu_check (p0, clib_net_to_host_u16 (ip0->length), - adj0[0].rewrite_header.max_l3_packet_bytes, - ip0->flags_and_fragment_offset & - clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), - &next0, &error0); - ip4_mtu_check (p1, clib_net_to_host_u16 (ip1->length), - adj1[0].rewrite_header.max_l3_packet_bytes, - ip1->flags_and_fragment_offset & - clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), - &next1, &error1); + ip4_mtu_check (b[0], ip0_len, + adj0[0].rewrite_header.max_l3_packet_bytes, + ip0->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), + next + 0, is_midchain, &error0); - if (is_mcast) - { - error0 = ((adj0[0].rewrite_header.sw_if_index == - vnet_buffer (p0)->sw_if_index[VLIB_RX]) ? - IP4_ERROR_SAME_INTERFACE : error0); - error1 = ((adj1[0].rewrite_header.sw_if_index == - vnet_buffer (p1)->sw_if_index[VLIB_RX]) ? - IP4_ERROR_SAME_INTERFACE : error1); - } + if (is_mcast) + { + error0 = ((adj0[0].rewrite_header.sw_if_index == + vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ? 
+ IP4_ERROR_SAME_INTERFACE : error0); + } - p0->error = error_node->errors[error0]; - p1->error = error_node->errors[error1]; - /* Don't adjust the buffer for ttl issue; icmp-error node wants - * to see the IP headerr */ - if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) - { - next0 = adj0[0].rewrite_header.next_index; - p0->current_data -= rw_len0; - p0->current_length += rw_len0; - tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index; - vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0; - - if (PREDICT_FALSE - (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) - vnet_feature_arc_start (lm->output_feature_arc_index, - tx_sw_if_index0, &next0, p0); - } - if (PREDICT_TRUE (error1 == IP4_ERROR_NONE)) - { - next1 = adj1[0].rewrite_header.next_index; - p1->current_data -= rw_len1; - p1->current_length += rw_len1; + /* Don't adjust the buffer for ttl issue; icmp-error node wants + * to see the IP header */ + if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) + { + u32 next_index = adj0[0].rewrite_header.next_index; + vlib_buffer_advance (b[0], -(word) rw_len0); + tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index; + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0; + + if (PREDICT_FALSE + (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) + vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index, + tx_sw_if_index0, + &next_index, b[0], + adj0->ia_cfg_index); + next[0] = next_index; - tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index; - vnet_buffer (p1)->sw_if_index[VLIB_TX] = tx_sw_if_index1; + if (is_midchain) + { + vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ , + 0 /* is_ip6 */ ); - if (PREDICT_FALSE - (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) - vnet_feature_arc_start (lm->output_feature_arc_index, - tx_sw_if_index1, &next1, p1); + /* Guess we are only writing on ipv4 header. */ + vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t)); } - - /* Guess we are only writing on simple Ethernet header. */ - vnet_rewrite_two_headers (adj0[0], adj1[0], - ip0, ip1, sizeof (ethernet_header_t)); + else + /* Guess we are only writing on simple Ethernet header. 
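	 In the midchain branch above the rewrite is typically an
	 encapsulating IP header (hence the ip4_header_t size hint), and
	 vnet_calc_checksums_inline finalises any offloaded checksums on
	 the packet that is about to be encapped.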
*/ + vnet_rewrite_one_header (adj0[0], ip0, + sizeof (ethernet_header_t)); /* * Bump the per-adjacency counters */ if (do_counters) - { - vlib_increment_combined_counter - (&adjacency_counters, - thread_index, - adj_index0, 1, - vlib_buffer_length_in_chain (vm, p0) + rw_len0); - - vlib_increment_combined_counter - (&adjacency_counters, - thread_index, - adj_index1, 1, - vlib_buffer_length_in_chain (vm, p1) + rw_len1); - } + vlib_increment_combined_counter + (&adjacency_counters, + thread_index, + adj_index0, 1, vlib_buffer_length_in_chain (vm, + b[0]) + rw_len0); if (is_midchain) - { - adj0->sub_type.midchain.fixup_func - (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); - adj1->sub_type.midchain.fixup_func - (vm, adj1, p1, adj0->sub_type.midchain.fixup_data); - } - if (is_mcast) - { - /* - * copy bytes from the IP address into the MAC rewrite - */ - vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, - adj0-> - rewrite_header.dst_mcast_offset, - &ip0->dst_address.as_u32, - (u8 *) ip0); - vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, - adj0-> - rewrite_header.dst_mcast_offset, - &ip1->dst_address.as_u32, - (u8 *) ip1); - } + adj_midchain_fixup (vm, adj0, b[0], VNET_LINK_IP4); - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, - to_next, n_left_to_next, - pi0, pi1, next0, next1); + if (is_mcast) + /* copy bytes from the IP address into the MAC rewrite */ + vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, + adj0->rewrite_header.dst_mcast_offset, + &ip0->dst_address.as_u32, (u8 *) ip0); } - - while (n_left_from > 0 && n_left_to_next > 0) + else { - ip_adjacency_t *adj0; - vlib_buffer_t *p0; - ip4_header_t *ip0; - u32 pi0, rw_len0, adj_index0, next0, error0, checksum0; - u32 tx_sw_if_index0; + b[0]->error = error_node->errors[error0]; + if (error0 == IP4_ERROR_MTU_EXCEEDED) + ip4_ttl_inc (b[0], ip0); + } - pi0 = to_next[0] = from[0]; + next += 1; + b += 1; + n_left_from -= 1; + } +#endif - p0 = vlib_get_buffer (vm, pi0); + while (n_left_from > 0) + { + ip_adjacency_t *adj0; + ip4_header_t *ip0; + u32 rw_len0, adj_index0, error0; + u32 tx_sw_if_index0; - adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX]; - adj0 = adj_get (adj_index0); + adj0 = adj_get (adj_index0); - ip0 = vlib_buffer_get_current (p0); + if (do_counters) + vlib_prefetch_combined_counter (&adjacency_counters, + thread_index, adj_index0); - error0 = IP4_ERROR_NONE; - next0 = IP4_REWRITE_NEXT_DROP; /* drop on error */ + ip0 = vlib_buffer_get_current (b[0]); - /* Decrement TTL & update checksum. */ - if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))) - { - i32 ttl0 = ip0->ttl; + error0 = IP4_ERROR_NONE; - checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100); + ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0); - checksum0 += checksum0 >= 0xffff; - ip0->checksum = checksum0; + /* Update packet buffer attributes/set output interface. */ + rw_len0 = adj0[0].rewrite_header.data_bytes; + vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0; - ASSERT (ip0->ttl > 0); + /* Check MTU of outgoing interface. 
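	 If the check fails with IP4_ERROR_MTU_EXCEEDED, the error path
	 below undoes the TTL decrement (ip4_ttl_inc), since the packet
	 comes back through a rewrite node after fragmentation
	 (IP_FRAG_NEXT_IP_REWRITE or IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN) and
	 would otherwise see the TTL dropped twice.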
*/ + u16 ip0_len = clib_net_to_host_u16 (ip0->length); + if (b[0]->flags & VNET_BUFFER_F_GSO) + ip0_len = gso_mtu_sz (b[0]); - ttl0 -= 1; + ip4_mtu_check (b[0], ip0_len, + adj0[0].rewrite_header.max_l3_packet_bytes, + ip0->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), + next + 0, is_midchain, &error0); - ip0->ttl = ttl0; + if (is_mcast) + { + error0 = ((adj0[0].rewrite_header.sw_if_index == + vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ? + IP4_ERROR_SAME_INTERFACE : error0); + } - ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) || - (p0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)); + /* Don't adjust the buffer for ttl issue; icmp-error node wants + * to see the IP header */ + if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) + { + u32 next_index = adj0[0].rewrite_header.next_index; + vlib_buffer_advance (b[0], -(word) rw_len0); + tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index; + vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0; + + if (PREDICT_FALSE + (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) + vnet_feature_arc_start_w_cfg_index (lm->output_feature_arc_index, + tx_sw_if_index0, + &next_index, b[0], + adj0->ia_cfg_index); + next[0] = next_index; - if (PREDICT_FALSE (ttl0 <= 0)) - { - /* - * If the ttl drops below 1 when forwarding, generate - * an ICMP response. - */ - error0 = IP4_ERROR_TIME_EXPIRED; - next0 = IP4_REWRITE_NEXT_ICMP_ERROR; - vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0; - icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded, - ICMP4_time_exceeded_ttl_exceeded_in_transit, - 0); - } - } - else + if (is_midchain) { - p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED; - } - - if (do_counters) - vlib_prefetch_combined_counter (&adjacency_counters, - thread_index, adj_index0); + /* this acts on the packet that is about to be encapped */ + vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ , + 0 /* is_ip6 */ ); - /* Guess we are only writing on simple Ethernet header. */ - vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); - if (is_mcast) - { - /* - * copy bytes from the IP address into the MAC rewrite - */ - vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, - adj0-> - rewrite_header.dst_mcast_offset, - &ip0->dst_address.as_u32, - (u8 *) ip0); + /* Guess we are only writing on ipv4 header. */ + vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t)); } - - /* Update packet buffer attributes/set output interface. */ - rw_len0 = adj0[0].rewrite_header.data_bytes; - vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; + else + /* Guess we are only writing on simple Ethernet header. */ + vnet_rewrite_one_header (adj0[0], ip0, + sizeof (ethernet_header_t)); if (do_counters) vlib_increment_combined_counter (&adjacency_counters, thread_index, adj_index0, 1, - vlib_buffer_length_in_chain (vm, p0) + rw_len0); + vlib_buffer_length_in_chain (vm, b[0]) + rw_len0); - /* Check MTU of outgoing interface. */ - ip4_mtu_check (p0, clib_net_to_host_u16 (ip0->length), - adj0[0].rewrite_header.max_l3_packet_bytes, - ip0->flags_and_fragment_offset & - clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT), - &next0, &error0); + if (is_midchain) + adj_midchain_fixup (vm, adj0, b[0], VNET_LINK_IP4); if (is_mcast) - { - error0 = ((adj0[0].rewrite_header.sw_if_index == - vnet_buffer (p0)->sw_if_index[VLIB_RX]) ? 
- IP4_ERROR_SAME_INTERFACE : error0); - } - p0->error = error_node->errors[error0]; - - /* Don't adjust the buffer for ttl issue; icmp-error node wants - * to see the IP headerr */ - if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) - { - p0->current_data -= rw_len0; - p0->current_length += rw_len0; - tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index; - - vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0; - next0 = adj0[0].rewrite_header.next_index; - - if (is_midchain) - { - adj0->sub_type.midchain.fixup_func - (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); - } - - if (PREDICT_FALSE - (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES)) - vnet_feature_arc_start (lm->output_feature_arc_index, - tx_sw_if_index0, &next0, p0); - - } - - from += 1; - n_left_from -= 1; - to_next += 1; - n_left_to_next -= 1; - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - pi0, next0); + /* copy bytes from the IP address into the MAC rewrite */ + vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK, + adj0->rewrite_header.dst_mcast_offset, + &ip0->dst_address.as_u32, (u8 *) ip0); + } + else + { + b[0]->error = error_node->errors[error0]; + /* undo the TTL decrement - we'll be back to do it again */ + if (error0 == IP4_ERROR_MTU_EXCEEDED) + ip4_ttl_inc (b[0], ip0); } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + next += 1; + b += 1; + n_left_from -= 1; } + /* Need to do trace after rewrites to pick up new packet data. */ if (node->flags & VLIB_NODE_FLAG_TRACE) ip4_forward_next_trace (vm, node, frame, VLIB_TX); + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); return frame->n_vectors; } +always_inline uword +ip4_rewrite_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int do_counters, int is_midchain, int is_mcast) +{ + return ip4_rewrite_inline_with_gso (vm, node, frame, do_counters, + is_midchain, is_mcast); +} + /** @brief IPv4 rewrite node. 
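    The same forwarding template, ip4_rewrite_inline, also backs the
    ip4-rewrite-bcast, ip4-rewrite-mcast, ip4-midchain and
    ip4-mcast-midchain nodes below; they differ only in the is_midchain /
    is_mcast arguments, while do_counters is selected at run time via
    adj_are_counters_enabled ().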
@node ip4-rewrite @@ -2511,9 +2577,19 @@ ip4_rewrite_inline (vlib_main_t * vm, - adj->rewrite_header.next_index or @c ip4-drop */ -static uword -ip4_rewrite (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) + +VLIB_NODE_FN (ip4_rewrite_node) (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + if (adj_are_counters_enabled ()) + return ip4_rewrite_inline (vm, node, frame, 1, 0, 0); + else + return ip4_rewrite_inline (vm, node, frame, 0, 0, 0); +} + +VLIB_NODE_FN (ip4_rewrite_bcast_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { if (adj_are_counters_enabled ()) return ip4_rewrite_inline (vm, node, frame, 1, 0, 0); @@ -2521,9 +2597,9 @@ ip4_rewrite (vlib_main_t * vm, return ip4_rewrite_inline (vm, node, frame, 0, 0, 0); } -static uword -ip4_midchain (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_midchain_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { if (adj_are_counters_enabled ()) return ip4_rewrite_inline (vm, node, frame, 1, 1, 0); @@ -2531,9 +2607,9 @@ ip4_midchain (vlib_main_t * vm, return ip4_rewrite_inline (vm, node, frame, 0, 1, 0); } -static uword -ip4_rewrite_mcast (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_rewrite_mcast_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { if (adj_are_counters_enabled ()) return ip4_rewrite_inline (vm, node, frame, 1, 0, 1); @@ -2541,9 +2617,9 @@ ip4_rewrite_mcast (vlib_main_t * vm, return ip4_rewrite_inline (vm, node, frame, 0, 0, 1); } -static uword -ip4_mcast_midchain (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_mcast_midchain_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { if (adj_are_counters_enabled ()) return ip4_rewrite_inline (vm, node, frame, 1, 1, 1); @@ -2553,7 +2629,6 @@ ip4_mcast_midchain (vlib_main_t * vm, /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_rewrite_node) = { - .function = ip4_rewrite, .name = "ip4-rewrite", .vector_size = sizeof (u32), @@ -2566,39 +2641,40 @@ VLIB_REGISTER_NODE (ip4_rewrite_node) = { [IP4_REWRITE_NEXT_FRAGMENT] = "ip4-frag", }, }; -VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_node, ip4_rewrite) + +VLIB_REGISTER_NODE (ip4_rewrite_bcast_node) = { + .name = "ip4-rewrite-bcast", + .vector_size = sizeof (u32), + + .format_trace = format_ip4_rewrite_trace, + .sibling_of = "ip4-rewrite", +}; VLIB_REGISTER_NODE (ip4_rewrite_mcast_node) = { - .function = ip4_rewrite_mcast, .name = "ip4-rewrite-mcast", .vector_size = sizeof (u32), .format_trace = format_ip4_rewrite_trace, .sibling_of = "ip4-rewrite", }; -VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_mcast_node, ip4_rewrite_mcast) -VLIB_REGISTER_NODE (ip4_mcast_midchain_node, static) = { - .function = ip4_mcast_midchain, +VLIB_REGISTER_NODE (ip4_mcast_midchain_node) = { .name = "ip4-mcast-midchain", .vector_size = sizeof (u32), .format_trace = format_ip4_rewrite_trace, .sibling_of = "ip4-rewrite", }; -VLIB_NODE_FUNCTION_MULTIARCH (ip4_mcast_midchain_node, ip4_mcast_midchain) VLIB_REGISTER_NODE (ip4_midchain_node) = { - .function = ip4_midchain, .name = "ip4-midchain", .vector_size = sizeof (u32), - .format_trace = format_ip4_forward_next_trace, - .sibling_of = "ip4-rewrite", + .format_trace = format_ip4_rewrite_trace, + .sibling_of = "ip4-rewrite", }; -VLIB_NODE_FUNCTION_MULTIARCH (ip4_midchain_node, ip4_midchain); /* *INDENT-ON */ -int +static int ip4_lookup_validate 
(ip4_address_t * a, u32 fib_index0) { ip4_fib_mtrie_t *mtrie0; @@ -2695,22 +2771,6 @@ VLIB_CLI_COMMAND (lookup_test_command, static) = }; /* *INDENT-ON* */ -int -vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config) -{ - u32 fib_index; - - fib_index = fib_table_find (FIB_PROTOCOL_IP4, table_id); - - if (~0 == fib_index) - return VNET_API_ERROR_NO_SUCH_FIB; - - fib_table_set_flow_hash_config (fib_index, FIB_PROTOCOL_IP4, - flow_hash_config); - - return 0; -} - static clib_error_t * set_ip_flow_hash_command_fn (vlib_main_t * vm, unformat_input_t * input, @@ -2725,8 +2785,12 @@ set_ip_flow_hash_command_fn (vlib_main_t * vm, { if (unformat (input, "table %d", &table_id)) matched = 1; -#define _(a,v) \ - else if (unformat (input, #a)) { flow_hash_config |= v; matched=1;} +#define _(a, b, v) \ + else if (unformat (input, #a)) \ + { \ + flow_hash_config |= v; \ + matched = 1; \ + } foreach_flow_hash_bit #undef _ else @@ -2737,7 +2801,7 @@ set_ip_flow_hash_command_fn (vlib_main_t * vm, return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); - rv = vnet_set_ip4_flow_hash (table_id, flow_hash_config); + rv = ip_flow_hash_set (AF_IP4, table_id, flow_hash_config); switch (rv) { case 0: @@ -2847,6 +2911,7 @@ VLIB_CLI_COMMAND (set_ip_flow_hash_command, static) = }; /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT int vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index, u32 table_index) @@ -2906,6 +2971,7 @@ vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index, return 0; } +#endif static clib_error_t * set_ip_classify_command_fn (vlib_main_t * vm, @@ -2970,29 +3036,6 @@ VLIB_CLI_COMMAND (set_ip_classify_command, static) = }; /* *INDENT-ON* */ -static clib_error_t * -ip4_config (vlib_main_t * vm, unformat_input_t * input) -{ - ip4_main_t *im = &ip4_main; - uword heapsize = 0; - - while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) - { - if (unformat (input, "heap-size %U", unformat_memory_size, &heapsize)) - ; - else - return clib_error_return (0, - "invalid heap-size parameter `%U'", - format_unformat_error, input); - } - - im->mtrie_heap_size = heapsize; - - return 0; -} - -VLIB_EARLY_CONFIG_FUNCTION (ip4_config, "ip"); - /* * fd.io coding-style-patch-verification: ON *