#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip_frag.h>
#include <vnet/ethernet/ethernet.h> /* for ethernet_header_t */
#include <vnet/ethernet/arp_packet.h> /* for ethernet_arp_header_t */
#include <vnet/ppp/ppp.h>
#include <vnet/dpo/classify_dpo.h>
#include <vnet/mfib/mfib_table.h> /* for mFIB table and entry creation */
-/**
- * @file
- * @brief IPv4 Forwarding.
- *
- * This file contains the source code for IPv4 forwarding.
- */
-
-void
-ip4_forward_next_trace (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- vlib_rx_or_tx_t which_adj_index);
-
-always_inline uword
-ip4_lookup_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- int lookup_for_responses_to_locally_received_packets)
-{
- ip4_main_t *im = &ip4_main;
- vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
- u32 n_left_from, n_left_to_next, *from, *to_next;
- ip_lookup_next_t next;
- u32 thread_index = vlib_get_thread_index ();
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next = node->cached_next_index;
-
- while (n_left_from > 0)
- {
- vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
-
- while (n_left_from >= 8 && n_left_to_next >= 4)
- {
- vlib_buffer_t *p0, *p1, *p2, *p3;
- ip4_header_t *ip0, *ip1, *ip2, *ip3;
- ip_lookup_next_t next0, next1, next2, next3;
- const load_balance_t *lb0, *lb1, *lb2, *lb3;
- ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
- ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
- ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
- u32 pi0, fib_index0, lb_index0;
- u32 pi1, fib_index1, lb_index1;
- u32 pi2, fib_index2, lb_index2;
- u32 pi3, fib_index3, lb_index3;
- flow_hash_config_t flow_hash_config0, flow_hash_config1;
- flow_hash_config_t flow_hash_config2, flow_hash_config3;
- u32 hash_c0, hash_c1, hash_c2, hash_c3;
- const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p4, *p5, *p6, *p7;
-
- p4 = vlib_get_buffer (vm, from[4]);
- p5 = vlib_get_buffer (vm, from[5]);
- p6 = vlib_get_buffer (vm, from[6]);
- p7 = vlib_get_buffer (vm, from[7]);
-
- vlib_prefetch_buffer_header (p4, LOAD);
- vlib_prefetch_buffer_header (p5, LOAD);
- vlib_prefetch_buffer_header (p6, LOAD);
- vlib_prefetch_buffer_header (p7, LOAD);
-
- CLIB_PREFETCH (p4->data, sizeof (ip0[0]), LOAD);
- CLIB_PREFETCH (p5->data, sizeof (ip0[0]), LOAD);
- CLIB_PREFETCH (p6->data, sizeof (ip0[0]), LOAD);
- CLIB_PREFETCH (p7->data, sizeof (ip0[0]), LOAD);
- }
-
- pi0 = to_next[0] = from[0];
- pi1 = to_next[1] = from[1];
- pi2 = to_next[2] = from[2];
- pi3 = to_next[3] = from[3];
-
- from += 4;
- to_next += 4;
- n_left_to_next -= 4;
- n_left_from -= 4;
-
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
- p2 = vlib_get_buffer (vm, pi2);
- p3 = vlib_get_buffer (vm, pi3);
-
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
- ip2 = vlib_buffer_get_current (p2);
- ip3 = vlib_buffer_get_current (p3);
-
- dst_addr0 = &ip0->dst_address;
- dst_addr1 = &ip1->dst_address;
- dst_addr2 = &ip2->dst_address;
- dst_addr3 = &ip3->dst_address;
-
- fib_index0 =
- vec_elt (im->fib_index_by_sw_if_index,
- vnet_buffer (p0)->sw_if_index[VLIB_RX]);
- fib_index1 =
- vec_elt (im->fib_index_by_sw_if_index,
- vnet_buffer (p1)->sw_if_index[VLIB_RX]);
- fib_index2 =
- vec_elt (im->fib_index_by_sw_if_index,
- vnet_buffer (p2)->sw_if_index[VLIB_RX]);
- fib_index3 =
- vec_elt (im->fib_index_by_sw_if_index,
- vnet_buffer (p3)->sw_if_index[VLIB_RX]);
- fib_index0 =
- (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
- fib_index1 =
- (vnet_buffer (p1)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
- fib_index2 =
- (vnet_buffer (p2)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index2 : vnet_buffer (p2)->sw_if_index[VLIB_TX];
- fib_index3 =
- (vnet_buffer (p3)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index3 : vnet_buffer (p3)->sw_if_index[VLIB_TX];
-
-
- if (!lookup_for_responses_to_locally_received_packets)
- {
- mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
- mtrie1 = &ip4_fib_get (fib_index1)->mtrie;
- mtrie2 = &ip4_fib_get (fib_index2)->mtrie;
- mtrie3 = &ip4_fib_get (fib_index3)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
- leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1);
- leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2);
- leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
- }
-
- if (!lookup_for_responses_to_locally_received_packets)
- {
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2);
- leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2);
- leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2);
- }
-
- if (!lookup_for_responses_to_locally_received_packets)
- {
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3);
- leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3);
- leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3);
- }
-
- if (lookup_for_responses_to_locally_received_packets)
- {
- lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
- lb_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_RX];
- lb_index2 = vnet_buffer (p2)->ip.adj_index[VLIB_RX];
- lb_index3 = vnet_buffer (p3)->ip.adj_index[VLIB_RX];
- }
- else
- {
- lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
- lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
- lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
- }
-
- ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
- lb0 = load_balance_get (lb_index0);
- lb1 = load_balance_get (lb_index1);
- lb2 = load_balance_get (lb_index2);
- lb3 = load_balance_get (lb_index3);
-
- ASSERT (lb0->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb0->lb_n_buckets));
- ASSERT (lb1->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb1->lb_n_buckets));
- ASSERT (lb2->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb2->lb_n_buckets));
- ASSERT (lb3->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb3->lb_n_buckets));
-
- /* Use flow hash to compute multipath adjacency. */
- hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
- hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
- hash_c2 = vnet_buffer (p2)->ip.flow_hash = 0;
- hash_c3 = vnet_buffer (p3)->ip.flow_hash = 0;
- if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
- {
- flow_hash_config0 = lb0->lb_hash_config;
- hash_c0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, flow_hash_config0);
- dpo0 =
- load_balance_get_fwd_bucket (lb0,
- (hash_c0 &
- (lb0->lb_n_buckets_minus_1)));
- }
- else
- {
- dpo0 = load_balance_get_bucket_i (lb0, 0);
- }
- if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
- {
- flow_hash_config1 = lb1->lb_hash_config;
- hash_c1 = vnet_buffer (p1)->ip.flow_hash =
- ip4_compute_flow_hash (ip1, flow_hash_config1);
- dpo1 =
- load_balance_get_fwd_bucket (lb1,
- (hash_c1 &
- (lb1->lb_n_buckets_minus_1)));
- }
- else
- {
- dpo1 = load_balance_get_bucket_i (lb1, 0);
- }
- if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
- {
- flow_hash_config2 = lb2->lb_hash_config;
- hash_c2 = vnet_buffer (p2)->ip.flow_hash =
- ip4_compute_flow_hash (ip2, flow_hash_config2);
- dpo2 =
- load_balance_get_fwd_bucket (lb2,
- (hash_c2 &
- (lb2->lb_n_buckets_minus_1)));
- }
- else
- {
- dpo2 = load_balance_get_bucket_i (lb2, 0);
- }
- if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
- {
- flow_hash_config3 = lb3->lb_hash_config;
- hash_c3 = vnet_buffer (p3)->ip.flow_hash =
- ip4_compute_flow_hash (ip3, flow_hash_config3);
- dpo3 =
- load_balance_get_fwd_bucket (lb3,
- (hash_c3 &
- (lb3->lb_n_buckets_minus_1)));
- }
- else
- {
- dpo3 = load_balance_get_bucket_i (lb3, 0);
- }
-
- next0 = dpo0->dpoi_next_node;
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
- next1 = dpo1->dpoi_next_node;
- vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
- next2 = dpo2->dpoi_next_node;
- vnet_buffer (p2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
- next3 = dpo3->dpoi_next_node;
- vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
-
- vlib_increment_combined_counter
- (cm, thread_index, lb_index0, 1,
- vlib_buffer_length_in_chain (vm, p0));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index1, 1,
- vlib_buffer_length_in_chain (vm, p1));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index2, 1,
- vlib_buffer_length_in_chain (vm, p2));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index3, 1,
- vlib_buffer_length_in_chain (vm, p3));
-
- vlib_validate_buffer_enqueue_x4 (vm, node, next,
- to_next, n_left_to_next,
- pi0, pi1, pi2, pi3,
- next0, next1, next2, next3);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- ip_lookup_next_t next0;
- const load_balance_t *lb0;
- ip4_fib_mtrie_t *mtrie0;
- ip4_fib_mtrie_leaf_t leaf0;
- ip4_address_t *dst_addr0;
- u32 pi0, fib_index0, lbi0;
- flow_hash_config_t flow_hash_config0;
- const dpo_id_t *dpo0;
- u32 hash_c0;
-
- pi0 = from[0];
- to_next[0] = pi0;
-
- p0 = vlib_get_buffer (vm, pi0);
-
- ip0 = vlib_buffer_get_current (p0);
-
- dst_addr0 = &ip0->dst_address;
-
- fib_index0 =
- vec_elt (im->fib_index_by_sw_if_index,
- vnet_buffer (p0)->sw_if_index[VLIB_RX]);
- fib_index0 =
- (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
-
- if (!lookup_for_responses_to_locally_received_packets)
- {
- mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
- }
-
- if (!lookup_for_responses_to_locally_received_packets)
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
-
- if (!lookup_for_responses_to_locally_received_packets)
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3);
-
- if (lookup_for_responses_to_locally_received_packets)
- lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX];
- else
- {
- /* Handle default route. */
- lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- }
-
- ASSERT (lbi0);
- lb0 = load_balance_get (lbi0);
-
- ASSERT (lb0->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb0->lb_n_buckets));
-
- /* Use flow hash to compute multipath adjacency. */
- hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
- if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
- {
- flow_hash_config0 = lb0->lb_hash_config;
-
- hash_c0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, flow_hash_config0);
- dpo0 =
- load_balance_get_fwd_bucket (lb0,
- (hash_c0 &
- (lb0->lb_n_buckets_minus_1)));
- }
- else
- {
- dpo0 = load_balance_get_bucket_i (lb0, 0);
- }
-
- next0 = dpo0->dpoi_next_node;
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-
- vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
- vlib_buffer_length_in_chain (vm,
- p0));
-
- from += 1;
- to_next += 1;
- n_left_to_next -= 1;
- n_left_from -= 1;
-
- if (PREDICT_FALSE (next0 != next))
- {
- n_left_to_next += 1;
- vlib_put_next_frame (vm, node, next, n_left_to_next);
- next = next0;
- vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
- to_next[0] = pi0;
- to_next += 1;
- n_left_to_next -= 1;
- }
- }
-
- vlib_put_next_frame (vm, node, next, n_left_to_next);
- }
-
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- ip4_forward_next_trace (vm, node, frame, VLIB_TX);
-
- return frame->n_vectors;
-}
+#include <vnet/ip/ip4_forward.h>
+#include <vnet/interface_output.h>
/** @brief IPv4 lookup node.
@node ip4-lookup
ip_adjacency_t @c adj->lookup_next_index
(where @c adj is the lookup result adjacency).
*/
-static uword
-ip4_lookup (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_lookup_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return ip4_lookup_inline (vm, node, frame,
- /* lookup_for_responses_to_locally_received_packets */
- 0);
-
+ return ip4_lookup_inline (vm, node, frame);
}
static u8 *format_ip4_lookup_trace (u8 * s, va_list * args);
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_lookup_node) =
{
-.function = ip4_lookup,.name = "ip4-lookup",.vector_size =
- sizeof (u32),.format_trace = format_ip4_lookup_trace,.n_next_nodes =
- IP_LOOKUP_N_NEXT,.next_nodes = IP4_LOOKUP_NEXT_NODES,};
-
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_node, ip4_lookup);
+ .name = "ip4-lookup",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip4_lookup_trace,
+ .n_next_nodes = IP_LOOKUP_N_NEXT,
+ .next_nodes = IP4_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
-always_inline uword
-ip4_load_balance (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_load_balance_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
- u32 n_left_from, n_left_to_next, *from, *to_next;
- ip_lookup_next_t next;
- u32 thread_index = vlib_get_thread_index ();
+ u32 n_left, *from;
+ u32 thread_index = vm->thread_index;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next = node->cached_next_index;
+ n_left = frame->n_vectors;
+ next = nexts;
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+ vlib_get_buffers (vm, from, bufs, n_left);
- while (n_left_from > 0)
+ while (n_left >= 4)
{
- vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ const load_balance_t *lb0, *lb1;
+ const ip4_header_t *ip0, *ip1;
+ u32 lbi0, hc0, lbi1, hc1;
+ const dpo_id_t *dpo0, *dpo1;
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+
+ CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
+ }
+
+ ip0 = vlib_buffer_get_current (b[0]);
+ ip1 = vlib_buffer_get_current (b[1]);
+ lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+ lbi1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+
+ /*
+  * This node is for via-FIBs; we can re-use the hash value from the
+  * previous node if it is present.
+  * We don't want to use the same hash value at each level in the
+  * recursion graph, as that would lead to polarisation.
+  */
+ hc0 = hc1 = 0;
+
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
{
- ip_lookup_next_t next0, next1;
- const load_balance_t *lb0, *lb1;
- vlib_buffer_t *p0, *p1;
- u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
- const ip4_header_t *ip0, *ip1;
- const dpo_id_t *dpo0, *dpo1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, STORE);
- vlib_prefetch_buffer_header (p3, STORE);
-
- CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
- CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
- }
-
- pi0 = to_next[0] = from[0];
- pi1 = to_next[1] = from[1];
-
- from += 2;
- n_left_from -= 2;
- to_next += 2;
- n_left_to_next -= 2;
-
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
-
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
- lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
- lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
-
- lb0 = load_balance_get (lbi0);
- lb1 = load_balance_get (lbi1);
-
- /*
- * this node is for via FIBs we can re-use the hash value from the
- * to node if present.
- * We don't want to use the same hash value at each level in the recursion
- * graph as that would lead to polarisation
- */
- hc0 = hc1 = 0;
-
- if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
{
- if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
- {
- hc0 = vnet_buffer (p0)->ip.flow_hash =
- vnet_buffer (p0)->ip.flow_hash >> 1;
- }
- else
- {
- hc0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
- }
- dpo0 = load_balance_get_fwd_bucket
- (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+ hc0 = vnet_buffer (b[0])->ip.flow_hash =
+ vnet_buffer (b[0])->ip.flow_hash >> 1;
}
else
{
- dpo0 = load_balance_get_bucket_i (lb0, 0);
+ hc0 = vnet_buffer (b[0])->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
}
- if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ dpo0 = load_balance_get_fwd_bucket
+ (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ }
+ if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer (b[1])->ip.flow_hash))
{
- if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
- {
- hc1 = vnet_buffer (p1)->ip.flow_hash =
- vnet_buffer (p1)->ip.flow_hash >> 1;
- }
- else
- {
- hc1 = vnet_buffer (p1)->ip.flow_hash =
- ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
- }
- dpo1 = load_balance_get_fwd_bucket
- (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+ hc1 = vnet_buffer (b[1])->ip.flow_hash =
+ vnet_buffer (b[1])->ip.flow_hash >> 1;
}
else
{
- dpo1 = load_balance_get_bucket_i (lb1, 0);
+ hc1 = vnet_buffer (b[1])->ip.flow_hash =
+ ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
}
+ dpo1 = load_balance_get_fwd_bucket
+ (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
+ }
- next0 = dpo0->dpoi_next_node;
- next1 = dpo1->dpoi_next_node;
-
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
- vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
-
- vlib_increment_combined_counter
- (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
- vlib_increment_combined_counter
- (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+ next[0] = dpo0->dpoi_next_node;
+ next[1] = dpo1->dpoi_next_node;
- vlib_validate_buffer_enqueue_x2 (vm, node, next,
- to_next, n_left_to_next,
- pi0, pi1, next0, next1);
- }
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- ip_lookup_next_t next0;
- const load_balance_t *lb0;
- vlib_buffer_t *p0;
- u32 pi0, lbi0, hc0;
- const ip4_header_t *ip0;
- const dpo_id_t *dpo0;
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b[1]));
- pi0 = from[0];
- to_next[0] = pi0;
- from += 1;
- to_next += 1;
- n_left_to_next -= 1;
- n_left_from -= 1;
+ b += 2;
+ next += 2;
+ n_left -= 2;
+ }
- p0 = vlib_get_buffer (vm, pi0);
+ while (n_left > 0)
+ {
+ const load_balance_t *lb0;
+ const ip4_header_t *ip0;
+ const dpo_id_t *dpo0;
+ u32 lbi0, hc0;
- ip0 = vlib_buffer_get_current (p0);
- lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ ip0 = vlib_buffer_get_current (b[0]);
+ lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
- lb0 = load_balance_get (lbi0);
+ lb0 = load_balance_get (lbi0);
- hc0 = 0;
- if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ hc0 = 0;
+ if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+ {
+ if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
{
- if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
- {
- hc0 = vnet_buffer (p0)->ip.flow_hash =
- vnet_buffer (p0)->ip.flow_hash >> 1;
- }
- else
- {
- hc0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
- }
- dpo0 = load_balance_get_fwd_bucket
- (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+ hc0 = vnet_buffer (b[0])->ip.flow_hash =
+ vnet_buffer (b[0])->ip.flow_hash >> 1;
}
else
{
- dpo0 = load_balance_get_bucket_i (lb0, 0);
+ hc0 = vnet_buffer (b[0])->ip.flow_hash =
+ ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
}
+ dpo0 = load_balance_get_fwd_bucket
+ (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ }
- next0 = dpo0->dpoi_next_node;
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-
- vlib_increment_combined_counter
- (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ next[0] = dpo0->dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
- vlib_validate_buffer_enqueue_x1 (vm, node, next,
- to_next, n_left_to_next,
- pi0, next0);
- }
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
- vlib_put_next_frame (vm, node, next, n_left_to_next);
+ b += 1;
+ next += 1;
+ n_left -= 1;
}
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
return frame->n_vectors;
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_load_balance_node) =
{
-.function = ip4_load_balance,.name = "ip4-load-balance",.vector_size =
- sizeof (u32),.sibling_of = "ip4-lookup",.format_trace =
- format_ip4_lookup_trace,};
-
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_load_balance_node, ip4_load_balance);
+ .name = "ip4-load-balance",
+ .vector_size = sizeof (u32),
+ .sibling_of = "ip4-lookup",
+ .format_trace = format_ip4_lookup_trace,
+};
+/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
/* get first interface address */
ip4_address_t *
ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index,
*result_ia = result ? ia : 0;
return result;
}
+#endif
static void
-ip4_add_interface_routes (u32 sw_if_index,
- ip4_main_t * im, u32 fib_index,
- ip_interface_address_t * a)
+ip4_add_subnet_bcast_route (u32 fib_index,
+ fib_prefix_t *pfx,
+ u32 sw_if_index)
{
- ip_lookup_main_t *lm = &im->lookup_main;
- ip4_address_t *address = ip_interface_address_get_address (lm, a);
- fib_prefix_t pfx = {
- .fp_len = a->address_length,
- .fp_proto = FIB_PROTOCOL_IP4,
- .fp_addr.ip4 = *address,
- };
+ vnet_sw_interface_flags_t iflags;
- if (pfx.fp_len <= 30)
+ iflags = vnet_sw_interface_get_flags(vnet_get_main(), sw_if_index);
+
+ fib_table_entry_special_remove(fib_index,
+ pfx,
+ FIB_SOURCE_INTERFACE);
+
+ if (iflags & VNET_SW_INTERFACE_FLAG_DIRECTED_BCAST)
{
- /* a /30 or shorter - add a glean for the network address */
- fib_table_entry_update_one_path (fib_index, &pfx,
+ fib_table_entry_update_one_path (fib_index, pfx,
FIB_SOURCE_INTERFACE,
- (FIB_ENTRY_FLAG_CONNECTED |
- FIB_ENTRY_FLAG_ATTACHED),
- FIB_PROTOCOL_IP4,
+ FIB_ENTRY_FLAG_NONE,
+ DPO_PROTO_IP4,
/* No next-hop address */
- NULL,
+ &ADJ_BCAST_ADDR,
sw_if_index,
// invalid FIB index
~0,
// no out-label stack
NULL,
FIB_ROUTE_PATH_FLAG_NONE);
-
- /* Add the two broadcast addresses as drop */
- fib_prefix_t net_pfx = {
- .fp_len = 32,
- .fp_proto = FIB_PROTOCOL_IP4,
- .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[pfx.fp_len],
- };
- if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32)
- fib_table_entry_special_add(fib_index,
- &net_pfx,
- FIB_SOURCE_INTERFACE,
- (FIB_ENTRY_FLAG_DROP |
- FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT));
- net_pfx.fp_addr.ip4.as_u32 |= ~im->fib_masks[pfx.fp_len];
- if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32)
+ }
+ else
+ {
fib_table_entry_special_add(fib_index,
- &net_pfx,
+ pfx,
FIB_SOURCE_INTERFACE,
(FIB_ENTRY_FLAG_DROP |
FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT));
}
- else if (pfx.fp_len == 31)
+}
+
+static void
+ip4_add_interface_prefix_routes (ip4_main_t *im,
+ u32 sw_if_index,
+ u32 fib_index,
+ ip_interface_address_t * a)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_prefix_t *if_prefix;
+ ip4_address_t *address = ip_interface_address_get_address (lm, a);
+
+ ip_interface_prefix_key_t key = {
+ .prefix = {
+ .fp_len = a->address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[a->address_length],
+ },
+ .sw_if_index = sw_if_index,
+ };
+
+ fib_prefix_t pfx_special = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+
+ /* If prefix already set on interface, just increment ref count & return */
+ if_prefix = ip_get_interface_prefix (lm, &key);
+ if (if_prefix)
{
- u32 mask = clib_host_to_net_u32(1);
- fib_prefix_t net_pfx = pfx;
+ if_prefix->ref_count += 1;
+ return;
+ }
- net_pfx.fp_len = 32;
- net_pfx.fp_addr.ip4.as_u32 ^= mask;
+ /* New prefix - allocate a pool entry, initialize it, add to the hash */
+ pool_get (lm->if_prefix_pool, if_prefix);
+ if_prefix->ref_count = 1;
+ if_prefix->src_ia_index = a - lm->if_address_pool;
+ clib_memcpy (&if_prefix->key, &key, sizeof (key));
+ mhash_set (&lm->prefix_to_if_prefix_index, &key,
+ if_prefix - lm->if_prefix_pool, 0 /* old value */);
- /* a /31 - add the other end as an attached host */
- fib_table_entry_update_one_path (fib_index, &net_pfx,
- FIB_SOURCE_INTERFACE,
- (FIB_ENTRY_FLAG_ATTACHED),
- FIB_PROTOCOL_IP4,
- &net_pfx.fp_addr,
- sw_if_index,
- // invalid FIB index
+ /* length <= 30 - add glean, drop first address, maybe drop bcast address */
+ if (a->address_length <= 30)
+ {
+ pfx_special.fp_len = a->address_length;
+ pfx_special.fp_addr.ip4.as_u32 = address->as_u32;
+
+ /* set the glean route for the prefix */
+ fib_table_entry_update_one_path (fib_index, &pfx_special,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ DPO_PROTO_IP4,
+ /* No next-hop address */
+ NULL,
+ sw_if_index,
+ /* invalid FIB index */
~0,
1,
+ /* no out-label stack */
NULL,
FIB_ROUTE_PATH_FLAG_NONE);
+
+ /* set a drop route for the base address of the prefix */
+ pfx_special.fp_len = 32;
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 & im->fib_masks[a->address_length];
+
+ if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32)
+ fib_table_entry_special_add (fib_index, &pfx_special,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_DROP |
+ FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT));
+
+ /* set a route for the broadcast address of the prefix */
+ pfx_special.fp_len = 32;
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 | ~im->fib_masks[a->address_length];
+ if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32)
+ ip4_add_subnet_bcast_route (fib_index, &pfx_special, sw_if_index);
+
+
}
- pfx.fp_len = 32;
+ /* length == 31 - add an attached route for the other address */
+ else if (a->address_length == 31)
+ {
+ pfx_special.fp_len = 32;
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 ^ clib_host_to_net_u32(1);
+
+ fib_table_entry_update_one_path (fib_index, &pfx_special,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_ATTACHED),
+ DPO_PROTO_IP4,
+ &pfx_special.fp_addr,
+ sw_if_index,
+ /* invalid FIB index */
+ ~0,
+ 1,
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ }
+}
+
+static void
+ip4_add_interface_routes (u32 sw_if_index,
+ ip4_main_t * im, u32 fib_index,
+ ip_interface_address_t * a)
+{
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip4_address_t *address = ip_interface_address_get_address (lm, a);
+ fib_prefix_t pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4 = *address,
+ };
+
+ /* set special routes for the prefix if needed */
+ ip4_add_interface_prefix_routes (im, sw_if_index, fib_index, a);
if (sw_if_index < vec_len (lm->classify_table_index_by_sw_if_index))
{
FIB_SOURCE_INTERFACE,
(FIB_ENTRY_FLAG_CONNECTED |
FIB_ENTRY_FLAG_LOCAL),
- FIB_PROTOCOL_IP4,
+ DPO_PROTO_IP4,
&pfx.fp_addr,
sw_if_index,
// invalid FIB index
}
static void
-ip4_del_interface_routes (ip4_main_t * im,
- u32 fib_index,
- ip4_address_t * address, u32 address_length)
+ip4_del_interface_prefix_routes (ip4_main_t * im,
+ u32 sw_if_index,
+ u32 fib_index,
+ ip4_address_t * address,
+ u32 address_length)
{
- fib_prefix_t pfx = {
- .fp_len = address_length,
+ ip_lookup_main_t *lm = &im->lookup_main;
+ ip_interface_prefix_t *if_prefix;
+
+ ip_interface_prefix_key_t key = {
+ .prefix = {
+ .fp_len = address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[address_length],
+ },
+ .sw_if_index = sw_if_index,
+ };
+
+ fib_prefix_t pfx_special = {
+ .fp_len = 32,
.fp_proto = FIB_PROTOCOL_IP4,
- .fp_addr.ip4 = *address,
};
- if (pfx.fp_len <= 30)
+ if_prefix = ip_get_interface_prefix (lm, &key);
+ if (!if_prefix)
{
- fib_prefix_t net_pfx = {
- .fp_len = 32,
- .fp_proto = FIB_PROTOCOL_IP4,
- .fp_addr.ip4.as_u32 = address->as_u32 & im->fib_masks[pfx.fp_len],
- };
- if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32)
- fib_table_entry_special_remove(fib_index,
- &net_pfx,
- FIB_SOURCE_INTERFACE);
- net_pfx.fp_addr.ip4.as_u32 |= ~im->fib_masks[pfx.fp_len];
- if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32)
- fib_table_entry_special_remove(fib_index,
- &net_pfx,
- FIB_SOURCE_INTERFACE);
- fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
+ clib_warning ("Prefix not found while deleting %U",
+ format_ip4_address_and_length, address, address_length);
+ return;
}
- else if (pfx.fp_len == 31)
+
+ if_prefix->ref_count -= 1;
+
+ /*
+ * Routes need to be adjusted if:
+ * - deleting last intf addr in prefix
+ * - deleting intf addr used as default source address in glean adjacency
+ *
+ * We're done now otherwise
+ */
+ if ((if_prefix->ref_count > 0) &&
+ !pool_is_free_index (lm->if_address_pool, if_prefix->src_ia_index))
+ return;
+
+ /* length <= 30, delete glean route, first address, last address */
+ if (address_length <= 30)
{
- u32 mask = clib_host_to_net_u32(1);
- fib_prefix_t net_pfx = pfx;
- net_pfx.fp_len = 32;
- net_pfx.fp_addr.ip4.as_u32 ^= mask;
+ /* remove glean route for prefix */
+ pfx_special.fp_addr.ip4 = *address;
+ pfx_special.fp_len = address_length;
+ fib_table_entry_delete (fib_index, &pfx_special, FIB_SOURCE_INTERFACE);
+
+ /* if no more intf addresses in prefix, remove other special routes */
+ if (!if_prefix->ref_count)
+ {
+ /* first address in prefix */
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 & im->fib_masks[address_length];
+ pfx_special.fp_len = 32;
+
+ if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32)
+ fib_table_entry_special_remove (fib_index,
+ &pfx_special,
+ FIB_SOURCE_INTERFACE);
+
+ /* prefix broadcast address */
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 | ~im->fib_masks[address_length];
+ pfx_special.fp_len = 32;
- fib_table_entry_delete (fib_index, &net_pfx, FIB_SOURCE_INTERFACE);
+ if (pfx_special.fp_addr.ip4.as_u32 != address->as_u32)
+ fib_table_entry_special_remove (fib_index,
+ &pfx_special,
+ FIB_SOURCE_INTERFACE);
+ }
+ else
+ /* default source addr just got deleted, find another */
+ {
+ ip_interface_address_t *new_src_ia = NULL;
+ ip4_address_t *new_src_addr = NULL;
+
+ new_src_addr =
+ ip4_interface_address_matching_destination
+ (im, address, sw_if_index, &new_src_ia);
+
+ if_prefix->src_ia_index = new_src_ia - lm->if_address_pool;
+
+ pfx_special.fp_len = address_length;
+ pfx_special.fp_addr.ip4 = *new_src_addr;
+
+ /* set new glean route for the prefix */
+ fib_table_entry_update_one_path (fib_index, &pfx_special,
+ FIB_SOURCE_INTERFACE,
+ (FIB_ENTRY_FLAG_CONNECTED |
+ FIB_ENTRY_FLAG_ATTACHED),
+ DPO_PROTO_IP4,
+ /* No next-hop address */
+ NULL,
+ sw_if_index,
+ /* invalid FIB index */
+ ~0,
+ 1,
+ /* no out-label stack */
+ NULL,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ return;
+ }
}
+ /* length == 31, delete attached route for the other address */
+ else if (address_length == 31)
+ {
+ pfx_special.fp_addr.ip4.as_u32 =
+ address->as_u32 ^ clib_host_to_net_u32(1);
+
+ fib_table_entry_delete (fib_index, &pfx_special, FIB_SOURCE_INTERFACE);
+ }
+
+ mhash_unset (&lm->prefix_to_if_prefix_index, &key, 0 /* old_value */);
+ pool_put (lm->if_prefix_pool, if_prefix);
+}
+
+static void
+ip4_del_interface_routes (u32 sw_if_index,
+ ip4_main_t * im,
+ u32 fib_index,
+ ip4_address_t * address, u32 address_length)
+{
+ fib_prefix_t pfx = {
+ .fp_len = address_length,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr.ip4 = *address,
+ };
+
+ ip4_del_interface_prefix_routes (im, sw_if_index, fib_index,
+ address, address_length);
pfx.fp_len = 32;
fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
}
+#ifndef CLIB_MARCH_VARIANT
void
ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable)
{
if (0 != --im->ip_enabled_by_sw_if_index[sw_if_index])
return;
}
- vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index,
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-not-enabled", sw_if_index,
!is_enable, 0, 0);
- vnet_feature_enable_disable ("ip4-multicast", "ip4-drop",
+ vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled",
sw_if_index, !is_enable, 0, 0);
+
+ {
+ ip4_enable_disable_interface_callback_t *cb;
+ vec_foreach (cb, im->enable_disable_interface_callbacks)
+ cb->function (im, cb->function_opaque, sw_if_index, is_enable);
+ }
}
static clib_error_t *
u32 if_address_index, elts_before;
ip4_address_fib_t ip4_af, *addr_fib = 0;
+ /* local0 interface doesn't support IP addressing */
+ if (sw_if_index == 0)
+ {
+ return
+ clib_error_create ("local0 interface doesn't support IP addressing");
+ }
+
vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
ip4_addr_fib_init (&ip4_af, address,
vec_elt (im->fib_index_by_sw_if_index, sw_if_index));
vec_add1 (addr_fib, ip4_af);
- /* FIXME-LATER
+ /*
* there is no support for adj-fib handling in the presence of overlapping
* subnets on interfaces. Easy fix - disallow overlapping subnets, like
* most routers do.
if (!is_del)
{
/* When adding an address check that it does not conflict
- with an existing address. */
+ with an existing address on any interface in this table. */
ip_interface_address_t *ia;
- foreach_ip_interface_address
- (&im->lookup_main, ia, sw_if_index,
- 0 /* honor unnumbered */ ,
- ({
- ip4_address_t * x =
- ip_interface_address_get_address
- (&im->lookup_main, ia);
- if (ip4_destination_matches_route
- (im, address, x, ia->address_length) ||
- ip4_destination_matches_route (im,
- x,
- address,
- address_length))
- return
- clib_error_create
- ("failed to add %U which conflicts with %U for interface %U",
- format_ip4_address_and_length, address,
- address_length,
- format_ip4_address_and_length, x,
- ia->address_length,
- format_vnet_sw_if_index_name, vnm,
- sw_if_index);
- }));
+ vnet_sw_interface_t *sif;
+
+ pool_foreach(sif, vnm->interface_main.sw_interfaces,
+ ({
+ if (im->fib_index_by_sw_if_index[sw_if_index] ==
+ im->fib_index_by_sw_if_index[sif->sw_if_index])
+ {
+ foreach_ip_interface_address
+ (&im->lookup_main, ia, sif->sw_if_index,
+ 0 /* honor unnumbered */ ,
+ ({
+ ip4_address_t * x =
+ ip_interface_address_get_address
+ (&im->lookup_main, ia);
+ if (ip4_destination_matches_route
+ (im, address, x, ia->address_length) ||
+ ip4_destination_matches_route (im,
+ x,
+ address,
+ address_length))
+ {
+ /* an intf may have >1 addr from the same prefix */
+ if ((sw_if_index == sif->sw_if_index) &&
+ (ia->address_length == address_length) &&
+ (x->as_u32 != address->as_u32))
+ continue;
+
+ /* error if the length or intf was different */
+ vnm->api_errno = VNET_API_ERROR_DUPLICATE_IF_ADDRESS;
+
+ return
+ clib_error_create
+ ("failed to add %U on %U which conflicts with %U for interface %U",
+ format_ip4_address_and_length, address,
+ address_length,
+ format_vnet_sw_if_index_name, vnm,
+ sw_if_index,
+ format_ip4_address_and_length, x,
+ ia->address_length,
+ format_vnet_sw_if_index_name, vnm,
+ sif->sw_if_index);
+ }
+ }));
+ }
+ }));
}
/* *INDENT-ON* */
ip4_sw_interface_enable_disable (sw_if_index, !is_del);
- if (is_del)
- ip4_del_interface_routes (im, ip4_af.fib_index, address, address_length);
- else
- ip4_add_interface_routes (sw_if_index,
- im, ip4_af.fib_index,
- pool_elt_at_index
- (lm->if_address_pool, if_address_index));
+ /* intf addr routes are added/deleted on admin up/down */
+ if (vnet_sw_interface_is_admin_up (vnm, sw_if_index))
+ {
+ if (is_del)
+ ip4_del_interface_routes (sw_if_index,
+ im, ip4_af.fib_index, address,
+ address_length);
+ else
+ ip4_add_interface_routes (sw_if_index,
+ im, ip4_af.fib_index,
+ pool_elt_at_index
+ (lm->if_address_pool, if_address_index));
+ }
/* If pool did not grow/shrink: add duplicate address. */
if (elts_before != pool_elts (lm->if_address_pool))
address, address_length, if_address_index, is_del);
}
-done:
- vec_free (addr_fib);
- return error;
-}
+done:
+ vec_free (addr_fib);
+ return error;
+}
+
+clib_error_t *
+ip4_add_del_interface_address (vlib_main_t * vm,
+ u32 sw_if_index,
+ ip4_address_t * address,
+ u32 address_length, u32 is_del)
+{
+ return ip4_add_del_interface_address_internal
+ (vm, sw_if_index, address, address_length, is_del);
+}
+
+void
+ip4_directed_broadcast (u32 sw_if_index, u8 enable)
+{
+ ip_interface_address_t *ia;
+ ip4_main_t *im;
+
+ im = &ip4_main;
+
+ /*
+ * when directed broadcast is enabled, the subnet broadcast route will forward
+ * packets using an adjacency with a broadcast MAC. otherwise it drops
+ */
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address(&im->lookup_main, ia,
+ sw_if_index, 0,
+ ({
+ if (ia->address_length <= 30)
+ {
+ ip4_address_t *ipa;
+
+ ipa = ip_interface_address_get_address (&im->lookup_main, ia);
+
+ fib_prefix_t pfx = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ .ip4.as_u32 = (ipa->as_u32 | ~im->fib_masks[ia->address_length]),
+ },
+ };
+
+ ip4_add_subnet_bcast_route
+ (fib_table_get_index_for_sw_if_index(FIB_PROTOCOL_IP4,
+ sw_if_index),
+ &pfx, sw_if_index);
+ }
+ }));
+ /* *INDENT-ON* */
+}
+#endif
+
+static clib_error_t *
+ip4_sw_interface_admin_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
+{
+ ip4_main_t *im = &ip4_main;
+ ip_interface_address_t *ia;
+ ip4_address_t *a;
+ u32 is_admin_up, fib_index;
+
+ /* Fill in lookup tables with default table (0). */
+ vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
+
+ vec_validate_init_empty (im->
+ lookup_main.if_address_pool_index_by_sw_if_index,
+ sw_if_index, ~0);
-clib_error_t *
-ip4_add_del_interface_address (vlib_main_t * vm,
- u32 sw_if_index,
- ip4_address_t * address,
- u32 address_length, u32 is_del)
-{
- return ip4_add_del_interface_address_internal
- (vm, sw_if_index, address, address_length, is_del);
+ is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ fib_index = vec_elt (im->fib_index_by_sw_if_index, sw_if_index);
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (&im->lookup_main, ia, sw_if_index,
+ 0 /* honor unnumbered */,
+ ({
+ a = ip_interface_address_get_address (&im->lookup_main, ia);
+ if (is_admin_up)
+ ip4_add_interface_routes (sw_if_index,
+ im, fib_index,
+ ia);
+ else
+ ip4_del_interface_routes (sw_if_index,
+ im, fib_index,
+ a, ia->address_length);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
}
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ip4_sw_interface_admin_up_down);
+
/* Built-in ip4 unicast rx feature path definition */
/* *INDENT-OFF* */
VNET_FEATURE_ARC_INIT (ip4_unicast, static) =
{
.arc_name = "ip4-unicast",
.start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+ .last_in_arc = "ip4-lookup",
.arc_index_ptr = &ip4_main.lookup_main.ucast_feature_arc_index,
};
{
.arc_name = "ip4-unicast",
.node_name = "ip4-policer-classify",
- .runs_before = VNET_FEATURES ("ipsec-input-ip4"),
+ .runs_before = VNET_FEATURES ("ipsec4-input-feature"),
};
VNET_FEATURE_INIT (ip4_ipsec, static) =
{
.arc_name = "ip4-unicast",
- .node_name = "ipsec-input-ip4",
+ .node_name = "ipsec4-input-feature",
.runs_before = VNET_FEATURES ("vpath-input-ip4"),
};
.runs_before = VNET_FEATURES ("ip4-lookup"),
};
-VNET_FEATURE_INIT (ip4_drop, static) =
+VNET_FEATURE_INIT (ip4_not_enabled, static) =
{
.arc_name = "ip4-unicast",
- .node_name = "ip4-drop",
+ .node_name = "ip4-not-enabled",
.runs_before = VNET_FEATURES ("ip4-lookup"),
};
{
.arc_name = "ip4-multicast",
.start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+ .last_in_arc = "ip4-mfib-forward-lookup",
.arc_index_ptr = &ip4_main.lookup_main.mcast_feature_arc_index,
};
.runs_before = VNET_FEATURES ("ip4-mfib-forward-lookup"),
};
-VNET_FEATURE_INIT (ip4_mc_drop, static) =
+VNET_FEATURE_INIT (ip4_mc_not_enabled, static) =
{
.arc_name = "ip4-multicast",
- .node_name = "ip4-drop",
+ .node_name = "ip4-not-enabled",
.runs_before = VNET_FEATURES ("ip4-mfib-forward-lookup"),
};
VNET_FEATURE_ARC_INIT (ip4_output, static) =
{
.arc_name = "ip4-output",
- .start_nodes = VNET_FEATURES ("ip4-rewrite", "ip4-midchain"),
+ .start_nodes = VNET_FEATURES ("ip4-rewrite", "ip4-midchain", "ip4-dvr-dpo"),
+ .last_in_arc = "interface-output",
.arc_index_ptr = &ip4_main.lookup_main.output_feature_arc_index,
};
{
.arc_name = "ip4-output",
.node_name = "ip4-source-and-port-range-check-tx",
- .runs_before = VNET_FEATURES ("ipsec-output-ip4"),
+ .runs_before = VNET_FEATURES ("ip4-outacl"),
+};
+
+VNET_FEATURE_INIT (ip4_outacl, static) =
+{
+ .arc_name = "ip4-output",
+ .node_name = "ip4-outacl",
+ .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};
VNET_FEATURE_INIT (ip4_ipsec_output, static) =
{
.arc_name = "ip4-output",
- .node_name = "ipsec-output-ip4",
+ .node_name = "ipsec4-output-feature",
.runs_before = VNET_FEATURES ("interface-output"),
};
ip4_address_t *address;
vlib_main_t *vm = vlib_get_main ();
+ vnet_sw_interface_update_unnumbered (sw_if_index, ~0, 0);
/* *INDENT-OFF* */
- foreach_ip_interface_address (lm4, ia, sw_if_index, 1 /* honor unnumbered */,
+ foreach_ip_interface_address (lm4, ia, sw_if_index, 0,
({
address = ip_interface_address_get_address (lm4, ia);
ip4_add_del_interface_address(vm, sw_if_index, address, ia->address_length, 1);
/* *INDENT-ON* */
}
- vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index,
+ vnet_feature_enable_disable ("ip4-unicast", "ip4-not-enabled", sw_if_index,
is_add, 0, 0);
- vnet_feature_enable_disable ("ip4-multicast", "ip4-drop", sw_if_index,
- is_add, 0, 0);
+ vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled",
+ sw_if_index, is_add, 0, 0);
return /* no error */ 0;
}
VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ip4_sw_interface_add_del);
/* Global IP4 main. */
+#ifndef CLIB_MARCH_VARIANT
ip4_main_t ip4_main;
+#endif /* CLIB_MARCH_VARIANT */
-clib_error_t *
+static clib_error_t *
ip4_lookup_init (vlib_main_t * vm)
{
ip4_main_t *im = &ip4_main;
if ((error = vlib_call_init_function (vm, vnet_feature_init)))
return error;
+ if ((error = vlib_call_init_function (vm, ip4_mtrie_module_init)))
+ return (error);
+ if ((error = vlib_call_init_function (vm, fib_module_init)))
+ return error;
+ if ((error = vlib_call_init_function (vm, mfib_module_init)))
+ return error;
for (i = 0; i < ARRAY_LEN (im->fib_masks); i++)
{
ip_lookup_init (&im->lookup_main, /* is_ip6 */ 0);
/* Create FIB with index 0 and table id of 0. */
- fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0);
- mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0);
+ fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0,
+ FIB_SOURCE_DEFAULT_ROUTE);
+ mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0,
+ MFIB_SOURCE_DEFAULT_ROUTE);
{
pg_node_t *pn;
{
ethernet_arp_header_t h;
- memset (&h, 0, sizeof (h));
-
- /* Set target ethernet address to all zeros. */
- memset (h.ip4_over_ethernet[1].ethernet, 0,
- sizeof (h.ip4_over_ethernet[1].ethernet));
+ clib_memset (&h, 0, sizeof (h));
#define _16(f,v) h.f = clib_host_to_net_u16 (v);
#define _8(f,v) h.f = v;
}
ip4_forward_next_trace_t;
+#ifndef CLIB_MARCH_VARIANT
u8 *
format_ip4_forward_next_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s, "%U%U",
format_white_space, indent,
format_ip4_header, t->packet_data, sizeof (t->packet_data));
return s;
}
+#endif
static u8 *
format_ip4_lookup_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s, "fib %d dpo-idx %d flow hash: 0x%08x",
t->fib_index, t->dpo_index, t->flow_hash);
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ip4_forward_next_trace_t *t = va_arg (*args, ip4_forward_next_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s, "tx_sw_if_index %d dpo-idx %d : %U flow hash: 0x%08x",
t->fib_index, t->dpo_index, format_ip_adjacency,
s = format (s, "\n%U%U",
format_white_space, indent,
format_ip_adjacency_packet_data,
- t->dpo_index, t->packet_data, sizeof (t->packet_data));
+ t->packet_data, sizeof (t->packet_data));
return s;
}
+#ifndef CLIB_MARCH_VARIANT
/* Common trace function for all ip4-forward next nodes. */
void
ip4_forward_next_trace (vlib_main_t * vm,
vec_elt (im->fib_index_by_sw_if_index,
vnet_buffer (b0)->sw_if_index[VLIB_RX]);
- clib_memcpy (t0->packet_data,
- vlib_buffer_get_current (b0),
- sizeof (t0->packet_data));
+ clib_memcpy_fast (t0->packet_data,
+ vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
}
if (b1->flags & VLIB_BUFFER_IS_TRACED)
{
(u32) ~ 0) ? vnet_buffer (b1)->sw_if_index[VLIB_TX] :
vec_elt (im->fib_index_by_sw_if_index,
vnet_buffer (b1)->sw_if_index[VLIB_RX]);
- clib_memcpy (t1->packet_data, vlib_buffer_get_current (b1),
- sizeof (t1->packet_data));
+ clib_memcpy_fast (t1->packet_data, vlib_buffer_get_current (b1),
+ sizeof (t1->packet_data));
}
from += 2;
n_left -= 2;
(u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
vec_elt (im->fib_index_by_sw_if_index,
vnet_buffer (b0)->sw_if_index[VLIB_RX]);
- clib_memcpy (t0->packet_data, vlib_buffer_get_current (b0),
- sizeof (t0->packet_data));
+ clib_memcpy_fast (t0->packet_data, vlib_buffer_get_current (b0),
+ sizeof (t0->packet_data));
}
from += 1;
n_left -= 1;
}
}
-static uword
-ip4_drop_or_punt (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, ip4_error_t error_code)
-{
- u32 *buffers = vlib_frame_vector_args (frame);
- uword n_packets = frame->n_vectors;
-
- vlib_error_drop_buffers (vm, node, buffers,
- /* stride */ 1,
- n_packets,
- /* next */ 0,
- ip4_input_node.index, error_code);
-
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- ip4_forward_next_trace (vm, node, frame, VLIB_TX);
-
- return n_packets;
-}
-
-static uword
-ip4_drop (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
- return ip4_drop_or_punt (vm, node, frame, IP4_ERROR_ADJACENCY_DROP);
-}
-
-static uword
-ip4_punt (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
- return ip4_drop_or_punt (vm, node, frame, IP4_ERROR_ADJACENCY_PUNT);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_drop_node, static) =
-{
- .function = ip4_drop,
- .name = "ip4-drop",
- .vector_size = sizeof (u32),
- .format_trace = format_ip4_forward_next_trace,
- .n_next_nodes = 1,
- .next_nodes = {
- [0] = "error-drop",
- },
-};
-
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_drop_node, ip4_drop);
-
-VLIB_REGISTER_NODE (ip4_punt_node, static) =
-{
- .function = ip4_punt,
- .name = "ip4-punt",
- .vector_size = sizeof (u32),
- .format_trace = format_ip4_forward_next_trace,
- .n_next_nodes = 1,
- .next_nodes = {
- [0] = "error-punt",
- },
-};
-
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_punt_node, ip4_punt);
-/* *INDENT-ON */
-
/* Compute TCP/UDP/ICMP4 checksum in software. */
u16
ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
{
ip_csum_t sum0;
u32 ip_header_length, payload_length_host_byte_order;
- u32 n_this_buffer, n_bytes_left;
- u16 sum16;
- void *data_this_buffer;
/* Initialize checksum with ip header. */
ip_header_length = ip4_header_bytes (ip0);
sum0 =
ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->src_address, u64));
- n_bytes_left = n_this_buffer = payload_length_host_byte_order;
- data_this_buffer = (void *) ip0 + ip_header_length;
- if (n_this_buffer + ip_header_length > p0->current_length)
- n_this_buffer =
- p0->current_length >
- ip_header_length ? p0->current_length - ip_header_length : 0;
- while (1)
- {
- sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer);
- n_bytes_left -= n_this_buffer;
- if (n_bytes_left == 0)
- break;
-
- ASSERT (p0->flags & VLIB_BUFFER_NEXT_PRESENT);
- p0 = vlib_get_buffer (vm, p0->next_buffer);
- data_this_buffer = vlib_buffer_get_current (p0);
- n_this_buffer = p0->current_length;
- }
-
- sum16 = ~ip_csum_fold (sum0);
-
- return sum16;
+ return ip_calculate_l4_checksum (vm, p0, sum0,
+ payload_length_host_byte_order, (u8 *) ip0,
+ ip_header_length, NULL);
}
u32
return p0->flags;
}
+#endif
/* *INDENT-OFF* */
VNET_FEATURE_ARC_INIT (ip4_local) =
{
.arc_name = "ip4-local",
.start_nodes = VNET_FEATURES ("ip4-local"),
+ .last_in_arc = "ip4-local-end-of-arc",
};
/* *INDENT-ON* */
-static inline uword
-ip4_local_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int head_of_feature_arc)
+static inline void
+ip4_local_l4_csum_validate (vlib_main_t * vm, vlib_buffer_t * p,
+ ip4_header_t * ip, u8 is_udp, u8 * error,
+ u8 * good_tcp_udp)
{
- ip4_main_t *im = &ip4_main;
- ip_lookup_main_t *lm = &im->lookup_main;
- ip_local_next_t next_index;
- u32 *from, *to_next, n_left_from, n_left_to_next;
- vlib_node_runtime_t *error_node =
- vlib_node_get_runtime (vm, ip4_input_node.index);
- u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
-
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- ip4_forward_next_trace (vm, node, frame, VLIB_TX);
-
- while (n_left_from > 0)
+ u32 flags0;
+ flags0 = ip4_tcp_udp_validate_checksum (vm, p);
+ *good_tcp_udp = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+ if (is_udp)
{
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- vlib_buffer_t *p0, *p1;
- ip4_header_t *ip0, *ip1;
- udp_header_t *udp0, *udp1;
- ip4_fib_mtrie_t *mtrie0, *mtrie1;
- ip4_fib_mtrie_leaf_t leaf0, leaf1;
- const dpo_id_t *dpo0, *dpo1;
- const load_balance_t *lb0, *lb1;
- u32 pi0, ip_len0, udp_len0, flags0, next0, fib_index0, lbi0;
- u32 pi1, ip_len1, udp_len1, flags1, next1, fib_index1, lbi1;
- i32 len_diff0, len_diff1;
- u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
- u8 error1, is_udp1, is_tcp_udp1, good_tcp_udp1, proto1;
- u32 sw_if_index0, sw_if_index1;
-
- pi0 = to_next[0] = from[0];
- pi1 = to_next[1] = from[1];
- from += 2;
- n_left_from -= 2;
- to_next += 2;
- n_left_to_next -= 2;
-
- next0 = next1 = IP_LOCAL_NEXT_DROP;
-
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
-
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
-
- vnet_buffer (p0)->l3_hdr_offset = p0->current_data;
- vnet_buffer (p1)->l3_hdr_offset = p1->current_data;
-
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX];
-
- fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
- fib_index1 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index1);
-
- fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
- fib_index0 =
- (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
-
- fib_index1 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index1);
- fib_index1 =
- (vnet_buffer (p1)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX];
-
- mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
- mtrie1 = &ip4_fib_get (fib_index1)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
- leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, &ip1->src_address);
-
- /* Treat IP frag packets as "experimental" protocol for now
- until support of IP frag reassembly is implemented */
- proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol;
- proto1 = ip4_is_fragment (ip1) ? 0xfe : ip1->protocol;
-
- if (head_of_feature_arc == 0)
- {
- error0 = error1 = IP4_ERROR_UNKNOWN_PROTOCOL;
- goto skip_checks;
- }
-
- is_udp0 = proto0 == IP_PROTOCOL_UDP;
- is_udp1 = proto1 == IP_PROTOCOL_UDP;
- is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
- is_tcp_udp1 = is_udp1 || proto1 == IP_PROTOCOL_TCP;
-
- flags0 = p0->flags;
- flags1 = p1->flags;
-
- good_tcp_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
- good_tcp_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
-
- udp0 = ip4_next_header (ip0);
- udp1 = ip4_next_header (ip1);
-
- /* Don't verify UDP checksum for packets with explicit zero checksum. */
- good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
- good_tcp_udp1 |= is_udp1 && udp1->checksum == 0;
-
- /* Verify UDP length. */
- ip_len0 = clib_net_to_host_u16 (ip0->length);
- ip_len1 = clib_net_to_host_u16 (ip1->length);
- udp_len0 = clib_net_to_host_u16 (udp0->length);
- udp_len1 = clib_net_to_host_u16 (udp1->length);
+ udp_header_t *udp;
+ u32 ip_len, udp_len;
+ i32 len_diff;
+ udp = ip4_next_header (ip);
+ /* Verify UDP length. */
+ ip_len = clib_net_to_host_u16 (ip->length);
+ udp_len = clib_net_to_host_u16 (udp->length);
+
+ len_diff = ip_len - udp_len;
+ *good_tcp_udp &= len_diff >= 0;
+ *error = len_diff < 0 ? IP4_ERROR_UDP_LENGTH : *error;
+ }
+}
- len_diff0 = ip_len0 - udp_len0;
- len_diff1 = ip_len1 - udp_len1;
+#define ip4_local_csum_is_offloaded(_b) \
+ _b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM \
+ || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM
- len_diff0 = is_udp0 ? len_diff0 : 0;
- len_diff1 = is_udp1 ? len_diff1 : 0;
+#define ip4_local_need_csum_check(is_tcp_udp, _b) \
+ (is_tcp_udp && !(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED \
+ || ip4_local_csum_is_offloaded (_b)))
- if (PREDICT_FALSE (!(is_tcp_udp0 & is_tcp_udp1
- & good_tcp_udp0 & good_tcp_udp1)))
- {
- if (is_tcp_udp0)
- {
- if (is_tcp_udp0
- && !(flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))
- flags0 = ip4_tcp_udp_validate_checksum (vm, p0);
- good_tcp_udp0 =
- (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
- good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
- }
- if (is_tcp_udp1)
- {
- if (is_tcp_udp1
- && !(flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))
- flags1 = ip4_tcp_udp_validate_checksum (vm, p1);
- good_tcp_udp1 =
- (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
- good_tcp_udp1 |= is_udp1 && udp1->checksum == 0;
- }
- }
+#define ip4_local_csum_is_valid(_b) \
+ (_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT \
+ || (ip4_local_csum_is_offloaded (_b))) != 0
- good_tcp_udp0 &= len_diff0 >= 0;
- good_tcp_udp1 &= len_diff1 >= 0;
+static inline void
+ip4_local_check_l4_csum (vlib_main_t * vm, vlib_buffer_t * b,
+ ip4_header_t * ih, u8 * error)
+{
+ u8 is_udp, is_tcp_udp, good_tcp_udp;
- leaf0 =
- ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
- leaf1 =
- ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 2);
+ is_udp = ih->protocol == IP_PROTOCOL_UDP;
+ is_tcp_udp = is_udp || ih->protocol == IP_PROTOCOL_TCP;
- error0 = error1 = IP4_ERROR_UNKNOWN_PROTOCOL;
+ if (PREDICT_FALSE (ip4_local_need_csum_check (is_tcp_udp, b)))
+ ip4_local_l4_csum_validate (vm, b, ih, is_udp, error, &good_tcp_udp);
+ else
+ good_tcp_udp = ip4_local_csum_is_valid (b);
- error0 = len_diff0 < 0 ? IP4_ERROR_UDP_LENGTH : error0;
- error1 = len_diff1 < 0 ? IP4_ERROR_UDP_LENGTH : error1;
+ ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
+ *error = (is_tcp_udp && !good_tcp_udp
+ ? IP4_ERROR_TCP_CHECKSUM + is_udp : *error);
+}
- ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
- error0 = (is_tcp_udp0 && !good_tcp_udp0
- ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
- error1 = (is_tcp_udp1 && !good_tcp_udp1
- ? IP4_ERROR_TCP_CHECKSUM + is_udp1 : error1);
+static inline void
+ip4_local_check_l4_csum_x2 (vlib_main_t * vm, vlib_buffer_t ** b,
+ ip4_header_t ** ih, u8 * error)
+{
+ u8 is_udp[2], is_tcp_udp[2], good_tcp_udp[2];
- leaf0 =
- ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
- leaf1 =
- ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address, 3);
+ is_udp[0] = ih[0]->protocol == IP_PROTOCOL_UDP;
+ is_udp[1] = ih[1]->protocol == IP_PROTOCOL_UDP;
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0 =
- ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
+ is_tcp_udp[0] = is_udp[0] || ih[0]->protocol == IP_PROTOCOL_TCP;
+ is_tcp_udp[1] = is_udp[1] || ih[1]->protocol == IP_PROTOCOL_TCP;
- vnet_buffer (p1)->ip.adj_index[VLIB_RX] = lbi1 =
- ip4_fib_mtrie_leaf_get_adj_index (leaf1);
- vnet_buffer (p1)->ip.adj_index[VLIB_TX] = lbi1;
+ good_tcp_udp[0] = ip4_local_csum_is_valid (b[0]);
+ good_tcp_udp[1] = ip4_local_csum_is_valid (b[1]);
- lb0 = load_balance_get (lbi0);
- lb1 = load_balance_get (lbi1);
- dpo0 = load_balance_get_bucket_i (lb0, 0);
- dpo1 = load_balance_get_bucket_i (lb1, 0);
+ if (PREDICT_FALSE (ip4_local_need_csum_check (is_tcp_udp[0], b[0])
+ || ip4_local_need_csum_check (is_tcp_udp[1], b[1])))
+ {
+ if (is_tcp_udp[0])
+ ip4_local_l4_csum_validate (vm, b[0], ih[0], is_udp[0], &error[0],
+ &good_tcp_udp[0]);
+ if (is_tcp_udp[1])
+ ip4_local_l4_csum_validate (vm, b[1], ih[1], is_udp[1], &error[1],
+ &good_tcp_udp[1]);
+ }
- /*
- * Must have a route to source otherwise we drop the packet.
- * ip4 broadcasts are accepted, e.g. to make dhcp client work
- *
- * The checks are:
- * - the source is a recieve => it's from us => bogus, do this
- * first since it sets a different error code.
- * - uRPF check for any route to source - accept if passes.
- * - allow packets destined to the broadcast address from unknown sources
- */
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo0->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb0->lb_urpf) &&
- ip0->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
- error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo1->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error1);
- error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb1->lb_urpf) &&
- ip1->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error1);
-
- skip_checks:
-
- next0 = lm->local_next_by_ip_protocol[proto0];
- next1 = lm->local_next_by_ip_protocol[proto1];
-
- next0 =
- error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
- next1 =
- error1 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next1;
-
- p0->error = error0 ? error_node->errors[error0] : 0;
- p1->error = error1 ? error_node->errors[error1] : 0;
-
- if (head_of_feature_arc)
- {
- if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0);
- if (PREDICT_TRUE (error1 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index1, &next1, p1);
- }
+ error[0] = (is_tcp_udp[0] && !good_tcp_udp[0] ?
+ IP4_ERROR_TCP_CHECKSUM + is_udp[0] : error[0]);
+ error[1] = (is_tcp_udp[1] && !good_tcp_udp[1] ?
+ IP4_ERROR_TCP_CHECKSUM + is_udp[1] : error[1]);
+}
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
- n_left_to_next, pi0, pi1,
- next0, next1);
- }
+static inline void
+ip4_local_set_next_and_error (vlib_node_runtime_t * error_node,
+ vlib_buffer_t * b, u16 * next, u8 error,
+ u8 head_of_feature_arc)
+{
+ u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
+ u32 next_index;
- while (n_left_from > 0 && n_left_to_next > 0)
+ *next = error != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : *next;
+ b->error = error ? error_node->errors[error] : 0;
+ if (head_of_feature_arc)
+ {
+ next_index = *next;
+ if (PREDICT_TRUE (error == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
{
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- udp_header_t *udp0;
- ip4_fib_mtrie_t *mtrie0;
- ip4_fib_mtrie_leaf_t leaf0;
- u32 pi0, next0, ip_len0, udp_len0, flags0, fib_index0, lbi0;
- i32 len_diff0;
- u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
- load_balance_t *lb0;
- const dpo_id_t *dpo0;
- u32 sw_if_index0;
+ vnet_feature_arc_start (arc_index,
+ vnet_buffer (b)->sw_if_index[VLIB_RX],
+ &next_index, b);
+ *next = next_index;
+ }
+ }
+}
- pi0 = to_next[0] = from[0];
- from += 1;
- n_left_from -= 1;
- to_next += 1;
- n_left_to_next -= 1;
+typedef struct
+{
+ ip4_address_t src;
+ u32 lbi;
+ u8 error;
+ u8 first;
+} ip4_local_last_check_t;
- next0 = IP_LOCAL_NEXT_DROP;
+static inline void
+ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0,
+ ip4_local_last_check_t * last_check, u8 * error0)
+{
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_fib_mtrie_t *mtrie0;
+ const dpo_id_t *dpo0;
+ load_balance_t *lb0;
+ u32 lbi0;
- p0 = vlib_get_buffer (vm, pi0);
+ vnet_buffer (b)->ip.fib_index =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b)->sw_if_index[VLIB_TX] : vnet_buffer (b)->ip.fib_index;
- ip0 = vlib_buffer_get_current (p0);
+ /*
+ * vnet_buffer()->ip.adj_index[VLIB_RX] will be set to the index of the
+ * adjacency for the destination address (the local interface address).
+ * vnet_buffer()->ip.adj_index[VLIB_TX] will be set to the index of the
+ * adjacency for the source address (the remote sender's address)
+ */
+ if (PREDICT_FALSE (last_check->first ||
+ (last_check->src.as_u32 != ip0->src_address.as_u32)))
+ {
+ mtrie0 = &ip4_fib_get (vnet_buffer (b)->ip.fib_index)->mtrie;
+ leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b)->ip.adj_index[VLIB_TX];
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = lbi0;
+
+ lb0 = load_balance_get (lbi0);
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ /*
+ * Must have a route to source otherwise we drop the packet.
+ * ip4 broadcasts are accepted, e.g. to make dhcp client work
+ *
+ * The checks are:
+ * - the source is a receive => it's from us => bogus, do this
+ * first since it sets a different error code.
+ * - uRPF check for any route to source - accept if passes.
+ * - allow packets destined to the broadcast address from unknown sources
+ */
+
+ *error0 = ((*error0 == IP4_ERROR_UNKNOWN_PROTOCOL
+ && dpo0->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : *error0);
+ *error0 = ((*error0 == IP4_ERROR_UNKNOWN_PROTOCOL
+ && !fib_urpf_check_size (lb0->lb_urpf)
+ && ip0->dst_address.as_u32 != 0xFFFFFFFF) ?
+ IP4_ERROR_SRC_LOOKUP_MISS : *error0);
+
+ last_check->src.as_u32 = ip0->src_address.as_u32;
+ last_check->lbi = lbi0;
+ last_check->error = *error0;
+ }
+ else
+ {
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b)->ip.adj_index[VLIB_TX];
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = last_check->lbi;
+ *error0 = last_check->error;
+ last_check->first = 0;
+ }
+}
- vnet_buffer (p0)->l3_hdr_offset = p0->current_data;
+/**
+ * Source-address (spoof / uRPF) check for two packets at once.
+ * Pairs with ip4_local_check_src(): the mtrie walk is amortised over a
+ * pair of buffers, and the second packet's result is cached in
+ * 'last_check' so a run of packets from the same source performs a
+ * single FIB lookup.
+ */
+static inline void
+ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip,
+ ip4_local_last_check_t * last_check, u8 * error)
+{
+ ip4_fib_mtrie_leaf_t leaf[2];
+ ip4_fib_mtrie_t *mtrie[2];
+ const dpo_id_t *dpo[2];
+ load_balance_t *lb[2];
+ u32 not_last_hit;
+ u32 lbi[2];
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ /* non-zero when this is the first packet ever, or either source
+ * differs from the cached one -> full lookup required */
+ not_last_hit = last_check->first;
+ not_last_hit |= ip[0]->src_address.as_u32 ^ last_check->src.as_u32;
+ not_last_hit |= ip[1]->src_address.as_u32 ^ last_check->src.as_u32;
- fib_index0 = vec_elt (im->fib_index_by_sw_if_index, sw_if_index0);
+ /* sw_if_index[VLIB_TX], when set, overrides the FIB index */
+ vnet_buffer (b[0])->ip.fib_index =
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] :
+ vnet_buffer (b[0])->ip.fib_index;
- fib_index0 =
- (vnet_buffer (p0)->sw_if_index[VLIB_TX] ==
- (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX];
+ vnet_buffer (b[1])->ip.fib_index =
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] :
+ vnet_buffer (b[1])->ip.fib_index;
- mtrie0 = &ip4_fib_get (fib_index0)->mtrie;
+ /*
+ * vnet_buffer()->ip.adj_index[VLIB_RX] will be set to the index of the
+ * adjacency for the destination address (the local interface address).
+ * vnet_buffer()->ip.adj_index[VLIB_TX] will be set to the index of the
+ * adjacency for the source address (the remote sender's address)
+ */
+ if (PREDICT_FALSE (not_last_hit))
+ {
+ mtrie[0] = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
+ mtrie[1] = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
+
+ /* mtrie walk on the source address: step one, then bytes 2 and 3 */
+ leaf[0] = ip4_fib_mtrie_lookup_step_one (mtrie[0], &ip[0]->src_address);
+ leaf[1] = ip4_fib_mtrie_lookup_step_one (mtrie[1], &ip[1]->src_address);
+
+ leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0],
+ &ip[0]->src_address, 2);
+ leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1],
+ &ip[1]->src_address, 2);
+
+ leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0],
+ &ip[0]->src_address, 3);
+ leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1],
+ &ip[1]->src_address, 3);
+
+ lbi[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf[0]);
+ lbi[1] = ip4_fib_mtrie_leaf_get_adj_index (leaf[1]);
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = lbi[0];
+
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = lbi[1];
+
+ lb[0] = load_balance_get (lbi[0]);
+ lb[1] = load_balance_get (lbi[1]);
+
+ dpo[0] = load_balance_get_bucket_i (lb[0], 0);
+ dpo[1] = load_balance_get_bucket_i (lb[1], 0);
+
+ /* source resolving to a local (receive) DPO => spoofed local address;
+ * empty uRPF list (and not bcast dst) => source lookup miss */
+ error[0] = ((error[0] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo[0]->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error[0]);
+ error[0] = ((error[0] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb[0]->lb_urpf) &&
+ ip[0]->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error[0]);
+
+ error[1] = ((error[1] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo[1]->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error[1]);
+ error[1] = ((error[1] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb[1]->lb_urpf) &&
+ ip[1]->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error[1]);
+
+ /* cache the second packet's result for subsequent packets */
+ last_check->src.as_u32 = ip[1]->src_address.as_u32;
+ last_check->lbi = lbi[1];
+ last_check->error = error[1];
+ }
+ else
+ {
+ /* both sources match the cache: reuse the cached lbi and error */
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = last_check->lbi;
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] =
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = last_check->lbi;
- /* Treat IP frag packets as "experimental" protocol for now
- until support of IP frag reassembly is implemented */
- proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol;
+ error[0] = last_check->error;
+ error[1] = last_check->error;
+ last_check->first = 0;
+ }
+}
- if (head_of_feature_arc == 0)
- {
- error0 = IP4_ERROR_UNKNOWN_PROTOCOL;
- goto skip_check;
- }
+/* Packet classes returned by ip4_local_classify(); non-L4 packets skip
+ * the checksum and source checks in ip4_local_inline(). */
+enum ip_local_packet_type_e
+{
+ IP_LOCAL_PACKET_TYPE_L4,	/* plain L4 payload: run csum + src checks */
+ IP_LOCAL_PACKET_TYPE_NAT,	/* VNET_BUFFER_F_IS_NATED was set */
+ IP_LOCAL_PACKET_TYPE_FRAG,	/* IP fragment: goes to reassembly */
+};
- is_udp0 = proto0 == IP_PROTOCOL_UDP;
- is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
+/**
+ * Determine packet type and next node.
+ *
+ * The expectation is that all packets that are not L4 will skip
+ * checksums and source checks.
+ *
+ * @param b    buffer under classification
+ * @param ip   the buffer's IPv4 header
+ * @param next [out] next-node index: reassembly for fragments,
+ *             otherwise the per-protocol local next
+ * @return one of ip_local_packet_type_e
+ */
+always_inline u8
+ip4_local_classify (vlib_buffer_t * b, ip4_header_t * ip, u16 * next)
+{
+ ip_lookup_main_t *lm = &ip4_main.lookup_main;
- flags0 = p0->flags;
+ /* fragments must be reassembled before local delivery */
+ if (PREDICT_FALSE (ip4_is_fragment (ip)))
+ {
+ *next = IP_LOCAL_NEXT_REASSEMBLY;
+ return IP_LOCAL_PACKET_TYPE_FRAG;
+ }
+ if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_IS_NATED))
+ {
+ *next = lm->local_next_by_ip_protocol[ip->protocol];
+ return IP_LOCAL_PACKET_TYPE_NAT;
+ }
- good_tcp_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
+ *next = lm->local_next_by_ip_protocol[ip->protocol];
+ return IP_LOCAL_PACKET_TYPE_L4;
+}
- udp0 = ip4_next_header (ip0);
+/**
+ * ip4-local worker: classify each packet (L4 / NAT / fragment), run the
+ * L4 checksum and source (spoof / uRPF) checks on plain L4 packets when
+ * at the head of the feature arc, then pick the next node and error.
+ * Dual-loop: two packets at a time plus a single-packet tail.
+ *
+ * @param head_of_feature_arc 1 for ip4-local, 0 for ip4-local-end-of-arc
+ */
+static inline uword
+ip4_local_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int head_of_feature_arc)
+{
+ u32 *from, n_left_from;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ ip4_header_t *ip[2];
+ u8 error[2], pt[2];
+
+ ip4_local_last_check_t last_check = {
+ /*
+ * 0.0.0.0 can appear as the source address of an IP packet,
+ * as can any other address, hence the need to use the 'first'
+ * member to make sure the .lbi is initialised for the first
+ * packet.
+ */
+ .src = {.as_u32 = 0},
+ .lbi = ~0,
+ .error = IP4_ERROR_UNKNOWN_PROTOCOL,
+ .first = 1,
+ };
- /* Don't verify UDP checksum for packets with explicit zero checksum. */
- good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
- /* Verify UDP length. */
- ip_len0 = clib_net_to_host_u16 (ip0->length);
- udp_len0 = clib_net_to_host_u16 (udp0->length);
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
- len_diff0 = ip_len0 - udp_len0;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ b = bufs;
+ next = nexts;
- len_diff0 = is_udp0 ? len_diff0 : 0;
+ while (n_left_from >= 6)
+ {
+ u8 not_batch = 0;
- if (PREDICT_FALSE (!(is_tcp_udp0 & good_tcp_udp0)))
- {
- if (is_tcp_udp0)
- {
- if (is_tcp_udp0
- && !(flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED))
- flags0 = ip4_tcp_udp_validate_checksum (vm, p0);
- good_tcp_udp0 =
- (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
- good_tcp_udp0 |= is_udp0 && udp0->checksum == 0;
- }
- }
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
- good_tcp_udp0 &= len_diff0 >= 0;
+ CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
- leaf0 =
- ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+ error[0] = error[1] = IP4_ERROR_UNKNOWN_PROTOCOL;
- error0 = IP4_ERROR_UNKNOWN_PROTOCOL;
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
- error0 = len_diff0 < 0 ? IP4_ERROR_UDP_LENGTH : error0;
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+ vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data;
- ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
- error0 = (is_tcp_udp0 && !good_tcp_udp0
- ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
+ pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
+ pt[1] = ip4_local_classify (b[1], ip[1], &next[1]);
- leaf0 =
- ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ /* non-zero when the two packets classified differently */
+ not_batch = pt[0] ^ pt[1];
- lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
+ /* both non-L4 (NAT/frag), or not head of arc: no checks needed */
+ if (head_of_feature_arc == 0 || (pt[0] && not_batch == 0))
+ goto skip_checks;
- lb0 = load_balance_get (lbi0);
- dpo0 = load_balance_get_bucket_i (lb0, 0);
+ if (PREDICT_TRUE (not_batch == 0))
+ {
+ ip4_local_check_l4_csum_x2 (vm, b, ip, error);
+ ip4_local_check_src_x2 (b, ip, &last_check, error);
+ }
+ else
+ {
+ /* mixed pair: check each L4 packet individually */
+ if (!pt[0])
+ {
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
+ }
+ if (!pt[1])
+ {
+ ip4_local_check_l4_csum (vm, b[1], ip[1], &error[1]);
+ ip4_local_check_src (b[1], ip[1], &last_check, &error[1]);
+ }
+ }
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0;
+ skip_checks:
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo0->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb0->lb_urpf) &&
- ip0->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
+ ip4_local_set_next_and_error (error_node, b[1], &next[1], error[1],
+ head_of_feature_arc);
- skip_check:
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
+ }
- next0 = lm->local_next_by_ip_protocol[proto0];
+ /* single-packet tail loop */
+ while (n_left_from > 0)
+ {
+ error[0] = IP4_ERROR_UNKNOWN_PROTOCOL;
- next0 =
- error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+ ip[0] = vlib_buffer_get_current (b[0]);
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+ pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
- p0->error = error0 ? error_node->errors[error0] : 0;
+ if (head_of_feature_arc == 0 || pt[0])
+ goto skip_check;
- if (head_of_feature_arc)
- {
- if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0);
- }
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, pi0, next0);
+ skip_check:
- }
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
 }
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
 return frame->n_vectors;
}
-static uword
-ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* ip4-local node entry: runs the worker at the head of the feature arc */
+VLIB_NODE_FN (ip4_local_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
 return ip4_local_inline (vm, node, frame, 1 /* head of feature arc */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_local_node) =
{
- .function = ip4_local,
 .name = "ip4-local",
 .vector_size = sizeof (u32),
 .format_trace = format_ip4_forward_next_trace,
 .n_next_nodes = IP_LOCAL_N_NEXT,
 .next_nodes =
 {
+ /* IPv4-specific drop/punt nodes */
- [IP_LOCAL_NEXT_DROP] = "error-drop",
- [IP_LOCAL_NEXT_PUNT] = "error-punt",
+ [IP_LOCAL_NEXT_DROP] = "ip4-drop",
+ [IP_LOCAL_NEXT_PUNT] = "ip4-punt",
 [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup",
- [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",},
+ [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",
+ /* fragments detected by ip4_local_classify() go here first */
+ [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-full-reassembly",
+ },
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local);
-static uword
-ip4_local_end_of_arc (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* Same worker as ip4-local, but invoked at the END of the "ip4-local"
+ * feature arc (head_of_feature_arc = 0), so checks are skipped. */
+VLIB_NODE_FN (ip4_local_end_of_arc_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
 return ip4_local_inline (vm, node, frame, 0 /* head of feature arc */ );
}
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_local_end_of_arc_node,static) = {
- .function = ip4_local_end_of_arc,
+VLIB_REGISTER_NODE (ip4_local_end_of_arc_node) = {
 .name = "ip4-local-end-of-arc",
 .vector_size = sizeof (u32),
 .sibling_of = "ip4-local",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_end_of_arc_node, ip4_local_end_of_arc)
-
/* Register ip4-local-end-of-arc as a feature on the "ip4-local" arc. */
VNET_FEATURE_INIT (ip4_local_end_of_arc, static) = {
 .arc_name = "ip4-local",
 .node_name = "ip4-local-end-of-arc",
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
/**
 * Register a graph node to receive locally-delivered packets for an
 * IP protocol.
 *
 * ip4-local dispatches via lm->local_next_by_ip_protocol (see
 * ip4_local_classify()), so the next-index returned by
 * vlib_node_add_next() must be recorded there.  The previous body
 * discarded that return value (leaving 'protocol' unused, making
 * registration a no-op for dispatch) and referenced an undeclared 'vm'.
 *
 * @param protocol   IP protocol number (index into local_next_by_ip_protocol)
 * @param node_index graph node to which matching packets are sent
 */
void
ip4_register_protocol (u32 protocol, u32 node_index)
{
  vlib_main_t *vm = vlib_get_main ();
  ip4_main_t *im = &ip4_main;
  ip_lookup_main_t *lm = &im->lookup_main;

  ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
  lm->local_next_by_ip_protocol[protocol] =
    vlib_node_add_next (vm, ip4_local_node.index, node_index);
}
+/* Revert a protocol registered via ip4_register_protocol() back to the
+ * default punt disposition. */
+void
+ip4_unregister_protocol (u32 protocol)
+{
+ ip4_main_t *im = &ip4_main;
+ ip_lookup_main_t *lm = &im->lookup_main;
+
+ ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
+ lm->local_next_by_ip_protocol[protocol] = IP_LOCAL_NEXT_PUNT;
+}
+#endif
+
/* CLI handler: list the IP protocols registered for local delivery and
 * the node each dispatches to.
 * NOTE(review): the surrounding diff elides this function's variable
 * declarations (i, lm) and part of its body — confirm against the full
 * file before editing. */
static clib_error_t *
show_ip_local_command_fn (vlib_main_t * vm,
 unformat_input_t * input, vlib_cli_command_t * cmd)
 for (i = 0; i < ARRAY_LEN (lm->local_next_by_ip_protocol); i++)
 {
 if (lm->local_next_by_ip_protocol[i] != IP_LOCAL_NEXT_PUNT)
- vlib_cli_output (vm, "%d", i);
+ {
+ /* resolve the next-index to its node for readable output */
+ u32 node_index = vlib_get_node (vm,
+ ip4_local_node.index)->
+ next_nodes[lm->local_next_by_ip_protocol[i]];
+ vlib_cli_output (vm, "%U: %U", format_ip_protocol, i,
+ format_vlib_node_name, vm, node_index);
+ }
 }
 return 0;
}
};
/* *INDENT-ON* */
-always_inline uword
-ip4_arp_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int is_glean)
+typedef enum
{
- vnet_main_t *vnm = vnet_get_main ();
- ip4_main_t *im = &ip4_main;
- ip_lookup_main_t *lm = &im->lookup_main;
- u32 *from, *to_next_drop;
- uword n_left_from, n_left_to_next_drop, next_index;
- static f64 time_last_seed_change = -1e100;
- static u32 hash_seeds[3];
- static uword hash_bitmap[256 / BITS (uword)];
- f64 time_now;
-
- if (node->flags & VLIB_NODE_FLAG_TRACE)
- ip4_forward_next_trace (vm, node, frame, VLIB_TX);
-
- time_now = vlib_time_now (vm);
- if (time_now - time_last_seed_change > 1e-3)
- {
- uword i;
- u32 *r = clib_random_buffer_get_data (&vm->random_buffer,
- sizeof (hash_seeds));
- for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
- hash_seeds[i] = r[i];
-
- /* Mark all hash keys as been no-seen before. */
- for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
- hash_bitmap[i] = 0;
-
- time_last_seed_change = time_now;
- }
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
- if (next_index == IP4_ARP_NEXT_DROP)
- next_index = IP4_ARP_N_NEXT; /* point to first interface */
-
- while (n_left_from > 0)
- {
- vlib_get_next_frame (vm, node, IP4_ARP_NEXT_DROP,
- to_next_drop, n_left_to_next_drop);
-
- while (n_left_from > 0 && n_left_to_next_drop > 0)
- {
- u32 pi0, adj_index0, a0, b0, c0, m0, sw_if_index0, drop0;
- ip_adjacency_t *adj0;
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- uword bm0;
-
- pi0 = from[0];
-
- p0 = vlib_get_buffer (vm, pi0);
-
- adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
- adj0 = adj_get (adj_index0);
- ip0 = vlib_buffer_get_current (p0);
-
- a0 = hash_seeds[0];
- b0 = hash_seeds[1];
- c0 = hash_seeds[2];
-
- sw_if_index0 = adj0->rewrite_header.sw_if_index;
- vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
-
- if (is_glean)
- {
- /*
- * this is the Glean case, so we are ARPing for the
- * packet's destination
- */
- a0 ^= ip0->dst_address.data_u32;
- }
- else
- {
- a0 ^= adj0->sub_type.nbr.next_hop.ip4.data_u32;
- }
- b0 ^= sw_if_index0;
-
- hash_v3_finalize32 (a0, b0, c0);
-
- c0 &= BITS (hash_bitmap) - 1;
- c0 = c0 / BITS (uword);
- m0 = (uword) 1 << (c0 % BITS (uword));
-
- bm0 = hash_bitmap[c0];
- drop0 = (bm0 & m0) != 0;
-
- /* Mark it as seen. */
- hash_bitmap[c0] = bm0 | m0;
-
- from += 1;
- n_left_from -= 1;
- to_next_drop[0] = pi0;
- to_next_drop += 1;
- n_left_to_next_drop -= 1;
-
- p0->error =
- node->errors[drop0 ? IP4_ARP_ERROR_DROP :
- IP4_ARP_ERROR_REQUEST_SENT];
-
- /*
- * the adj has been updated to a rewrite but the node the DPO that got
- * us here hasn't - yet. no big deal. we'll drop while we wait.
- */
- if (IP_LOOKUP_NEXT_REWRITE == adj0->lookup_next_index)
- continue;
-
- if (drop0)
- continue;
-
- /*
- * Can happen if the control-plane is programming tables
- * with traffic flowing; at least that's today's lame excuse.
- */
- if ((is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_GLEAN)
- || (!is_glean && adj0->lookup_next_index != IP_LOOKUP_NEXT_ARP))
- {
- p0->error = node->errors[IP4_ARP_ERROR_NON_ARP_ADJ];
- }
- else
- /* Send ARP request. */
- {
- u32 bi0 = 0;
- vlib_buffer_t *b0;
- ethernet_arp_header_t *h0;
- vnet_hw_interface_t *hw_if0;
-
- h0 =
- vlib_packet_template_get_packet (vm,
- &im->ip4_arp_request_packet_template,
- &bi0);
-
- /* Add rewrite/encap string for ARP packet. */
- vnet_rewrite_one_header (adj0[0], h0,
- sizeof (ethernet_header_t));
-
- hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
-
- /* Src ethernet address in ARP header. */
- clib_memcpy (h0->ip4_over_ethernet[0].ethernet,
- hw_if0->hw_address,
- sizeof (h0->ip4_over_ethernet[0].ethernet));
-
- if (is_glean)
- {
- /* The interface's source address is stashed in the Glean Adj */
- h0->ip4_over_ethernet[0].ip4 =
- adj0->sub_type.glean.receive_addr.ip4;
-
- /* Copy in destination address we are requesting. This is the
- * glean case, so it's the packet's destination.*/
- h0->ip4_over_ethernet[1].ip4.data_u32 =
- ip0->dst_address.data_u32;
- }
- else
- {
- /* Src IP address in ARP header. */
- if (ip4_src_address_for_packet (lm, sw_if_index0,
- &h0->
- ip4_over_ethernet[0].ip4))
- {
- /* No source address available */
- p0->error =
- node->errors[IP4_ARP_ERROR_NO_SOURCE_ADDRESS];
- vlib_buffer_free (vm, &bi0, 1);
- continue;
- }
-
- /* Copy in destination address we are requesting from the
- incomplete adj */
- h0->ip4_over_ethernet[1].ip4.data_u32 =
- adj0->sub_type.nbr.next_hop.ip4.as_u32;
- }
-
- vlib_buffer_copy_trace_flag (vm, p0, bi0);
- b0 = vlib_get_buffer (vm, bi0);
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
-
- vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
-
- vlib_set_next_frame_buffer (vm, node,
- adj0->rewrite_header.next_index,
- bi0);
- }
- }
+ IP4_REWRITE_NEXT_DROP,
+ IP4_REWRITE_NEXT_ICMP_ERROR,
+ IP4_REWRITE_NEXT_FRAGMENT,
+ IP4_REWRITE_N_NEXT /* Last */
+} ip4_rewrite_next_t;
- vlib_put_next_frame (vm, node, IP4_ARP_NEXT_DROP, n_left_to_next_drop);
+/**
+ * This bits of an IPv4 address to mask to construct a multicast
+ * MAC address
+ */
+#if CLIB_ARCH_IS_BIG_ENDIAN
+#define IP4_MCAST_ADDR_MASK 0x007fffff
+#else
+#define IP4_MCAST_ADDR_MASK 0xffff7f00
+#endif
+
+/**
+ * Check the (possibly GSO-derived) packet length against the adjacency
+ * MTU.  Oversized packets with DF set get an ICMP fragmentation-needed
+ * error; otherwise they are set up for IP fragmentation.  On overflow,
+ * *error is set to IP4_ERROR_MTU_EXCEEDED and *next is redirected.
+ */
+always_inline void
+ip4_mtu_check (vlib_buffer_t * b, u16 packet_len,
+ u16 adj_packet_bytes, bool df, u16 * next,
+ u8 is_midchain, u32 * error)
+{
+ if (packet_len > adj_packet_bytes)
+ {
+ *error = IP4_ERROR_MTU_EXCEEDED;
+ if (df)
+ {
+ icmp4_error_set_vnet_buffer
+ (b, ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
+ adj_packet_bytes);
+ *next = IP4_REWRITE_NEXT_ICMP_ERROR;
+ }
+ else
+ {
+ /* IP fragmentation */
+ ip_frag_set_vnet_buffer (b, adj_packet_bytes,
+ (is_midchain ?
+ IP_FRAG_NEXT_IP_REWRITE_MIDCHAIN :
+ IP_FRAG_NEXT_IP_REWRITE), 0);
+ *next = IP4_REWRITE_NEXT_FRAGMENT;
+ }
+ }
 }
-
- return frame->n_vectors;
}
-static uword
-ip4_arp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* increment TTL & update checksum.
+ Works either endian, so no need for byte swap.
+ Used to undo the forwarding TTL decrement before a packet is handed to
+ the icmp-error path (e.g. on MTU exceeded), so the reflected header
+ carries the original TTL. */
+static_always_inline void
+ip4_ttl_inc (vlib_buffer_t * b, ip4_header_t * ip)
{
- return (ip4_arp_inline (vm, node, frame, 0));
+ i32 ttl;
+ u32 checksum;
+ /* locally-originated packets were never decremented; just clear flag */
+ if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))
+ {
+ b->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ return;
+ }
+
+ ttl = ip->ttl;
+
+ /* incremental checksum update (RFC 1624 style end-around carry) */
+ checksum = ip->checksum - clib_host_to_net_u16 (0x0100);
+ checksum += checksum >= 0xffff;
+
+ ip->checksum = checksum;
+ ttl += 1;
+ ip->ttl = ttl;
+
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
}
-static uword
-ip4_glean (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* Decrement TTL & update checksum.
+ Works either endian, so no need for byte swap.
+ On TTL expiry, sets *error and redirects *next to the icmp-error node. */
+static_always_inline void
+ip4_ttl_and_checksum_check (vlib_buffer_t * b, ip4_header_t * ip, u16 * next,
+ u32 * error)
{
- return (ip4_arp_inline (vm, node, frame, 1));
-}
+ i32 ttl;
+ u32 checksum;
+ /* locally-originated packets skip the decrement; clear the flag */
+ if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))
+ {
+ b->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ return;
+ }
-static char *ip4_arp_error_strings[] = {
- [IP4_ARP_ERROR_DROP] = "address overflow drops",
- [IP4_ARP_ERROR_REQUEST_SENT] = "ARP requests sent",
- [IP4_ARP_ERROR_NON_ARP_ADJ] = "ARPs to non-ARP adjacencies",
- [IP4_ARP_ERROR_REPLICATE_DROP] = "ARP replication completed",
- [IP4_ARP_ERROR_REPLICATE_FAIL] = "ARP replication failed",
- [IP4_ARP_ERROR_NO_SOURCE_ADDRESS] = "no source address for ARP request",
-};
+ ttl = ip->ttl;
-VLIB_REGISTER_NODE (ip4_arp_node) =
-{
- .function = ip4_arp,.name = "ip4-arp",.vector_size =
- sizeof (u32),.format_trace = format_ip4_forward_next_trace,.n_errors =
- ARRAY_LEN (ip4_arp_error_strings),.error_strings =
- ip4_arp_error_strings,.n_next_nodes = IP4_ARP_N_NEXT,.next_nodes =
- {
- [IP4_ARP_NEXT_DROP] = "error-drop",}
-,};
+ /* Input node should have reject packets with ttl 0. */
+ ASSERT (ip->ttl > 0);
-VLIB_REGISTER_NODE (ip4_glean_node) =
-{
- .function = ip4_glean,.name = "ip4-glean",.vector_size =
- sizeof (u32),.format_trace = format_ip4_forward_next_trace,.n_errors =
- ARRAY_LEN (ip4_arp_error_strings),.error_strings =
- ip4_arp_error_strings,.n_next_nodes = IP4_ARP_N_NEXT,.next_nodes =
- {
- [IP4_ARP_NEXT_DROP] = "error-drop",}
-,};
+ /* incremental checksum update (RFC 1624 style end-around carry) */
+ checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
+ checksum += checksum >= 0xffff;
-#define foreach_notrace_ip4_arp_error \
-_(DROP) \
-_(REQUEST_SENT) \
-_(REPLICATE_DROP) \
-_(REPLICATE_FAIL)
+ ip->checksum = checksum;
+ ttl -= 1;
+ ip->ttl = ttl;
-clib_error_t *
-arp_notrace_init (vlib_main_t * vm)
-{
- vlib_node_runtime_t *rt = vlib_node_get_runtime (vm, ip4_arp_node.index);
+ /*
+ * If the ttl drops below 1 when forwarding, generate
+ * an ICMP response.
+ */
+ if (PREDICT_FALSE (ttl <= 0))
+ {
+ *error = IP4_ERROR_TIME_EXPIRED;
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ icmp4_error_set_vnet_buffer (b, ICMP4_time_exceeded,
+ ICMP4_time_exceeded_ttl_exceeded_in_transit,
+ 0);
+ *next = IP4_REWRITE_NEXT_ICMP_ERROR;
+ }
- /* don't trace ARP request packets */
-#define _(a) \
- vnet_pcap_drop_trace_filter_add_del \
- (rt->errors[IP4_ARP_ERROR_##a], \
- 1 /* is_add */);
- foreach_notrace_ip4_arp_error;
-#undef _
- return 0;
+ /* Verify checksum. */
+ ASSERT ((ip->checksum == ip4_header_checksum (ip)) ||
+ (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
}
-VLIB_INIT_FUNCTION (arp_notrace_init);
-
-/* Send an ARP request to see if given destination is reachable on given interface. */
-clib_error_t *
-ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst, u32 sw_if_index)
+always_inline uword
+ip4_rewrite_inline_with_gso (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int do_counters, int is_midchain, int is_mcast)
{
- vnet_main_t *vnm = vnet_get_main ();
- ip4_main_t *im = &ip4_main;
- ethernet_arp_header_t *h;
- ip4_address_t *src;
- ip_interface_address_t *ia;
- ip_adjacency_t *adj;
- vnet_hw_interface_t *hi;
- vnet_sw_interface_t *si;
- vlib_buffer_t *b;
- adj_index_t ai;
- u32 bi = 0;
+ ip_lookup_main_t *lm = &ip4_main.lookup_main;
+ u32 *from = vlib_frame_vector_args (frame);
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ u32 n_left_from;
+ vlib_node_runtime_t *error_node =
+ vlib_node_get_runtime (vm, ip4_input_node.index);
- si = vnet_get_sw_interface (vnm, sw_if_index);
+ n_left_from = frame->n_vectors;
+ u32 thread_index = vm->thread_index;
- if (!(si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP))
- {
- return clib_error_return (0, "%U: interface %U down",
- format_ip4_address, dst,
- format_vnet_sw_if_index_name, vnm,
- sw_if_index);
- }
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ clib_memset_u16 (nexts, IP4_REWRITE_NEXT_DROP, n_left_from);
- src =
- ip4_interface_address_matching_destination (im, dst, sw_if_index, &ia);
- if (!src)
+#if (CLIB_N_PREFETCHES >= 8)
+ if (n_left_from >= 6)
{
- vnm->api_errno = VNET_API_ERROR_NO_MATCHING_INTERFACE;
- return clib_error_return
- (0,
- "no matching interface address for destination %U (interface %U)",
- format_ip4_address, dst, format_vnet_sw_if_index_name, vnm,
- sw_if_index);
+ int i;
+ for (i = 2; i < 6; i++)
+ vlib_prefetch_buffer_header (bufs[i], LOAD);
}
- ip46_address_t nh = {
- .ip4 = *dst,
- };
-
- ai = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4,
- VNET_LINK_IP4, &nh, sw_if_index);
- adj = adj_get (ai);
+ next = nexts;
+ b = bufs;
+ while (n_left_from >= 8)
+ {
+ const ip_adjacency_t *adj0, *adj1;
+ ip4_header_t *ip0, *ip1;
+ u32 rw_len0, error0, adj_index0;
+ u32 rw_len1, error1, adj_index1;
+ u32 tx_sw_if_index0, tx_sw_if_index1;
+ u8 *p;
+
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+ adj_index1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
+
+ /*
+ * pre-fetch the per-adjacency counters
+ */
+ if (do_counters)
+ {
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ thread_index, adj_index0);
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ thread_index, adj_index1);
+ }
- h = vlib_packet_template_get_packet (vm,
- &im->ip4_arp_request_packet_template,
- &bi);
+ ip0 = vlib_buffer_get_current (b[0]);
+ ip1 = vlib_buffer_get_current (b[1]);
+
+ error0 = error1 = IP4_ERROR_NONE;
+
+ ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0);
+ ip4_ttl_and_checksum_check (b[1], ip1, next + 1, &error1);
+
+ /* Rewrite packet header and updates lengths. */
+ adj0 = adj_get (adj_index0);
+ adj1 = adj_get (adj_index1);
+
+ /* Worth pipelining. No guarantee that adj0,1 are hot... */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ rw_len1 = adj1[0].rewrite_header.data_bytes;
+ vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
+ vnet_buffer (b[1])->ip.save_rewrite_length = rw_len1;
+
+ p = vlib_buffer_get_current (b[2]);
+ CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ p = vlib_buffer_get_current (b[3]);
+ CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ /* Check MTU of outgoing interface. */
+ u16 ip0_len = clib_net_to_host_u16 (ip0->length);
+ u16 ip1_len = clib_net_to_host_u16 (ip1->length);
+
+ if (b[0]->flags & VNET_BUFFER_F_GSO)
+ ip0_len = gso_mtu_sz (b[0]);
+ if (b[1]->flags & VNET_BUFFER_F_GSO)
+ ip1_len = gso_mtu_sz (b[1]);
+
+ ip4_mtu_check (b[0], ip0_len,
+ adj0[0].rewrite_header.max_l3_packet_bytes,
+ ip0->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+ next + 0, is_midchain, &error0);
+ ip4_mtu_check (b[1], ip1_len,
+ adj1[0].rewrite_header.max_l3_packet_bytes,
+ ip1->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+ next + 1, is_midchain, &error1);
+
+ if (is_mcast)
+ {
+ error0 = ((adj0[0].rewrite_header.sw_if_index ==
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
+ IP4_ERROR_SAME_INTERFACE : error0);
+ error1 = ((adj1[0].rewrite_header.sw_if_index ==
+ vnet_buffer (b[1])->sw_if_index[VLIB_RX]) ?
+ IP4_ERROR_SAME_INTERFACE : error1);
+ }
- hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ /* Don't adjust the buffer for ttl issue; icmp-error node wants
+ * to see the IP header */
+ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+ {
+ u32 next_index = adj0[0].rewrite_header.next_index;
+ vlib_buffer_advance (b[0], -(word) rw_len0);
- clib_memcpy (h->ip4_over_ethernet[0].ethernet, hi->hw_address,
- sizeof (h->ip4_over_ethernet[0].ethernet));
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0;
- h->ip4_over_ethernet[0].ip4 = src[0];
- h->ip4_over_ethernet[1].ip4 = dst[0];
+ if (PREDICT_FALSE
+ (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next_index, b[0]);
+ next[0] = next_index;
+ if (is_midchain)
+ calc_checksums (vm, b[0]);
+ }
+ else
+ {
+ b[0]->error = error_node->errors[error0];
+ if (error0 == IP4_ERROR_MTU_EXCEEDED)
+ ip4_ttl_inc (b[0], ip0);
+ }
+ if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
+ {
+ u32 next_index = adj1[0].rewrite_header.next_index;
+ vlib_buffer_advance (b[1], -(word) rw_len1);
- b = vlib_get_buffer (vm, bi);
- vnet_buffer (b)->sw_if_index[VLIB_RX] =
- vnet_buffer (b)->sw_if_index[VLIB_TX] = sw_if_index;
+ tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] = tx_sw_if_index1;
- /* Add encapsulation string for software interface (e.g. ethernet header). */
- vnet_rewrite_one_header (adj[0], h, sizeof (ethernet_header_t));
- vlib_buffer_advance (b, -adj->rewrite_header.data_bytes);
+ if (PREDICT_FALSE
+ (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index1, &next_index, b[1]);
+ next[1] = next_index;
+ if (is_midchain)
+ calc_checksums (vm, b[1]);
+ }
+ else
+ {
+ b[1]->error = error_node->errors[error1];
+ if (error1 == IP4_ERROR_MTU_EXCEEDED)
+ ip4_ttl_inc (b[1], ip1);
+ }
- {
- vlib_frame_t *f = vlib_get_frame_to_node (vm, hi->output_node_index);
- u32 *to_next = vlib_frame_vector_args (f);
- to_next[0] = bi;
- f->n_vectors = 1;
- vlib_put_frame_to_node (vm, hi->output_node_index, f);
- }
+ /* Guess we are only writing on simple Ethernet header. */
+ vnet_rewrite_two_headers (adj0[0], adj1[0],
+ ip0, ip1, sizeof (ethernet_header_t));
- adj_unlock (ai);
- return /* no error */ 0;
-}
+ if (do_counters)
+ {
+ if (error0 == IP4_ERROR_NONE)
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]) + rw_len0);
-typedef enum
-{
- IP4_REWRITE_NEXT_DROP,
- IP4_REWRITE_NEXT_ICMP_ERROR,
-} ip4_rewrite_next_t;
+ if (error1 == IP4_ERROR_NONE)
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index1, 1,
+ vlib_buffer_length_in_chain (vm, b[1]) + rw_len1);
+ }
-always_inline uword
-ip4_rewrite_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- int do_counters, int is_midchain, int is_mcast)
-{
- ip_lookup_main_t *lm = &ip4_main.lookup_main;
- u32 *from = vlib_frame_vector_args (frame);
- u32 n_left_from, n_left_to_next, *to_next, next_index;
- vlib_node_runtime_t *error_node =
- vlib_node_get_runtime (vm, ip4_input_node.index);
+ if (is_midchain)
+ {
+ if (error0 == IP4_ERROR_NONE && adj0->sub_type.midchain.fixup_func)
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+ if (error1 == IP4_ERROR_NONE && adj1->sub_type.midchain.fixup_func)
+ adj1->sub_type.midchain.fixup_func
+ (vm, adj1, b[1], adj1->sub_type.midchain.fixup_data);
+ }
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
- u32 thread_index = vlib_get_thread_index ();
+ if (is_mcast)
+ {
+ /* copy bytes from the IP address into the MAC rewrite */
+ if (error0 == IP4_ERROR_NONE)
+ vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+ adj0->rewrite_header.dst_mcast_offset,
+ &ip0->dst_address.as_u32, (u8 *) ip0);
+ if (error1 == IP4_ERROR_NONE)
+ vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+ adj1->rewrite_header.dst_mcast_offset,
+ &ip1->dst_address.as_u32, (u8 *) ip1);
+ }
- while (n_left_from > 0)
+ next += 2;
+ b += 2;
+ n_left_from -= 2;
+ }
+#elif (CLIB_N_PREFETCHES >= 4)
+ next = nexts;
+ b = bufs;
+ while (n_left_from >= 1)
{
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ ip_adjacency_t *adj0;
+ ip4_header_t *ip0;
+ u32 rw_len0, error0, adj_index0;
+ u32 tx_sw_if_index0;
+ u8 *p;
+
+ /* Prefetch next iteration */
+ if (PREDICT_TRUE (n_left_from >= 4))
{
- ip_adjacency_t *adj0, *adj1;
- vlib_buffer_t *p0, *p1;
- ip4_header_t *ip0, *ip1;
- u32 pi0, rw_len0, next0, error0, checksum0, adj_index0;
- u32 pi1, rw_len1, next1, error1, checksum1, adj_index1;
- u32 tx_sw_if_index0, tx_sw_if_index1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
+ ip_adjacency_t *adj2;
+ u32 adj_index2;
+
+ vlib_prefetch_buffer_header (b[3], LOAD);
+ vlib_prefetch_buffer_data (b[2], LOAD);
+
+ /* Prefetch adj->rewrite_header */
+ adj_index2 = vnet_buffer (b[2])->ip.adj_index[VLIB_TX];
+ adj2 = adj_get (adj_index2);
+ p = (u8 *) adj2;
+ CLIB_PREFETCH (p + CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES,
+ LOAD);
+ }
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
+ adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
- vlib_prefetch_buffer_header (p2, STORE);
- vlib_prefetch_buffer_header (p3, STORE);
+ /*
+ * Prefetch the per-adjacency counters
+ */
+ if (do_counters)
+ {
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ thread_index, adj_index0);
+ }
- CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
- CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
- }
+ ip0 = vlib_buffer_get_current (b[0]);
- pi0 = to_next[0] = from[0];
- pi1 = to_next[1] = from[1];
+ error0 = IP4_ERROR_NONE;
- from += 2;
- n_left_from -= 2;
- to_next += 2;
- n_left_to_next -= 2;
+ ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0);
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
+ /* Rewrite packet header and updates lengths. */
+ adj0 = adj_get (adj_index0);
- adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
- adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+ /* Rewrite header was prefetched. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
- /*
- * pre-fetch the per-adjacency counters
- */
- if (do_counters)
- {
- vlib_prefetch_combined_counter (&adjacency_counters,
- thread_index, adj_index0);
- vlib_prefetch_combined_counter (&adjacency_counters,
- thread_index, adj_index1);
- }
+ /* Check MTU of outgoing interface. */
+ u16 ip0_len = clib_net_to_host_u16 (ip0->length);
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
+ if (b[0]->flags & VNET_BUFFER_F_GSO)
+ ip0_len = gso_mtu_sz (b[0]);
- error0 = error1 = IP4_ERROR_NONE;
- next0 = next1 = IP4_REWRITE_NEXT_DROP;
+ ip4_mtu_check (b[0], ip0_len,
+ adj0[0].rewrite_header.max_l3_packet_bytes,
+ ip0->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+ next + 0, is_midchain, &error0);
- /* Decrement TTL & update checksum.
- Works either endian, so no need for byte swap. */
- if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
- {
- i32 ttl0 = ip0->ttl;
-
- /* Input node should have reject packets with ttl 0. */
- ASSERT (ip0->ttl > 0);
-
- checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
- checksum0 += checksum0 >= 0xffff;
-
- ip0->checksum = checksum0;
- ttl0 -= 1;
- ip0->ttl = ttl0;
-
- /*
- * If the ttl drops below 1 when forwarding, generate
- * an ICMP response.
- */
- if (PREDICT_FALSE (ttl0 <= 0))
- {
- error0 = IP4_ERROR_TIME_EXPIRED;
- vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
- icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
- ICMP4_time_exceeded_ttl_exceeded_in_transit,
- 0);
- next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
- }
-
- /* Verify checksum. */
- ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) ||
- (p0->flags | VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
- }
- else
- {
- p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
- }
- if (PREDICT_TRUE (!(p1->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
- {
- i32 ttl1 = ip1->ttl;
-
- /* Input node should have reject packets with ttl 0. */
- ASSERT (ip1->ttl > 0);
-
- checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
- checksum1 += checksum1 >= 0xffff;
-
- ip1->checksum = checksum1;
- ttl1 -= 1;
- ip1->ttl = ttl1;
-
- /*
- * If the ttl drops below 1 when forwarding, generate
- * an ICMP response.
- */
- if (PREDICT_FALSE (ttl1 <= 0))
- {
- error1 = IP4_ERROR_TIME_EXPIRED;
- vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
- icmp4_error_set_vnet_buffer (p1, ICMP4_time_exceeded,
- ICMP4_time_exceeded_ttl_exceeded_in_transit,
- 0);
- next1 = IP4_REWRITE_NEXT_ICMP_ERROR;
- }
-
- /* Verify checksum. */
- ASSERT ((ip1->checksum == ip4_header_checksum (ip1)) ||
- (p1->flags | VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
- }
- else
- {
- p1->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
- }
+ if (is_mcast)
+ {
+ error0 = ((adj0[0].rewrite_header.sw_if_index ==
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
+ IP4_ERROR_SAME_INTERFACE : error0);
+ }
- /* Rewrite packet header and updates lengths. */
- adj0 = adj_get (adj_index0);
- adj1 = adj_get (adj_index1);
-
- /* Worth pipelining. No guarantee that adj0,1 are hot... */
- rw_len0 = adj0[0].rewrite_header.data_bytes;
- rw_len1 = adj1[0].rewrite_header.data_bytes;
- vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
- vnet_buffer (p1)->ip.save_rewrite_length = rw_len1;
-
- /* Check MTU of outgoing interface. */
- error0 =
- (vlib_buffer_length_in_chain (vm, p0) >
- adj0[0].
- rewrite_header.max_l3_packet_bytes ? IP4_ERROR_MTU_EXCEEDED :
- error0);
- error1 =
- (vlib_buffer_length_in_chain (vm, p1) >
- adj1[0].
- rewrite_header.max_l3_packet_bytes ? IP4_ERROR_MTU_EXCEEDED :
- error1);
-
- /* Don't adjust the buffer for ttl issue; icmp-error node wants
- * to see the IP headerr */
- if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
- {
- next0 = adj0[0].rewrite_header.next_index;
- p0->current_data -= rw_len0;
- p0->current_length += rw_len0;
- tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
- vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
-
- if (PREDICT_FALSE
- (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next0, p0);
- }
- if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
- {
- next1 = adj1[0].rewrite_header.next_index;
- p1->current_data -= rw_len1;
- p1->current_length += rw_len1;
+ /* Don't adjust the buffer for ttl issue; icmp-error node wants
+ * to see the IP header */
+ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+ {
+ u32 next_index = adj0[0].rewrite_header.next_index;
+ vlib_buffer_advance (b[0], -(word) rw_len0);
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0;
- tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
- vnet_buffer (p1)->sw_if_index[VLIB_TX] = tx_sw_if_index1;
+ if (PREDICT_FALSE
+ (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next_index, b[0]);
+ next[0] = next_index;
- if (PREDICT_FALSE
- (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index1, &next1, p1);
- }
+ if (is_midchain)
+ calc_checksums (vm, b[0]);
/* Guess we are only writing on simple Ethernet header. */
- vnet_rewrite_two_headers (adj0[0], adj1[0],
- ip0, ip1, sizeof (ethernet_header_t));
+ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
/*
* Bump the per-adjacency counters
*/
if (do_counters)
- {
- vlib_increment_combined_counter
- (&adjacency_counters,
- thread_index,
- adj_index0, 1,
- vlib_buffer_length_in_chain (vm, p0) + rw_len0);
-
- vlib_increment_combined_counter
- (&adjacency_counters,
- thread_index,
- adj_index1, 1,
- vlib_buffer_length_in_chain (vm, p1) + rw_len1);
- }
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ thread_index,
+ adj_index0, 1, vlib_buffer_length_in_chain (vm,
+ b[0]) + rw_len0);
- if (is_midchain)
- {
- adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
- adj1->sub_type.midchain.fixup_func (vm, adj1, p1);
- }
- if (is_mcast)
- {
- /*
- * copy bytes from the IP address into the MAC rewrite
- */
- vnet_fixup_one_header (adj0[0], &ip0->dst_address, ip0);
- vnet_fixup_one_header (adj1[0], &ip1->dst_address, ip1);
- }
+ if (is_midchain && adj0->sub_type.midchain.fixup_func)
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- pi0, pi1, next0, next1);
+ if (is_mcast)
+ /* copy bytes from the IP address into the MAC rewrite */
+ vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+ adj0->rewrite_header.dst_mcast_offset,
+ &ip0->dst_address.as_u32, (u8 *) ip0);
}
-
- while (n_left_from > 0 && n_left_to_next > 0)
+ else
{
- ip_adjacency_t *adj0;
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- u32 pi0, rw_len0, adj_index0, next0, error0, checksum0;
- u32 tx_sw_if_index0;
+ b[0]->error = error_node->errors[error0];
+ /* undo the TTL decrement - we'll be back to do it again */
+ if (error0 == IP4_ERROR_MTU_EXCEEDED)
+ ip4_ttl_inc (b[0], ip0);
+ }
- pi0 = to_next[0] = from[0];
+ next += 1;
+ b += 1;
+ n_left_from -= 1;
+ }
+#endif
- p0 = vlib_get_buffer (vm, pi0);
+ while (n_left_from > 0)
+ {
+ ip_adjacency_t *adj0;
+ ip4_header_t *ip0;
+ u32 rw_len0, adj_index0, error0;
+ u32 tx_sw_if_index0;
- adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
- adj0 = adj_get (adj_index0);
+ adj0 = adj_get (adj_index0);
- ip0 = vlib_buffer_get_current (p0);
+ if (do_counters)
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ thread_index, adj_index0);
- error0 = IP4_ERROR_NONE;
- next0 = IP4_REWRITE_NEXT_DROP; /* drop on error */
+ ip0 = vlib_buffer_get_current (b[0]);
- /* Decrement TTL & update checksum. */
- if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
- {
- i32 ttl0 = ip0->ttl;
+ error0 = IP4_ERROR_NONE;
- checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
+ ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0);
- checksum0 += checksum0 >= 0xffff;
- ip0->checksum = checksum0;
+ /* Update packet buffer attributes/set output interface. */
+ rw_len0 = adj0[0].rewrite_header.data_bytes;
+ vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
- ASSERT (ip0->ttl > 0);
+ /* Check MTU of outgoing interface. */
+ u16 ip0_len = clib_net_to_host_u16 (ip0->length);
+ if (b[0]->flags & VNET_BUFFER_F_GSO)
+ ip0_len = gso_mtu_sz (b[0]);
- ttl0 -= 1;
+ ip4_mtu_check (b[0], ip0_len,
+ adj0[0].rewrite_header.max_l3_packet_bytes,
+ ip0->flags_and_fragment_offset &
+ clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+ next + 0, is_midchain, &error0);
- ip0->ttl = ttl0;
+ if (is_mcast)
+ {
+ error0 = ((adj0[0].rewrite_header.sw_if_index ==
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
+ IP4_ERROR_SAME_INTERFACE : error0);
+ }
- ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) ||
- (p0->flags | VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
+ /* Don't adjust the buffer for ttl issue; icmp-error node wants
+ * to see the IP header */
+ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+ {
+ u32 next_index = adj0[0].rewrite_header.next_index;
+ vlib_buffer_advance (b[0], -(word) rw_len0);
+ tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0;
- if (PREDICT_FALSE (ttl0 <= 0))
- {
- /*
- * If the ttl drops below 1 when forwarding, generate
- * an ICMP response.
- */
- error0 = IP4_ERROR_TIME_EXPIRED;
- next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
- vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
- icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
- ICMP4_time_exceeded_ttl_exceeded_in_transit,
- 0);
- }
- }
- else
- {
- p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
- }
+ if (PREDICT_FALSE
+ (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+ vnet_feature_arc_start (lm->output_feature_arc_index,
+ tx_sw_if_index0, &next_index, b[0]);
+ next[0] = next_index;
- if (do_counters)
- vlib_prefetch_combined_counter (&adjacency_counters,
- thread_index, adj_index0);
+ if (is_midchain)
+ /* this acts on the packet that is about to be encapped */
+ calc_checksums (vm, b[0]);
/* Guess we are only writing on simple Ethernet header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
- if (is_mcast)
- {
- /*
- * copy bytes from the IP address into the MAC rewrite
- */
- vnet_fixup_one_header (adj0[0], &ip0->dst_address, ip0);
- }
-
- /* Update packet buffer attributes/set output interface. */
- rw_len0 = adj0[0].rewrite_header.data_bytes;
- vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
if (do_counters)
vlib_increment_combined_counter
(&adjacency_counters,
thread_index, adj_index0, 1,
- vlib_buffer_length_in_chain (vm, p0) + rw_len0);
-
- /* Check MTU of outgoing interface. */
- error0 = (vlib_buffer_length_in_chain (vm, p0)
- > adj0[0].rewrite_header.max_l3_packet_bytes
- ? IP4_ERROR_MTU_EXCEEDED : error0);
-
- p0->error = error_node->errors[error0];
+ vlib_buffer_length_in_chain (vm, b[0]) + rw_len0);
- /* Don't adjust the buffer for ttl issue; icmp-error node wants
- * to see the IP headerr */
- if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
- {
- p0->current_data -= rw_len0;
- p0->current_length += rw_len0;
- tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
-
- vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
- next0 = adj0[0].rewrite_header.next_index;
-
- if (is_midchain)
- {
- adj0->sub_type.midchain.fixup_func (vm, adj0, p0);
- }
-
- if (PREDICT_FALSE
- (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
- vnet_feature_arc_start (lm->output_feature_arc_index,
- tx_sw_if_index0, &next0, p0);
+ if (is_midchain && adj0->sub_type.midchain.fixup_func)
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
- }
-
- from += 1;
- n_left_from -= 1;
- to_next += 1;
- n_left_to_next -= 1;
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- pi0, next0);
+ if (is_mcast)
+ /* copy bytes from the IP address into the MAC rewrite */
+ vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+ adj0->rewrite_header.dst_mcast_offset,
+ &ip0->dst_address.as_u32, (u8 *) ip0);
+ }
+ else
+ {
+ b[0]->error = error_node->errors[error0];
+ /* undo the TTL decrement - we'll be back to do it again */
+ if (error0 == IP4_ERROR_MTU_EXCEEDED)
+ ip4_ttl_inc (b[0], ip0);
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ next += 1;
+ b += 1;
+ n_left_from -= 1;
}
+
/* Need to do trace after rewrites to pick up new packet data. */
if (node->flags & VLIB_NODE_FLAG_TRACE)
ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
+always_inline uword
+ip4_rewrite_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int do_counters, int is_midchain, int is_mcast)
+{
+ return ip4_rewrite_inline_with_gso (vm, node, frame, do_counters,
+ is_midchain, is_mcast);
+}
+
/** @brief IPv4 rewrite node.
@node ip4-rewrite
<em>Next Indices:</em>
- <code> adj->rewrite_header.next_index </code>
- or @c error-drop
+ or @c ip4-drop
*/
-static uword
-ip4_rewrite (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+
+VLIB_NODE_FN (ip4_rewrite_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ if (adj_are_counters_enabled ())
+ return ip4_rewrite_inline (vm, node, frame, 1, 0, 0);
+ else
+ return ip4_rewrite_inline (vm, node, frame, 0, 0, 0);
+}
+
+VLIB_NODE_FN (ip4_rewrite_bcast_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 0, 0);
return ip4_rewrite_inline (vm, node, frame, 0, 0, 0);
}
-static uword
-ip4_midchain (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_midchain_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 1, 0);
return ip4_rewrite_inline (vm, node, frame, 0, 1, 0);
}
-static uword
-ip4_rewrite_mcast (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_rewrite_mcast_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 0, 1);
return ip4_rewrite_inline (vm, node, frame, 0, 0, 1);
}
-static uword
-ip4_mcast_midchain (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_mcast_midchain_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 1, 1);
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_rewrite_node) = {
- .function = ip4_rewrite,
.name = "ip4-rewrite",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
- .n_next_nodes = 2,
+ .n_next_nodes = IP4_REWRITE_N_NEXT,
.next_nodes = {
- [IP4_REWRITE_NEXT_DROP] = "error-drop",
+ [IP4_REWRITE_NEXT_DROP] = "ip4-drop",
[IP4_REWRITE_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [IP4_REWRITE_NEXT_FRAGMENT] = "ip4-frag",
},
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_node, ip4_rewrite)
+
+VLIB_REGISTER_NODE (ip4_rewrite_bcast_node) = {
+ .name = "ip4-rewrite-bcast",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_ip4_rewrite_trace,
+ .sibling_of = "ip4-rewrite",
+};
VLIB_REGISTER_NODE (ip4_rewrite_mcast_node) = {
- .function = ip4_rewrite_mcast,
.name = "ip4-rewrite-mcast",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_mcast_node, ip4_rewrite_mcast)
-VLIB_REGISTER_NODE (ip4_mcast_midchain_node, static) = {
- .function = ip4_mcast_midchain,
+VLIB_REGISTER_NODE (ip4_mcast_midchain_node) = {
.name = "ip4-mcast-midchain",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_mcast_midchain_node, ip4_mcast_midchain)
VLIB_REGISTER_NODE (ip4_midchain_node) = {
- .function = ip4_midchain,
.name = "ip4-midchain",
.vector_size = sizeof (u32),
- .format_trace = format_ip4_forward_next_trace,
- .sibling_of = "ip4-rewrite",
+ .format_trace = format_ip4_rewrite_trace,
+ .sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_midchain_node, ip4_midchain);
/* *INDENT-ON */
-static clib_error_t *
-add_del_interface_table (vlib_main_t * vm,
- unformat_input_t * input, vlib_cli_command_t * cmd)
-{
- vnet_main_t *vnm = vnet_get_main ();
- ip_interface_address_t *ia;
- clib_error_t *error = 0;
- u32 sw_if_index, table_id;
-
- sw_if_index = ~0;
-
- if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
- {
- error = clib_error_return (0, "unknown interface `%U'",
- format_unformat_error, input);
- goto done;
- }
-
- if (unformat (input, "%d", &table_id))
- ;
- else
- {
- error = clib_error_return (0, "expected table id `%U'",
- format_unformat_error, input);
- goto done;
- }
-
- /*
- * If the interface already has in IP address, then a change int
- * VRF is not allowed. The IP address applied must first be removed.
- * We do not do that automatically here, since VPP has no knowledge
- * of whether thoses subnets are valid in the destination VRF.
- */
- /* *INDENT-OFF* */
- foreach_ip_interface_address (&ip4_main.lookup_main,
- ia, sw_if_index,
- 1 /* honor unnumbered */,
- ({
- ip4_address_t * a;
-
- a = ip_interface_address_get_address (&ip4_main.lookup_main, ia);
- error = clib_error_return (0, "interface %U has address %U",
- format_vnet_sw_if_index_name, vnm,
- sw_if_index,
- format_ip4_address, a);
- goto done;
- }));
- /* *INDENT-ON* */
-
-{
- ip4_main_t *im = &ip4_main;
- u32 fib_index;
-
- fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id);
-
- vec_validate (im->fib_index_by_sw_if_index, sw_if_index);
- im->fib_index_by_sw_if_index[sw_if_index] = fib_index;
-
- fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id);
- vec_validate (im->mfib_index_by_sw_if_index, sw_if_index);
- im->mfib_index_by_sw_if_index[sw_if_index] = fib_index;
-}
-
-done:
-return error;
-}
-
-/*?
- * Place the indicated interface into the supplied IPv4 FIB table (also known
- * as a VRF). If the FIB table does not exist, this command creates it. To
- * display the current IPv4 FIB table, use the command '<em>show ip fib</em>'.
- * FIB table will only be displayed if a route has been added to the table, or
- * an IP Address is assigned to an interface in the table (which adds a route
- * automatically).
- *
- * @note IP addresses added after setting the interface IP table are added to
- * the indicated FIB table. If an IP address is added prior to changing the
- * table then this is an error. The control plane must remove these addresses
- * first and then change the table. VPP will not automatically move the
- * addresses from the old to the new table as it does not know the validity
- * of such a change.
- *
- * @cliexpar
- * Example of how to add an interface to an IPv4 FIB table (where 2 is the table-id):
- * @cliexcmd{set interface ip table GigabitEthernet2/0/0 2}
- ?*/
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (set_interface_ip_table_command, static) =
-{
- .path = "set interface ip table",
- .function = add_del_interface_table,
- .short_help = "set interface ip table <interface> <table-id>",
-};
-/* *INDENT-ON* */
-
-int
+static int
ip4_lookup_validate (ip4_address_t * a, u32 fib_index0)
{
ip4_fib_mtrie_t *mtrie0;
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
int
vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
{
return 0;
}
+#endif
static clib_error_t *
set_ip_flow_hash_command_fn (vlib_main_t * vm,
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
int
vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
u32 table_index)
return 0;
}
+#endif
static clib_error_t *
set_ip_classify_command_fn (vlib_main_t * vm,
};
/* *INDENT-ON* */
+static clib_error_t *
+ip4_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ ip4_main_t *im = &ip4_main;
+ uword heapsize = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "heap-size %U", unformat_memory_size, &heapsize))
+ ;
+ else
+ return clib_error_return (0,
+ "invalid heap-size parameter `%U'",
+ format_unformat_error, input);
+ }
+
+ im->mtrie_heap_size = heapsize;
+
+ return 0;
+}
+
+VLIB_EARLY_CONFIG_FUNCTION (ip4_config, "ip");
+
/*
* fd.io coding-style-patch-verification: ON
*