* We don't want to use the same hash value at each level in the recursion
* graph as that would lead to polarisation
*/
- hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
- hc1 = vnet_buffer (p1)->ip.flow_hash = 0;
+ hc0 = hc1 = 0;
if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
  {
    if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
      {
        hc0 = vnet_buffer (p0)->ip.flow_hash =
          vnet_buffer (p0)->ip.flow_hash >> 1;
      }
    else
      {
hc0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, hc0);
+ ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
}
}
if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
  {
    if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
      {
        hc1 = vnet_buffer (p1)->ip.flow_hash =
          vnet_buffer (p1)->ip.flow_hash >> 1;
      }
    else
{
hc1 = vnet_buffer (p1)->ip.flow_hash =
- ip4_compute_flow_hash (ip1, hc1);
+ ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
}
}
lb0 = load_balance_get (lbi0);
- hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
+ hc0 = 0;
if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
{
    if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
      {
        hc0 = vnet_buffer (p0)->ip.flow_hash =
          vnet_buffer (p0)->ip.flow_hash >> 1;
      }
    else
{
hc0 = vnet_buffer (p0)->ip.flow_hash =
- ip4_compute_flow_hash (ip0, hc0);
+ ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
}
}
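
For context, the hunks above change ip4_load_balance so that, when the buffer carries no flow hash yet, the hash is computed from the load-balance's configured lb_hash_config rather than the zeroed local value, and the buffer's stored hash is no longer reset on entry. The sketch below is a standalone illustration, not VPP code (bucket_at_level and the shift-per-level rule are invented for the example), of why deriving a fresh value at each recursion level avoids polarisation, i.e. every level picking a correlated bucket for the same flow.

/* Hypothetical standalone sketch, not VPP code: reusing one flow hash
 * unchanged at every ECMP level makes every level pick a correlated
 * bucket (polarisation); deriving a per-level value decorrelates them. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
bucket_at_level (uint32_t flow_hash, int level, uint32_t n_buckets)
{
  /* illustrative per-level derivation, echoing the "shift per level" idea */
  return (flow_hash >> level) & (n_buckets - 1);
}

int
main (void)
{
  uint32_t flow_hash = 0x5a5a5a5a;	/* computed once from the 5-tuple */

  for (int level = 0; level < 4; level++)
    printf ("level %d: same-hash bucket %u, per-level bucket %u\n",
	    level, flow_hash & 3u, bucket_at_level (flow_hash, level, 4));
  return 0;
}
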
always_inline uword
ip4_rewrite_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
- vlib_frame_t * frame, int is_midchain, int is_mcast)
+ vlib_frame_t * frame,
+ int do_counters, int is_midchain, int is_mcast)
{
ip_lookup_main_t *lm = &ip4_main.lookup_main;
u32 *from = vlib_frame_vector_args (frame);
/*
* pre-fetch the per-adjacency counters
*/
- vlib_prefetch_combined_counter (&adjacency_counters,
- cpu_index, adj_index0);
- vlib_prefetch_combined_counter (&adjacency_counters,
- cpu_index, adj_index1);
+ if (do_counters)
+ {
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ cpu_index, adj_index0);
+ vlib_prefetch_combined_counter (&adjacency_counters,
+ cpu_index, adj_index1);
+ }
/* Don't adjust the buffer for ttl issue; icmp-error node wants
 * to see the IP header */
/*
* Bump the per-adjacency counters
*/
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index,
- adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0);
-
- vlib_increment_combined_counter
- (&adjacency_counters,
- cpu_index,
- adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1);
+ if (do_counters)
+ {
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index0, 1,
+ vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+
+ vlib_increment_combined_counter
+ (&adjacency_counters,
+ cpu_index,
+ adj_index1, 1,
+ vlib_buffer_length_in_chain (vm, p1) + rw_len1);
+ }
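
Both the prefetch and the increment hunks compile away when the node is specialised with do_counters = 0. As a rough picture of what a combined counter is and why its cache line is prefetched ahead of the update, here is a simplified standalone sketch; the struct and helpers are assumptions for illustration, not the vlib combined-counter types.

/* Simplified sketch, not the vlib implementation: a combined counter keeps
 * packet and byte totals side by side, one slot per adjacency per thread,
 * so a single (prefetchable) cache line is written per packet. */
#include <stdint.h>

typedef struct
{
  uint64_t packets;
  uint64_t bytes;
} combined_counter_t;

static inline void
counter_prefetch (combined_counter_t *per_thread, uint32_t adj_index)
{
  /* warm the line before the read-modify-write in counter_add */
  __builtin_prefetch (&per_thread[adj_index], 1 /* write */, 3 /* keep */);
}

static inline void
counter_add (combined_counter_t *per_thread, uint32_t adj_index,
	     uint64_t n_packets, uint64_t n_bytes)
{
  per_thread[adj_index].packets += n_packets;
  per_thread[adj_index].bytes += n_bytes;
}
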
if (is_midchain)
{
static uword
ip4_rewrite (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return ip4_rewrite_inline (vm, node, frame, 0, 0);
+ if (adj_are_counters_enabled ())
+ return ip4_rewrite_inline (vm, node, frame, 1, 0, 0);
+ else
+ return ip4_rewrite_inline (vm, node, frame, 0, 0, 0);
}
static uword
ip4_midchain (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return ip4_rewrite_inline (vm, node, frame, 1, 0);
+ if (adj_are_counters_enabled ())
+ return ip4_rewrite_inline (vm, node, frame, 1, 1, 0);
+ else
+ return ip4_rewrite_inline (vm, node, frame, 0, 1, 0);
}
static uword
ip4_rewrite_mcast (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- return ip4_rewrite_inline (vm, node, frame, 0, 1);
+ if (adj_are_counters_enabled ())
+ return ip4_rewrite_inline (vm, node, frame, 1, 0, 1);
+ else
+ return ip4_rewrite_inline (vm, node, frame, 0, 0, 1);
}
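
All three wrappers follow the same pattern: ip4_rewrite_inline is always_inline and takes its feature switches as constant ints, so the compiler emits one branch-free body per combination, and a single runtime test on adj_are_counters_enabled() selects the right specialisation per frame. Below is a minimal sketch of that pattern outside VPP; the worker names and the byte-counting workload are invented for illustration.

/* Sketch only: a runtime flag selects between compile-time specialisations
 * of an inlined worker, so the per-packet loop never tests the flag. */
#include <stdint.h>
#include <stddef.h>

#define always_inline static inline __attribute__ ((__always_inline__))

always_inline uint64_t
worker_inline (const uint32_t *pkt_len, size_t n, int do_counters)
{
  uint64_t bytes = 0;
  for (size_t i = 0; i < n; i++)
    {
      if (do_counters)	/* constant per specialisation; folded away */
	bytes += pkt_len[i];
      /* ... rewrite work common to both variants would go here ... */
    }
  return bytes;
}

uint64_t
worker (const uint32_t *pkt_len, size_t n, int counters_enabled)
{
  if (counters_enabled)
    return worker_inline (pkt_len, n, 1);
  else
    return worker_inline (pkt_len, n, 0);
}

The cost is an extra copy of the loop in the binary for each combination; the gain is that the hot path never branches on the flag per packet.
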
/* *INDENT-OFF* */