Type: fix
Several tunnel encapsulations use UDP as the outer header, with the UDP
src port set from the inner header's flow hash — e.g. gtpu, geneve,
vxlan, vxlan-gbp. Since the flow hash of the inner header has already
been calculated, saving it in vnet_buffer(b)->ip.flow_hash spares the
load-balance node the work of recomputing it when selecting ECMP uplinks.
Change-Id: I0e4e2b27178f4fcc5785e221d6d1f3e8747d0d59
Signed-off-by: Shawn Ji <xiaji@tethrnet.com>
stats_n_packets += 4;
stats_n_bytes += len0 + len1 + len2 + len3;
stats_n_packets += 4;
stats_n_bytes += len0 + len1 + len2 + len3;
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+ vnet_buffer (b1)->ip.flow_hash = flow_hash1;
+ vnet_buffer (b2)->ip.flow_hash = flow_hash2;
+ vnet_buffer (b3)->ip.flow_hash = flow_hash3;
+
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
stats_n_packets += 1;
stats_n_bytes += len0;
stats_n_packets += 1;
stats_n_bytes += len0;
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
/* Batch stats increment on the same gtpu tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
/* Get next node index and adj index from tunnel next_dpo */
if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
{
/* Get next node index and adj index from tunnel next_dpo */
if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
{
stats_n_packets += 2;
stats_n_bytes += len0 + len1;
stats_n_packets += 2;
stats_n_bytes += len0 + len1;
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+ vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
+
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
stats_n_packets += 1;
stats_n_bytes += len0;
stats_n_packets += 1;
stats_n_bytes += len0;
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
/* Batch stats increment on the same geneve tunnel so counter is not
incremented per packet. Note stats are still incremented for deleted
and admin-down tunnel where packets are dropped. It is not worthwhile
udp1->checksum = 0xffff;
}
udp1->checksum = 0xffff;
}
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+ vnet_buffer (b[1])->ip.flow_hash = flow_hash1;
+
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
vlib_increment_combined_counter (tx_counter, thread_index,
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
vlib_increment_combined_counter (tx_counter, thread_index,
udp0->checksum = 0xffff;
}
udp0->checksum = 0xffff;
}
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
+
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated++;
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated++;
udp1->checksum = 0xffff;
}
udp1->checksum = 0xffff;
}
+ /* save inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+ vnet_buffer (b1)->ip.flow_hash = flow_hash1;
+
if (sw_if_index0 == sw_if_index1)
{
vlib_increment_combined_counter (tx_counter, thread_index,
if (sw_if_index0 == sw_if_index1)
{
vlib_increment_combined_counter (tx_counter, thread_index,
udp0->checksum = 0xffff;
}
udp0->checksum = 0xffff;
}
+ /* reuse inner packet flow_hash for load-balance node */
+ vnet_buffer (b0)->ip.flow_hash = flow_hash0;
+
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated ++;
vlib_increment_combined_counter (tx_counter, thread_index,
sw_if_index0, 1, len0);
pkts_encapsulated ++;