bond_main_t *bm = &bond_main;
bond_if_t *bif = pool_elt_at_index (bm->interfaces, dev_instance);
- s = format (s, "BondEthernet%lu", bif->dev_instance);
+ s = format (s, "BondEthernet%lu", bif->id);
return s;
}
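+ /* first L2 subinterface on this port: force rx traffic through ethernet-input */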
ethernet_set_rx_redirect (vnm, sif_hw, 1);
}
}
+ else if ((bif_hw->l2_if_count == 0) && (l2_if_adjust == -1))
+ {
+ /* Just removed last L2 subinterface on this port */
+ vec_foreach (sw_if_index, bif->slaves)
+ {
+ sif_hw = vnet_get_sup_hw_interface (vnm, *sw_if_index);
+
+ /* Allow ip packets to go directly to ip4-input etc */
+ ethernet_set_rx_redirect (vnm, sif_hw, 0);
+ }
+ }
return 0;
}
}
static_always_inline u32
-bond_lb_broadcast (vlib_main_t * vm, vlib_node_runtime_t * node,
+bond_lb_broadcast (vlib_main_t * vm,
bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
{
bond_main_t *bm = &bond_main;
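+ /* broadcast mirrors the buffer to every other active slave, which is why
+    this helper still needs vm for the buffer copies */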
}
static_always_inline u32
-bond_lb_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
- bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
+bond_lb_l2 (vlib_buffer_t * b0)
{
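+ /* L2 balancing: hash over the ethernet dst and src MAC addresses */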
- ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ ethernet_header_t *eth = vlib_buffer_get_current (b0);
u64 *dst = (u64 *) & eth->dst_address[0];
u64 a = clib_mem_unaligned (dst, u64);
u32 *src = (u32 *) & eth->src_address[2];
}
static_always_inline u32
-bond_lb_l23 (vlib_main_t * vm, vlib_node_runtime_t * node,
- bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
+bond_lb_l23 (vlib_buffer_t * b0)
{
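+ /* L23 balancing: hash over MAC addresses plus IP src/dst; non-IP traffic
+    falls back to the L2 hash */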
- ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ ethernet_header_t *eth = vlib_buffer_get_current (b0);
u8 ip_version;
ip4_header_t *ip4;
u16 ethertype, *ethertype_p;
if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
(ethertype != htons (ETHERNET_TYPE_IP6)))
- return (bond_lb_l2 (vm, node, bif, b0, n_slaves));
+ return bond_lb_l2 (b0);
ip4 = (ip4_header_t *) (ethertype_p + 1);
ip_version = (ip4->ip_version_and_header_length >> 4);
uword), a);
return c;
}
- return (bond_lb_l2 (vm, node, bif, b0, n_slaves));
+ return bond_lb_l2 (b0);
}
static_always_inline u32
-bond_lb_l34 (vlib_main_t * vm, vlib_node_runtime_t * node,
- bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
+bond_lb_l34 (vlib_buffer_t * b0)
{
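+ /* L34 balancing: hash over IP addresses and TCP/UDP ports; non-IP traffic
+    falls back to the L2 hash */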
- ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ ethernet_header_t *eth = vlib_buffer_get_current (b0);
u8 ip_version;
uword is_tcp_udp;
ip4_header_t *ip4;
if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
(ethertype != htons (ETHERNET_TYPE_IP6)))
- return (bond_lb_l2 (vm, node, bif, b0, n_slaves));
+ return bond_lb_l2 (b0);
ip4 = (ip4_header_t *) (ethertype_p + 1);
ip_version = (ip4->ip_version_and_header_length >> 4);
return c;
}
- return (bond_lb_l2 (vm, node, bif, b0, n_slaves));
+ return bond_lb_l2 (b0);
}
static_always_inline u32
-bond_lb_round_robin (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
+bond_lb_round_robin (bond_if_t * bif, vlib_buffer_t * b0, uword n_slaves)
{
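+ /* round robin: cycle through the active slaves, one packet at a time */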
bif->lb_rr_last_index++;
if (bif->lb_rr_last_index >= n_slaves)
}
static_always_inline void
-bond_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- bond_if_t * bif, vlib_buffer_t ** b,
+bond_tx_inline (vlib_main_t * vm, bond_if_t * bif, vlib_buffer_t ** b,
u32 * h, u32 n_left, uword n_slaves, u32 lb_alg)
{
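+ /* compute a per-packet hash/slave index into h[]; lb_alg is a constant at
+    each call site, so the compiler elides the untaken branches in every
+    specialization of this inline */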
while (n_left >= 4)
if (lb_alg == BOND_LB_L2)
{
- h[0] = bond_lb_l2 (vm, node, bif, b[0], n_slaves);
- h[1] = bond_lb_l2 (vm, node, bif, b[1], n_slaves);
- h[2] = bond_lb_l2 (vm, node, bif, b[2], n_slaves);
- h[3] = bond_lb_l2 (vm, node, bif, b[3], n_slaves);
+ h[0] = bond_lb_l2 (b[0]);
+ h[1] = bond_lb_l2 (b[1]);
+ h[2] = bond_lb_l2 (b[2]);
+ h[3] = bond_lb_l2 (b[3]);
}
else if (lb_alg == BOND_LB_L34)
{
- h[0] = bond_lb_l34 (vm, node, bif, b[0], n_slaves);
- h[1] = bond_lb_l34 (vm, node, bif, b[1], n_slaves);
- h[2] = bond_lb_l34 (vm, node, bif, b[2], n_slaves);
- h[3] = bond_lb_l34 (vm, node, bif, b[3], n_slaves);
+ h[0] = bond_lb_l34 (b[0]);
+ h[1] = bond_lb_l34 (b[1]);
+ h[2] = bond_lb_l34 (b[2]);
+ h[3] = bond_lb_l34 (b[3]);
}
else if (lb_alg == BOND_LB_L23)
{
- h[0] = bond_lb_l23 (vm, node, bif, b[0], n_slaves);
- h[1] = bond_lb_l23 (vm, node, bif, b[1], n_slaves);
- h[2] = bond_lb_l23 (vm, node, bif, b[2], n_slaves);
- h[3] = bond_lb_l23 (vm, node, bif, b[3], n_slaves);
+ h[0] = bond_lb_l23 (b[0]);
+ h[1] = bond_lb_l23 (b[1]);
+ h[2] = bond_lb_l23 (b[2]);
+ h[3] = bond_lb_l23 (b[3]);
}
else if (lb_alg == BOND_LB_RR)
{
- h[0] = bond_lb_round_robin (vm, node, bif, b[0], n_slaves);
- h[1] = bond_lb_round_robin (vm, node, bif, b[1], n_slaves);
- h[2] = bond_lb_round_robin (vm, node, bif, b[2], n_slaves);
- h[3] = bond_lb_round_robin (vm, node, bif, b[3], n_slaves);
+ h[0] = bond_lb_round_robin (bif, b[0], n_slaves);
+ h[1] = bond_lb_round_robin (bif, b[1], n_slaves);
+ h[2] = bond_lb_round_robin (bif, b[2], n_slaves);
+ h[3] = bond_lb_round_robin (bif, b[3], n_slaves);
}
else if (lb_alg == BOND_LB_BC)
{
- h[0] = bond_lb_broadcast (vm, node, bif, b[0], n_slaves);
- h[1] = bond_lb_broadcast (vm, node, bif, b[1], n_slaves);
- h[2] = bond_lb_broadcast (vm, node, bif, b[2], n_slaves);
- h[3] = bond_lb_broadcast (vm, node, bif, b[3], n_slaves);
+ h[0] = bond_lb_broadcast (vm, bif, b[0], n_slaves);
+ h[1] = bond_lb_broadcast (vm, bif, b[1], n_slaves);
+ h[2] = bond_lb_broadcast (vm, bif, b[2], n_slaves);
+ h[3] = bond_lb_broadcast (vm, bif, b[3], n_slaves);
}
else
{
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
if (bif->lb == BOND_LB_L2)
- h[0] = bond_lb_l2 (vm, node, bif, b[0], n_slaves);
+ h[0] = bond_lb_l2 (b[0]);
else if (bif->lb == BOND_LB_L34)
- h[0] = bond_lb_l34 (vm, node, bif, b[0], n_slaves);
+ h[0] = bond_lb_l34 (b[0]);
else if (bif->lb == BOND_LB_L23)
- h[0] = bond_lb_l23 (vm, node, bif, b[0], n_slaves);
+ h[0] = bond_lb_l23 (b[0]);
else if (bif->lb == BOND_LB_RR)
- h[0] = bond_lb_round_robin (vm, node, bif, b[0], n_slaves);
+ h[0] = bond_lb_round_robin (bif, b[0], n_slaves);
else if (bif->lb == BOND_LB_BC)
- h[0] = bond_lb_broadcast (vm, node, bif, b[0], n_slaves);
+ h[0] = bond_lb_broadcast (vm, bif, b[0], n_slaves);
else
{
ASSERT (0);
u32 mask = n_slaves - 1;
#ifdef CLIB_HAVE_VEC256
- /* only lower 16 bits of hash due to single precision fp arithmetics */
+ /* only lower 16 bits of hash due to single precision fp arithmetic */
u32x8 mask8, sc8u, h8a, h8b;
f32x8 sc8f;
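+ /* scale each 16-bit hash into [0, n_slaves) with a float multiply,
+    avoiding a per-packet integer modulo */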
vlib_trace_buffer (vm, node, next0, b[0], 0 /* follow_chain */ );
vlib_set_trace_count (vm, node, --n_trace);
t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0));
- eth = (ethernet_header_t *) vlib_buffer_get_current (b[0]);
+ eth = vlib_buffer_get_current (b[0]);
t0->ethernet = *eth;
t0->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
if (!h)
if (PREDICT_FALSE (bif->admin_up == 0))
{
- vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
VNET_INTERFACE_COUNTER_DROP,
thread_index, bif->sw_if_index,
n_slaves = vec_len (bif->active_slaves);
if (PREDICT_FALSE (n_slaves == 0))
{
- vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
VNET_INTERFACE_COUNTER_DROP,
thread_index, bif->sw_if_index,
vlib_get_buffers (vm, from, bufs, n_left);
- /* active-backup mode, ship everyting to first sw if index */
+ /* active-backup mode, ship everything to first sw if index */
if ((bif->lb == BOND_LB_AB) || PREDICT_FALSE (n_slaves == 1))
{
sw_if_index = *vec_elt_at_index (bif->active_slaves, 0);
{
sw_if_index = *vec_elt_at_index (bif->active_slaves, 0);
- bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves,
- BOND_LB_BC);
+ bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_BC);
bond_tx_trace (vm, node, bif, bufs, frame->n_vectors, 0);
bond_update_sw_if_index (ptd, bif, from, bufs, &sw_if_index, n_left,
/* single_sw_if_index */ 1);
goto done;
}
+ /* if at least one slave is on the local numa node, only slaves on the
+    local numa node will transmit pkts when bif->local_numa_only is enabled */
+ if (bif->n_numa_slaves >= 1)
+ n_slaves = bif->n_numa_slaves;
+
if (bif->lb == BOND_LB_L2)
- bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves,
- BOND_LB_L2);
+ bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L2);
else if (bif->lb == BOND_LB_L34)
- bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves,
- BOND_LB_L34);
+ bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L34);
else if (bif->lb == BOND_LB_L23)
- bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves,
- BOND_LB_L23);
+ bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_L23);
else if (bif->lb == BOND_LB_RR)
- bond_tx_inline (vm, node, bif, bufs, hashes, n_left, n_slaves,
- BOND_LB_RR);
+ bond_tx_inline (vm, bif, bufs, hashes, n_left, n_slaves, BOND_LB_RR);
else
ASSERT (0);
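+ /* flush the per-port queues: copy each slave's batch into a frame and
+    hand it to that slave's tx node */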
f = vnet_get_frame_to_sw_interface (vnm, sw_if_index);
f->n_vectors = ptd->per_port_queue[p].n_buffers;
to_next = vlib_frame_vector_args (f);
- clib_memcpy (to_next, ptd->per_port_queue[p].buffers,
- f->n_vectors * sizeof (u32));
+ clib_memcpy_fast (to_next, ptd->per_port_queue[p].buffers,
+ f->n_vectors * sizeof (u32));
vnet_put_frame_to_sw_interface (vnm, sw_if_index, f);
ptd->per_port_queue[p].n_buffers = 0;
}
}
-
- vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters
- + VNET_INTERFACE_COUNTER_TX, thread_index,
- bif->sw_if_index, frame->n_vectors);
-
return frame->n_vectors;
}