/* NOTE(review): this span is a unified-diff fragment, not compilable C.
   Lines beginning with '+'/'-' are patch markers, and diff context lines
   (including the loop header that drives 'i') have been elided. */
void
bond_disable_collecting_distributing (vlib_main_t * vm, slave_if_t * sif)
{
+ bond_main_t *bm = &bond_main;
bond_if_t *bif;
int i;
uword p;
/* Set when the slave being removed occupies slot 0 of active_slaves,
   i.e. it is the currently-active port in active-backup mode. */
+ u8 switching_active = 0;
bif = bond_get_master_by_dev_instance (sif->bif_dev_instance);
clib_spinlock_lock_if_init (&bif->lockp);
/* NOTE(review): in the visible text 'i' is read uninitialized here; the
   enclosing iteration over bif->active_slaves was presumably elided by
   the diff — confirm against the full source. */
p = *vec_elt_at_index (bif->active_slaves, i);
if (p == sif->sw_if_index)
{
+ /* Are we disabling the very 1st slave? */
+ if (sif->sw_if_index == *vec_elt_at_index (bif->active_slaves, 0))
+ switching_active = 1;
+
vec_del1 (bif->active_slaves, i);
hash_unset (bif->active_slave_by_sw_if_index, sif->sw_if_index);
+
+ /* We got a new slave just becoming active? */
/* In active-backup mode, when the old active slave was removed and at
   least one slave remains, signal the bond process node so it can send
   GARP/NA (BOND_SEND_GARP_NA) announcing the new active path. */
+ if ((vec_len (bif->active_slaves) >= 1) &&
+ (bif->mode == BOND_MODE_ACTIVE_BACKUP) && switching_active)
+ vlib_process_signal_event (bm->vlib_main, bond_process_node.index,
+ BOND_SEND_GARP_NA, bif->hw_if_index);
break;
}
}
/* NOTE(review): diff fragment — the return-type line of this function was
   elided and the braces below are unbalanced in the visible text. */
bond_enable_collecting_distributing (vlib_main_t * vm, slave_if_t * sif)
{
bond_if_t *bif;
+ bond_main_t *bm = &bond_main;
bif = bond_get_master_by_dev_instance (sif->bif_dev_instance);
clib_spinlock_lock_if_init (&bif->lockp);
/* Mark the slave active: index it by sw_if_index and append it to the
   active_slaves vector (both under the bond interface lock). */
hash_set (bif->active_slave_by_sw_if_index, sif->sw_if_index,
sif->sw_if_index);
vec_add1 (bif->active_slaves, sif->sw_if_index);
+
+ /* First slave becomes active? */
/* In active-backup mode the first active slave triggers a GARP/NA
   announcement via the bond process node. */
+ if ((vec_len (bif->active_slaves) == 1) &&
+ (bif->mode == BOND_MODE_ACTIVE_BACKUP))
+ vlib_process_signal_event (bm->vlib_main, bond_process_node.index,
+ BOND_SEND_GARP_NA, bif->hw_if_index);
}
clib_spinlock_unlock_if_init (&bif->lockp);
}
/* NOTE(review): diff fragment interleaving two different functions — a
   details/dump handler filling r_bondifs/r_slaveifs, and the slave-detach
   (delete-neighbor) path; context elided, not compilable as shown. */
/* *INDENT-OFF* */
pool_foreach (bif, bm->interfaces,
vec_add2(r_bondifs, bondif, 1);
- memset (bondif, 0, sizeof (*bondif));
+ clib_memset (bondif, 0, sizeof (*bondif));
bondif->sw_if_index = bif->sw_if_index;
hi = vnet_get_hw_interface (vnm, bif->hw_if_index);
clib_memcpy(bondif->interface_name, hi->name,
vec_foreach (sw_if_index, bif->slaves)
{
vec_add2 (r_slaveifs, slaveif, 1);
- memset (slaveif, 0, sizeof (*slaveif));
+ clib_memset (slaveif, 0, sizeof (*slaveif));
sif = bond_get_slave_by_sw_if_index (*sw_if_index);
if (sif)
{
/* Detach path: stop forwarding on the slave, then (added by the
   patch) unhook bond-input from the slave's device-input arc. */
bond_disable_collecting_distributing (vm, sif);
+ vnet_feature_enable_disable ("device-input", "bond-input",
+ sif_hw->hw_if_index, 0, 0, 0);
+
/* Put back the old mac */
vnet_hw_interface_change_mac_address (vnm, sif_hw->hw_if_index,
sif->persistent_hw_address);
/* The patch moves pool_put after the LACP disable callback, so 'sif'
   is no longer freed while still being passed to
   (*bm->lacp_enable_disable) — a use-after-free in the old order. */
- pool_put (bm->neighbors, sif);
-
if ((bif->mode == BOND_MODE_LACP) && bm->lacp_enable_disable)
(*bm->lacp_enable_disable) (vm, bif, sif, 0);
+
+ pool_put (bm->neighbors, sif);
}
/* NOTE(review): diff fragment of the bond-interface delete path;
   declarations and cleanup lines from different spots are interleaved
   with context elided. */
int
slave_if_t *sif;
vnet_hw_interface_t *hw;
u32 *sif_sw_if_index;
- u32 thread_index;
u32 **s_list = 0;
u32 i;
clib_bitmap_free (bif->port_number_bitmap);
hash_unset (bm->bond_by_sw_if_index, bif->sw_if_index);
/* The patch removes the per-interface per_thread_info frame teardown;
   per-thread state presumably moves to bm->per_thread_data (allocated in
   the init fragment elsewhere in this patch) — TODO confirm against the
   full source. */
- for (thread_index = 0; thread_index < vlib_get_thread_main ()->n_vlib_mains;
- thread_index++)
- {
- vec_free (bif->per_thread_info[thread_index].frame);
- }
- vec_free (bif->per_thread_info);
- memset (bif, 0, sizeof (*bif));
+ clib_memset (bif, 0, sizeof (*bif));
pool_put (bm->interfaces, bif);
return 0;
return;
}
/* NOTE(review): diff fragment of bond interface creation; context elided. */
pool_get (bm->interfaces, bif);
- memset (bif, 0, sizeof (*bif));
+ clib_memset (bif, 0, sizeof (*bif));
bif->dev_instance = bif - bm->interfaces;
bif->lb = args->lb;
bif->mode = args->mode;
bif->lb = BOND_LB_RR;
else if (bif->mode == BOND_MODE_BROADCAST)
bif->lb = BOND_LB_BC;
/* Patch adds an explicit load-balance selection for active-backup mode. */
+ else if (bif->mode == BOND_MODE_ACTIVE_BACKUP)
+ bif->lb = BOND_LB_AB;
bif->use_custom_mac = args->hw_addr_set;
if (!args->hw_addr_set)
sw = vnet_get_hw_sw_interface (vnm, bif->hw_if_index);
bif->sw_if_index = sw->sw_if_index;
bif->group = bif->sw_if_index;
/* Removed by the patch: per-interface per_thread_info allocation
   (matches the teardown removal in the delete path). */
- vec_validate_aligned (bif->per_thread_info,
- vlib_get_thread_main ()->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
if (vlib_get_thread_main ()->n_vlib_mains > 1)
clib_spinlock_init (&bif->lockp);
/* NOTE(review): diff fragment of the enslave path; context elided. */
vnet_interface_main_t *im = &vnm->interface_main;
vnet_hw_interface_t *bif_hw, *sif_hw;
vnet_sw_interface_t *sw;
+ u32 thread_index;
+ u32 sif_if_index;
bif = bond_get_master_by_sw_if_index (args->group);
if (!bif)
return;
}
pool_get (bm->neighbors, sif);
/* The patch drops the per-slave spinlock init alongside the
   memset -> clib_memset rename — TODO confirm sif->lockp is no longer
   used anywhere in the full source. */
- memset (sif, 0, sizeof (*sif));
- clib_spinlock_init (&sif->lockp);
+ clib_memset (sif, 0, sizeof (*sif));
sw = pool_elt_at_index (im->sw_interfaces, args->slave);
sif->port_enabled = sw->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP;
sif->sw_if_index = sw->sw_if_index;
bond_enable_collecting_distributing (vm, sif);
}
/* Added: size each thread's per_port_queue to the new slave count and
   reset every queue's buffered-packet counter to zero. */
+ vec_foreach_index (thread_index, bm->per_thread_data)
+ {
+ bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data,
+ thread_index);
+
+ vec_validate_aligned (ptd->per_port_queue, vec_len (bif->slaves) - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_foreach_index (sif_if_index, ptd->per_port_queue)
+ {
+ ptd->per_port_queue[sif_if_index].n_buffers = 0;
+ }
+ }
+
args->rv = vnet_feature_enable_disable ("device-input", "bond-input",
sif_hw->hw_if_index, 1, 0, 0);
/* NOTE(review): diff fragment of the "show bond" CLI handler. The patch
   widens the mode column from 12 to 13 characters, identically in the
   header row and the per-interface rows, keeping columns aligned. */
bond_main_t *bm = &bond_main;
bond_if_t *bif;
- vlib_cli_output (vm, "%-16s %-12s %-12s %-13s %-14s %s",
+ vlib_cli_output (vm, "%-16s %-12s %-13s %-13s %-14s %s",
"interface name", "sw_if_index", "mode",
"load balance", "active slaves", "slaves");
/* *INDENT-OFF* */
pool_foreach (bif, bm->interfaces,
({
- vlib_cli_output (vm, "%-16U %-12d %-12U %-13U %-14u %u",
+ vlib_cli_output (vm, "%-16U %-12d %-13U %-13U %-14u %u",
format_bond_interface_name, bif->dev_instance,
bif->sw_if_index, format_bond_mode, bif->mode,
format_bond_load_balance, bif->lb,
/* NOTE(review): diff fragment of module init. The patch allocates one
   bm->per_thread_data element per vlib main (n_vlib_mains entries),
   cache-line aligned — the global replacement for the removed
   per-interface per_thread_info. */
bm->vlib_main = vm;
bm->vnet_main = vnet_get_main ();
vec_validate_aligned (bm->slave_by_sw_if_index, 1, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (bm->per_thread_data,
+ vlib_get_thread_main ()->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
return 0;
}