bonding: adjust link state based on active slaves
Author:     Matthew Smith <mgsmith@netgate.com>
AuthorDate: Mon, 18 May 2020 15:54:17 +0000 (10:54 -0500)
Committer:  Neale Ranns <nranns@cisco.com>
CommitDate: Wed, 27 May 2020 12:58:33 +0000 (12:58 +0000)
Type: improvement

Bond link state is maintained inconsistently. It is initially set to up.
If the bond interface admin state is set to down, the link state is set
to down. If the admin state is set to up, the link state is only set to
up if there are active slave interfaces at that point. If slaves become
active at some later time, the link state is not updated; its next
chance to be updated is the next time the bond interface is set to
admin up.

To address this, do not set the link state to up when a bond interface
is created. Instead, adjust the link state as slave interfaces are
attached or detached, based on whether the bond is gaining its first
active slave or losing its last one.

Unit test added to verify correct maintenance of link state.

Change-Id: I31f17321f7f0e727e1ab1e01713423af6566dad9
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
src/vnet/bonding/cli.c
test/test_bond.py

diff --git a/src/vnet/bonding/cli.c b/src/vnet/bonding/cli.c
index 92a9ff0..c40d4f3 100644
--- a/src/vnet/bonding/cli.c
+++ b/src/vnet/bonding/cli.c
@@ -26,6 +26,7 @@ void
 bond_disable_collecting_distributing (vlib_main_t * vm, slave_if_t * sif)
 {
   bond_main_t *bm = &bond_main;
+  vnet_main_t *vnm = vnet_get_main ();
   bond_if_t *bif;
   int i;
   uword p;
@@ -54,6 +55,15 @@ bond_disable_collecting_distributing (vlib_main_t * vm, slave_if_t * sif)
                ASSERT (bif->n_numa_slaves >= 0);
              }
          }
+       /* If that was the last active slave, set bond link state down */
+       if (!vec_len (bif->active_slaves))
+         {
+           vnet_hw_interface_flags_t flags;
+
+           flags = vnet_hw_interface_get_flags (vnm, bif->hw_if_index);
+           flags &= ~VNET_HW_INTERFACE_FLAG_LINK_UP;
+           vnet_hw_interface_set_flags (vnm, bif->hw_if_index, flags);
+         }
        break;
       }
   }
@@ -158,6 +168,16 @@ bond_enable_collecting_distributing (vlib_main_t * vm, slave_if_t * sif)
   else
     vec_add1 (bif->active_slaves, sif->sw_if_index);
 
+  /* If this was the first active slave, set bond link state up */
+  if (vec_len (bif->active_slaves) == 1)
+    {
+      vnet_hw_interface_flags_t flags;
+
+      flags = vnet_hw_interface_get_flags (vnm, bif->hw_if_index);
+      flags |= VNET_HW_INTERFACE_FLAG_LINK_UP;
+      vnet_hw_interface_set_flags (vnm, bif->hw_if_index, flags);
+    }
+
   sif->is_local_numa = (vm->numa_node == hw->numa_node) ? 1 : 0;
   if (bif->mode == BOND_MODE_ACTIVE_BACKUP)
     {
@@ -469,9 +489,6 @@ bond_create_if (vlib_main_t * vm, bond_create_if_args_t * args)
   if (vlib_get_thread_main ()->n_vlib_mains > 1)
     clib_spinlock_init (&bif->lockp);
 
-  vnet_hw_interface_set_flags (vnm, bif->hw_if_index,
-                              VNET_HW_INTERFACE_FLAG_LINK_UP);
-
   hash_set (bm->bond_by_sw_if_index, bif->sw_if_index, bif->dev_instance);
 
   // for return
diff --git a/test/test_bond.py b/test/test_bond.py
index dd4a645..dab0dd1 100644
--- a/test/test_bond.py
+++ b/test/test_bond.py
@@ -8,7 +8,7 @@ from scapy.packet import Raw
 from scapy.layers.l2 import Ether
 from scapy.layers.inet import IP, UDP
 from vpp_bond_interface import VppBondInterface
-from vpp_papi import MACAddress
+from vpp_papi import MACAddress, VppEnum
 
 
 class TestBondInterface(VppTestCase):
@@ -272,6 +272,63 @@ class TestBondInterface(VppTestCase):
         if_dump = self.vapi.sw_interface_bond_dump()
         self.assertFalse(bond0.is_interface_config_in_dump(if_dump))
 
+    def test_bond_link(self):
+        """ Bond hw interface link state test """
+
+        # for convenience
+        bond_modes = VppEnum.vl_api_bond_mode_t
+        intf_flags = VppEnum.vl_api_if_status_flags_t
+
+        # create interface 1 (BondEthernet0)
+        self.logger.info("Create bond interface")
+        # use round-robin mode to avoid negotiation required by LACP
+        bond0 = VppBondInterface(self,
+                                 mode=bond_modes.BOND_API_MODE_ROUND_ROBIN)
+        bond0.add_vpp_config()
+
+        # initially admin state is down and link is down
+        bond0.assert_interface_state(0, 0)
+
+        # set bond admin up. confirm link down because no slaves are active
+        self.logger.info("set interface BondEthernet0 admin up")
+        bond0.admin_up()
+        bond0.assert_interface_state(intf_flags.IF_STATUS_API_FLAG_ADMIN_UP, 0)
+
+        # make sure slaves are down. enslave them to bond.
+        self.logger.info("set interface pg0 admin down")
+        self.pg0.admin_down()
+        self.logger.info("bond enslave interface pg0 to BondEthernet0")
+        bond0.enslave_vpp_bond_interface(sw_if_index=self.pg0.sw_if_index,
+                                         is_passive=0,
+                                         is_long_timeout=0)
+        self.logger.info("set interface pg1 admin down")
+        self.pg1.admin_down()
+        self.logger.info("bond enslave interface pg1 to BondEthernet0")
+        bond0.enslave_vpp_bond_interface(sw_if_index=self.pg1.sw_if_index,
+                                         is_passive=0,
+                                         is_long_timeout=0)
+
+        # bring slaves up, confirm bond link is up
+        self.logger.info("set interface pg0 admin up")
+        self.pg0.admin_up()
+        self.logger.info("set interface pg1 admin up")
+        self.pg1.admin_up()
+        bond0.assert_interface_state(intf_flags.IF_STATUS_API_FLAG_ADMIN_UP,
+                                     intf_flags.IF_STATUS_API_FLAG_LINK_UP)
+
+        # detach pg0, pg1
+        self.logger.info("detach interface pg0")
+        bond0.detach_vpp_bond_interface(sw_if_index=self.pg0.sw_if_index)
+        self.logger.info("detach interface pg1")
+        bond0.detach_vpp_bond_interface(sw_if_index=self.pg1.sw_if_index)
+
+        # link should be down now
+        bond0.assert_interface_state(intf_flags.IF_STATUS_API_FLAG_ADMIN_UP, 0)
+
+        # delete BondEthernet0
+        self.logger.info("Deleting BondEthernet0")
+        bond0.remove_vpp_config()
+
 
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)