+ rx0 = self.pg0._get_capture(NUM_PKTS)
+ rx1 = self.pg1._get_capture(NUM_PKTS)
+
+ # we're not testing the LB hashing algorithm, so we're not concerned
+ # with the split ratio, just that neither path carries 0 packets
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # prevents the FIB from converging the VPN routes and thus allows
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # Withdraw one of the IGP routes
+ #
+ core_10_0_0_46.remove_vpp_config()
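+ # with the walk stopped the VPN prefixes are not re-programmed;
+ # forwarding should nevertheless recover via the shared
+ # load-balance, which is the PIC behaviour under test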
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.vapi.ppcli("clear trace")
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+
+ #
+ # packets should still be forwarded through the remaining peer
+ #
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # Add the IGP route back and we return to load-balancing
+ #
+ core_10_0_0_46.add_vpp_config()
+
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0._get_capture(NUM_PKTS)
+ rx1 = self.pg1._get_capture(NUM_PKTS)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+ def test_mpls_ebgp_pic(self):
+ """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence
+
+ 1) set up many eBGP VPN routes via a pair of eBGP peers.
+ 2) check ECMP forwarding to these peers
+ 3) withdraw one eBGP path - expect LB across the remaining eBGP peer
+ """
+
+ #
+ # Lots of VPN routes. We need more than 64 so VPP will build
+ # the fast convergence indirection
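+ # (the indirection here is a shared load-balance map: the prefixes
+ #  all point at one path-list, so a path failure is repaired in one
+ #  place rather than per-prefix)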
+ #
+ vpn_routes = []
+ vpn_bindings = []
+ pkts = []
+ for ii in range(NUM_PKTS):
+ dst = "192.168.1.%d" % ii
+ local_label = 1600 + ii
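+ # two ECMP paths, one via each eBGP peer; RESOLVE_VIA_ATTACHED
+ # constrains the next-hops to resolve via the connected
+ # prefixes in table 1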
+ vpn_routes.append(VppIpRoute(
+ self, dst, 32,
+ [VppRoutePath(
+ self.pg2.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
+ vpn_routes[ii].add_vpp_config()
+
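+ # bind a local label to the VPN prefix so labelled packets
+ # arriving from the core are forwarded using the VPN route
+ # in table 1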
+ vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
+ ip_table_id=1))
+ vpn_bindings[ii].add_vpp_config()
+
+ pkts.append(Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=local_label, ttl=64) /
+ IP(src=self.pg0.remote_ip4, dst=dst) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ #
+ # Send the packet stream (one pkt to each VPN route)
+ # - expect a 50-50 split of the traffic
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
+
+ # we're not testing the LB hashing algorithm, so we're not concerned
+ # with the split ratio, just that neither path carries 0 packets
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # prevents the FIB from converging the VPN routes and thus allows
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # withdraw the connected prefix on the interface.
+ #
+ self.pg2.unconfig_ip4()
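+ # without the connected prefix the path via pg2 can no longer
+ # resolve, so it behaves as if that eBGP peer had gone away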
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+
+ #
+ # packets should still be forwarded through the remaining peer
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # put the connected routes back
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+ def test_mpls_v6_ebgp_pic(self):
+ """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence
+
+ 1) set up many eBGP VPNv6 routes via a pair of eBGP peers
+ 2) check ECMP forwarding to these peers
+ 3) withdraw one eBGP path - expect LB across the remaining eBGP peer
+ """
+
+ #
+ # Lots of VPN routes. We need more than 64 so VPP will build
+ # the fast convergence indirection
+ #
+ vpn_routes = []
+ vpn_bindings = []
+ pkts = []
+ for ii in range(NUM_PKTS):
+ dst = "3000::%d" % ii
+ local_label = 1600 + ii
+ vpn_routes.append(VppIpRoute(
+ self, dst, 128,
+ [VppRoutePath(
+ self.pg2.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
+ vpn_routes[ii].add_vpp_config()
+
+ vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
+ ip_table_id=1))
+ vpn_bindings[ii].add_vpp_config()
+
+ pkts.append(Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=local_label, ttl=64) /
+ IPv6(src=self.pg0.remote_ip6, dst=dst) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # prevents the FIB from converging the VPN routes and thus allows
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # withdraw the connected prefix on the interface
+ # and shut down the interface so the ND cache is flushed.
+ #
+ self.pg2.unconfig_ip6()
+ self.pg2.admin_down()
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
+
+ #
+ # put the connected routes back
+ #
+ self.logger.info(self.vapi.cli("sh log"))
+ self.pg2.admin_up()
+ self.pg2.config_ip6()
+ self.pg2.resolve_ndp()
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
+
+
+class TestMPLSL2(VppTestCase):
+ """ MPLS-L2 """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLSL2, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMPLSL2, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestMPLSL2, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(2))
+
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ # use pg0 as the core facing interface, don't resolve ARP
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.enable_mpls()
+
+ # use the remaining interfaces for customer-facing L2 links
+ for i in self.pg_interfaces[1:]:
+ i.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces[1:]:
+ i.admin_down()
+
+ self.pg0.disable_mpls()
+ self.pg0.unconfig_ip4()
+ self.pg0.admin_down()
+ super(TestMPLSL2, self).tearDown()
+
+ def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
+ capture = verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the MPLS TTL is 255 since it enters a new tunnel
+ verify_mpls_stack(self, rx, mpls_labels)
+
+ tx_eth = tx[Ether]
+ rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload))
+
+ self.assertEqual(rx_eth.src, tx_eth.src)
+ self.assertEqual(rx_eth.dst, tx_eth.dst)
+
+ def verify_arp_req(self, rx, smac, sip, dip):
+ ether = rx[Ether]
+ self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+ self.assertEqual(ether.src, smac)
+
+ arp = rx[ARP]
+ self.assertEqual(arp.hwtype, 1)
+ self.assertEqual(arp.ptype, 0x800)
+ self.assertEqual(arp.hwlen, 6)
+ self.assertEqual(arp.plen, 4)
+ self.assertEqual(arp.op, ARP.who_has)
+ self.assertEqual(arp.hwsrc, smac)
+ self.assertEqual(arp.hwdst, "00:00:00:00:00:00")
+ self.assertEqual(arp.psrc, sip)
+ self.assertEqual(arp.pdst, dip)
+
+ def test_vpws(self):
+ """ Virtual Private Wire Service """
+
+ #
+ # Create an MPLS tunnel that pushes 1 label
+ # For Ethernet over MPLS the uniform mode is irrelevant since there is
+ # no ttl/cos information in the payload, but we test that it works anyway
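+ # (is_l2=1 creates the tunnel as an L2/Ethernet interface so it
+ #  can be cross-connected with the customer port below)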
+ #
+ mpls_tun_1 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
+ is_l2=1)
+ mpls_tun_1.add_vpp_config()
+ mpls_tun_1.admin_up()
+
+ #
+ # Create a label entry for label 55 that does L2 input to the tunnel
+ #
+ route_55_eos = VppMplsRoute(
+ self, 55, 1,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun_1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+ route_55_eos.add_vpp_config()
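+ # (FIB_PATH_TYPE_INTERFACE_RX means packets matching label 55 are
+ #  treated as if received on the tunnel interface, i.e. they enter
+ #  its L2 path)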
+
+ #
+ # Cross-connect the tunnel with one of the customer's L2 interfaces
+ #
+ self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
+ mpls_tun_1.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
+ self.pg1.sw_if_index,
+ enable=1)
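+ # the xconnect is programmed in both directions: frames from pg1
+ # go into the tunnel and frames decapsulated from the tunnel go
+ # out of pg1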
+
+ #
+ # inject a packet from the core
+ #
+ pcore = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=55, ttl=64) /
+ Ether(dst="00:00:de:ad:ba:be",
+ src="00:00:de:ad:be:ef") /
+ IP(src="10.10.10.10", dst="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ tx0 = pcore * NUM_PKTS
+ rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
+ payload = pcore[MPLS].payload
+
+ self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
+ self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
+
+ #
+ # Inject a packet from the customer/L2 side
+ # there's no resolved ARP entry so the first packet we see should be
+ # an ARP request
+ #
+ tx1 = pcore[MPLS].payload
+ rx1 = self.send_and_expect(self.pg1, [tx1], self.pg0)
+
+ self.verify_arp_req(rx1[0],
+ self.pg0.local_mac,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+
+ #
+ # resolve the ARP entries and send again
+ #
+ self.pg0.resolve_arp()
+ tx1 = pcore[MPLS].payload * NUM_PKTS
+ rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
+
+ self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
+
+ def test_vpls(self):
+ """ Virtual Private LAN Service """
+
+ # we skipped this in the setup
+ self.pg0.resolve_arp()
+
+ #
+ # Create two L2 MPLS tunnels
+ #
+ mpls_tun1 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42)])],
+ is_l2=1)
+ mpls_tun1.add_vpp_config()
+ mpls_tun1.admin_up()
+
+ mpls_tun2 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(43)])],
+ is_l2=1)
+ mpls_tun2.add_vpp_config()
+ mpls_tun2.admin_up()
+
+ #
+ # Create label entries, 55 and 56, that do L2 input to the tunnels;
+ # the latter includes a Pseudowire Control Word
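+ # (the POP_PW_CW flag tells VPP to strip the 4-byte control word
+ #  that follows the label stack before the Ethernet frame; p_core2
+ #  below carries such a control word)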
+ #
+ route_55_eos = VppMplsRoute(
+ self, 55, 1,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
+ route_56_eos = VppMplsRoute(
+ self, 56, 1,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun2.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ flags=FibPathFlags.FIB_PATH_FLAG_POP_PW_CW,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
+ # move me
+ route_56_eos.add_vpp_config()
+ route_55_eos.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh mpls fib 56"))
+
+ #
+ # add the tunnels to the customer's bridge-domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
+
+ #
+ # Packets from the host on the customer interface to each host
+ # reachable over the core, and vice-versa
+ #
+ p_cust1 = (Ether(dst="00:00:de:ad:ba:b1",
+ src="00:00:de:ad:be:ef") /
+ IP(src="10.10.10.10", dst="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ p_cust2 = (Ether(dst="00:00:de:ad:ba:b2",
+ src="00:00:de:ad:be:ef") /
+ IP(src="10.10.10.10", dst="11.11.11.12") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ p_core1 = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=55, ttl=64) /
+ Ether(src="00:00:de:ad:ba:b1",
+ dst="00:00:de:ad:be:ef") /
+ IP(dst="10.10.10.10", src="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ p_core2 = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=56, ttl=64) /
+ Raw(b'\x01' * 4) / # PW CW
+ Ether(src="00:00:de:ad:ba:b2",
+ dst="00:00:de:ad:be:ef") /
+ IP(dst="10.10.10.10", src="11.11.11.12") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ #
+ # The BD is learning, so send in one of each packet to learn
+ #
+
+ # 2 packets due to BD flooding
+ rx = self.send_and_expect(self.pg1, p_cust1, self.pg0, n_rx=2)
+ rx = self.send_and_expect(self.pg1, p_cust2, self.pg0, n_rx=2)
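+ # (the destination MACs are not yet known in the BD, so each frame
+ #  is flooded to both tunnels, hence two copies on pg0)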
+
+ # we've learnt this so expect it to be forwarded, not flooded
+ rx = self.send_and_expect(self.pg0, [p_core1], self.pg1)
+ self.assertEqual(rx[0][Ether].dst, p_cust1[Ether].src)
+ self.assertEqual(rx[0][Ether].src, p_cust1[Ether].dst)
+
+ rx = self.send_and_expect(self.pg0, [p_core2], self.pg1)
+ self.assertEqual(rx[0][Ether].dst, p_cust2[Ether].src)
+ self.assertEqual(rx[0][Ether].src, p_cust2[Ether].dst)
+
+ #
+ # now a stream in each direction from each host
+ #
+ rx = self.send_and_expect(self.pg1, p_cust1 * NUM_PKTS, self.pg0)
+ self.verify_capture_tunneled_ethernet(rx, p_cust1 * NUM_PKTS,
+ [VppMplsLabel(42)])
+
+ rx = self.send_and_expect(self.pg1, p_cust2 * NUM_PKTS, self.pg0)
+ self.verify_capture_tunneled_ethernet(rx, p_cust2 * NUM_PKTS,
+ [VppMplsLabel(43)])
+
+ rx = self.send_and_expect(self.pg0, p_core1 * NUM_PKTS, self.pg1)
+ rx = self.send_and_expect(self.pg0, p_core2 * NUM_PKTS, self.pg1)
+
+ #
+ # remove the interfaces from the customer's bridge-domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)