+ rx = self.pg2.get_capture(257)
+ self.verify_capture_labelled(self.pg2, rx, tx,
+ [VppMplsLabel(3401, ttl=63)])
+ rx = self.pg3.get_capture(257)
+ self.verify_capture_labelled(self.pg3, rx, tx,
+ [VppMplsLabel(3402, ttl=63)])
+
+ def test_mcast_head(self):
+ """ MPLS Multicast Head-end """
+
+ #
+ # Create a multicast tunnel with two replications
+ #
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg2.remote_ip4,
+ self.pg2.sw_if_index,
+ labels=[VppMplsLabel(42)]),
+ VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index,
+ labels=[VppMplsLabel(43)])],
+ is_multicast=1)
+ mpls_tun.add_vpp_config()
+ mpls_tun.admin_up()
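+ # each replication pushes its own out-label: 42 towards pg2 and
+ # 43 towards pg3 (checked against the captures below)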
+
+ #
+ # add an unlabelled route through the new tunnel
+ #
+ route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun._sw_if_index)])
+ route_10_0_0_3.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg2.get_capture(257)
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
+ rx = self.pg3.get_capture(257)
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
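+ # both pg2 and pg3 see the full 257-packet stream: the multicast
+ # tunnel replicates onto every path rather than load-balancing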
+
+ #
+ # Add an IP multicast route via the tunnel: a (*,G) with
+ # one accepting interface, pg0, and one forwarding interface
+ # via the tunnel
+ #
+ route_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ [VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
+ VppMRoutePath(mpls_tun._sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+ route_232_1_1_1.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg2.get_capture(257)
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(42)])
+ rx = self.pg3.get_capture(257)
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)])
+
+ def test_mcast_ip4_tail(self):
+ """ MPLS IPv4 Multicast Tail """
+
+ #
+ # Add a multicast route that will forward the traffic
+ # post-disposition
+ #
+ route_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ table_id=1,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
+ route_232_1_1_1.add_vpp_config()
+
+ #
+ # An interface-receive label that maps traffic to RX on interface
+ # pg1. The packet is injected on pg0, which is in table 0; the
+ # disposition performs an rpf-id check and matches a route in
+ # table 1. If the packet egresses, then we must have matched the
+ # route in table 1.
+ #
+ route_34_eos = VppMplsRoute(self, 34, 1,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ nh_table_id=1,
+ rpf_id=55)],
+ is_multicast=1)
+
+ route_34_eos.add_vpp_config()
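+ # forwarding through this label entry requires the rpf_id (55) to
+ # match the RPF-ID set on the mroute; the cases below exercise
+ # unset, matching and mismatching values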
+
+ #
+ # Drop due to interface lookup miss
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
+ dst_ip="232.1.1.1", n=1)
+ self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
+
+ #
+ # set the RPF-ID of the entry to match the input packet's
+ #
+ route_232_1_1_1.update_rpf_id(55)
+
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
+ dst_ip="232.1.1.1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+ self.verify_capture_ip4(self.pg1, rx, tx)
+
+ #
+ # disposed packets with an invalid IPv4 checksum are dropped
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
+ dst_ip="232.1.1.1", n=65,
+ chksum=1)
+ self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum")
+
+ #
+ # set the RPF-ID of the entry to not match the input packet's
+ #
+ route_232_1_1_1.update_rpf_id(56)
+ tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
+ dst_ip="232.1.1.1")
+ self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
+
+ def test_mcast_ip6_tail(self):
+ """ MPLS IPv6 Multicast Tail """
+
+ #
+ # Add a multicast route that will forward the traffic
+ # post-disposition
+ #
+ route_ff = VppIpMRoute(
+ self,
+ "::",
+ "ff01::1", 32,
+ MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
+ table_id=1,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
+ is_ip6=1)
+ route_ff.add_vpp_config()
+
+ #
+ # An interface-receive label that maps traffic to RX on interface
+ # pg1. The packet is injected on pg0, which is in table 0; the
+ # disposition performs an rpf-id check and matches a route in
+ # table 1. If the packet egresses, then we must have matched the
+ # route in table 1.
+ #
+ route_34_eos = VppMplsRoute(
+ self, 34, 1,
+ [VppRoutePath("::",
+ self.pg1.sw_if_index,
+ nh_table_id=1,
+ rpf_id=55,
+ proto=DpoProto.DPO_PROTO_IP6)],
+ is_multicast=1)
+
+ route_34_eos.add_vpp_config()
+
+ #
+ # Drop due to interface lookup miss
+ #
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
+ dst_ip="ff01::1")
+ self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")
+
+ #
+ # set the RPF-ID of the entry to match the input packet's
+ #
+ route_ff.update_rpf_id(55)
+
+ tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)],
+ dst_ip="ff01::1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+ self.verify_capture_ip6(self.pg1, rx, tx)
+
+ #
+ # disposed packets with hop-limit = 1 elicit an ICMPv6 error reply
+ #
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(34)],
+ dst_ip="ff01::1",
+ hlim=1)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_ip6_icmp(self.pg0, rx, tx)
+
+ #
+ # set the RPF-ID of the entry to not match the input packet's
+ #
+ route_ff.update_rpf_id(56)
+ tx = self.create_stream_labelled_ip6(self.pg0,
+ [VppMplsLabel(34)],
+ dst_ip="ff01::1")
+ self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")
+
+
+class TestMPLSDisabled(VppTestCase):
+ """ MPLS disabled """
+
+ def setUp(self):
+ super(TestMPLSDisabled, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(2))
+
+ self.tbl = VppMplsTable(self, 0)
+ self.tbl.add_vpp_config()
+
+ # PG0 is MPLS enabled
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg0.enable_mpls()
+
+ # PG 1 is not MPLS enabled
+ self.pg1.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.admin_down()
+
+ self.pg0.disable_mpls()
+ super(TestMPLSDisabled, self).tearDown()
+
+ def test_mpls_disabled(self):
+ """ MPLS Disabled """
+
+ tx = (Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac) /
+ MPLS(label=32, ttl=64) /
+ IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ #
+ # A simple MPLS xconnect - eos label in label out
+ #
+ route_32_eos = VppMplsRoute(self, 32, 1,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[33])])
+ route_32_eos.add_vpp_config()
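+ # the label route itself is valid, so any drop below is solely due
+ # to the ingress interface not being MPLS enabled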
+
+ #
+ # PG1 is not MPLS enabled, so the labelled packet is dropped
+ #
+ self.send_and_assert_no_replies(self.pg1, tx, "MPLS disabled")
+
+ #
+ # MPLS enable PG1
+ #
+ self.pg1.enable_mpls()
+
+ #
+ # Now we get packets through
+ #
+ self.pg1.add_stream(tx)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(1)
+
+ #
+ # Disable PG1
+ #
+ self.pg1.disable_mpls()
+
+ #
+ # with MPLS disabled again, PG1 drops the labelled traffic
+ #
+ self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
+ self.send_and_assert_no_replies(self.pg1, tx, "IPv6 disabled")
+
+
+class TestMPLSPIC(VppTestCase):
+ """ MPLS PIC edge convergence """
+
+ def setUp(self):
+ super(TestMPLSPIC, self).setUp()
+
+ # create 4 pg interfaces
+ self.create_pg_interfaces(range(4))
+
+ mpls_tbl = VppMplsTable(self, 0)
+ mpls_tbl.add_vpp_config()
+ tbl4 = VppIpTable(self, 1)
+ tbl4.add_vpp_config()
+ tbl6 = VppIpTable(self, 1, is_ip6=1)
+ tbl6.add_vpp_config()
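+ # table 1 (v4 and v6) is the customer VRF that pg2 and pg3 are
+ # bound to below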
+
+ # core links
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg0.enable_mpls()
+ self.pg1.admin_up()
+ self.pg1.config_ip4()
+ self.pg1.resolve_arp()
+ self.pg1.enable_mpls()
+
+ # VRF (customer facing) links
+ self.pg2.admin_up()
+ self.pg2.set_table_ip4(1)
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.set_table_ip6(1)
+ self.pg2.config_ip6()
+ self.pg2.resolve_ndp()
+ self.pg3.admin_up()
+ self.pg3.set_table_ip4(1)
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg3.set_table_ip6(1)
+ self.pg3.config_ip6()
+ self.pg3.resolve_ndp()
+
+ def tearDown(self):
+ self.pg0.disable_mpls()
+ self.pg1.disable_mpls()
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+ i.admin_down()
+ super(TestMPLSPIC, self).tearDown()
+
+ def test_mpls_ibgp_pic(self):
+ """ MPLS iBGP PIC edge convergence
+
+ 1) set up many iBGP VPN routes via a pair of iBGP peers
+ 2) check ECMP forwarding to these peers
+ 3) withdraw the IGP route to one of these peers
+ 4) check forwarding continues to the remaining peer
+ """
+
+ #
+ # IGP+LDP core routes
+ #
+ core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[45])])
+ core_10_0_0_45.add_vpp_config()
+
+ core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[46])])
+ core_10_0_0_46.add_vpp_config()
+
+ #
+ # Lots of VPN routes. We need more than 64 so VPP will build
+ # the fast convergence indirection
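+ # (each route is recursive, via is_resolve_host, through the two
+ # peer /32s, so a peer failure can be repaired with a single
+ # path-list update rather than per-prefix FIB updates)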
+ #
+ vpn_routes = []
+ pkts = []
+ for ii in range(64):
+ dst = "192.168.1.%d" % ii
+ vpn_routes.append(VppIpRoute(self, dst, 32,
+ [VppRoutePath("10.0.0.45",
+ 0xffffffff,
+ labels=[145],
+ is_resolve_host=1),
+ VppRoutePath("10.0.0.46",
+ 0xffffffff,
+ labels=[146],
+ is_resolve_host=1)],
+ table_id=1))
+ vpn_routes[ii].add_vpp_config()
+
+ pkts.append(Ether(dst=self.pg2.local_mac,
+ src=self.pg2.remote_mac) /
+ IP(src=self.pg2.remote_ip4, dst=dst) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ #
+ # Send the packet stream (one pkt to each VPN route)
+ # - expect a 50-50 split of the traffic
+ #
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0._get_capture(1)
+ rx1 = self.pg1._get_capture(1)
+
+ # not testing the LB hashing algorithm so we're not concerned
+ # with the split ratio, just that neither is 0
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # will prevent the FIB converging the VPN routes and thus allow
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # Withdraw one of the IGP routes
+ #
+ core_10_0_0_46.remove_vpp_config()
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.vapi.ppcli("clear trace")
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0.get_capture(len(pkts))
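+ # all packets arrive on pg0 even though the FIB has not been
+ # allowed to converge, i.e. the backup path is already in use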
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+
+ #
+ # packets should still be forwarded through the remaining peer
+ #
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0.get_capture(64)
+
+ #
+ # Add the IGP route back and we return to load-balancing
+ #
+ core_10_0_0_46.add_vpp_config()
+
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0._get_capture(1)
+ rx1 = self.pg1._get_capture(1)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+ def test_mpls_ebgp_pic(self):
+ """ MPLS eBGP PIC edge convergence
+
+ 1) set up many eBGP VPN routes via a pair of eBGP peers
+ 2) check ECMP forwarding to these peers
+ 3) withdraw one eBGP path - expect forwarding via the remaining path
+ """
+
+ #
+ # Lots of VPN routes. We need more than 64 so VPP will build
+ # the fast convergence indirection
+ #
+ vpn_routes = []
+ vpn_bindings = []
+ pkts = []
+ for ii in range(64):
+ dst = "192.168.1.%d" % ii
+ local_label = 1600 + ii
+ vpn_routes.append(VppIpRoute(self, dst, 32,
+ [VppRoutePath(self.pg2.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ is_resolve_attached=1),
+ VppRoutePath(self.pg3.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ is_resolve_attached=1)],
+ table_id=1))
+ vpn_routes[ii].add_vpp_config()
+
+ vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
+ ip_table_id=1))
+ vpn_bindings[ii].add_vpp_config()
+
+ pkts.append(Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=local_label, ttl=64) /
+ IP(src=self.pg0.remote_ip4, dst=dst) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(1)
+ rx1 = self.pg3._get_capture(1)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # will prevent the FIB converging the VPN routes and thus allow
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # withdraw the connected prefix on the interface.
+ #
+ self.pg2.unconfig_ip4()
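+ # (removing the connected prefix invalidates the attached next-hop
+ # on pg2, which stands in for the loss of that eBGP path)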
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(len(pkts))
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(len(pkts))
+
+ #
+ # put the connecteds back
+ #
+ self.pg2.config_ip4()
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(1)
+ rx1 = self.pg3._get_capture(1)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+ def test_mpls_v6_ebgp_pic(self):
+ """ MPLSv6 eBGP PIC edge convergence
+
+ 1) set up many eBGP VPNv6 routes via a pair of eBGP peers
+ 2) check ECMP forwarding to these peers
+ 3) withdraw one eBGP path - expect forwarding via the remaining path
+ """
+
+ #
+ # Lot's of VPN routes. We need more the 64 so VPP will build
+ # the fast convergence indirection
+ #
+ vpn_routes = []
+ vpn_bindings = []
+ pkts = []
+ for ii in range(64):
+ dst = "3000::%d" % ii
+ local_label = 1600 + ii
+ vpn_routes.append(VppIpRoute(
+ self, dst, 128,
+ [VppRoutePath(self.pg2.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ is_resolve_attached=1,
+ proto=DpoProto.DPO_PROTO_IP6),
+ VppRoutePath(self.pg3.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ proto=DpoProto.DPO_PROTO_IP6,
+ is_resolve_attached=1)],
+ table_id=1,
+ is_ip6=1))
+ vpn_routes[ii].add_vpp_config()
+
+ vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
+ ip_table_id=1,
+ is_ip6=1))
+ vpn_bindings[ii].add_vpp_config()
+
+ pkts.append(Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=local_label, ttl=64) /
+ IPv6(src=self.pg0.remote_ip6, dst=dst) /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(1)
+ rx1 = self.pg3._get_capture(1)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+ #
+ # use a test CLI command to stop the FIB walk process; this
+ # will prevent the FIB converging the VPN routes and thus allow
+ # us to probe the interim (post-fail, pre-converge) state
+ #
+ self.vapi.ppcli("test fib-walk-process disable")
+
+ #
+ # withdraw the connected prefix on the interface
+ # and shut down the interface so the ND cache is flushed
+ #
+ self.pg2.unconfig_ip6()
+ self.pg2.admin_down()
+
+ #
+ # now all packets should be forwarded through the remaining peer
+ #
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(len(pkts))
+
+ #
+ # enable the FIB walk process to converge the FIB
+ #
+ self.vapi.ppcli("test fib-walk-process enable")
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg3.get_capture(len(pkts))
+
+ #
+ # put the connected prefix back and bring the interface up
+ #
+ self.pg2.admin_up()
+ self.pg2.config_ip6()
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg2._get_capture(1)
+ rx1 = self.pg3._get_capture(1)
+ self.assertNotEqual(0, len(rx0))
+ self.assertNotEqual(0, len(rx1))
+
+
+class TestMPLSL2(VppTestCase):
+ """ MPLS-L2 """
+
+ def setUp(self):
+ super(TestMPLSL2, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(2))
+
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ # use pg0 as the core facing interface
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg0.enable_mpls()
+
+ # use the other interface for the customer facing L2 link
+ for i in self.pg_interfaces[1:]:
+ i.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces[1:]:
+ i.admin_down()
+
+ self.pg0.disable_mpls()
+ self.pg0.unconfig_ip4()
+ self.pg0.admin_down()
+ super(TestMPLSL2, self).tearDown()
+
+ def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
+ capture = verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the MPLS TTL is 255 since it enters a new tunnel
+ verify_mpls_stack(self, rx, mpls_labels)
+
+ tx_eth = tx[Ether]
+ rx_eth = Ether(str(rx[MPLS].payload))
+
+ self.assertEqual(rx_eth.src, tx_eth.src)
+ self.assertEqual(rx_eth.dst, tx_eth.dst)
+
+ def test_vpws(self):
+ """ Virtual Private Wire Service """
+
+ #
+ # Create an MPLS tunnel that pushes 1 label
+ # For Ethernet over MPLS the uniform mode is irrelevant since ttl/cos
+ # information is not in the packet, but we test it works anyway
+ #
+ mpls_tun_1 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
+ is_l2=1)
+ mpls_tun_1.add_vpp_config()
+ mpls_tun_1.admin_up()
+
+ #
+ # Create a label entry for 55 that does L2 input to the tunnel
+ #
+ route_55_eos = VppMplsRoute(
+ self, 55, 1,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun_1.sw_if_index,
+ is_interface_rx=1,
+ proto=DpoProto.DPO_PROTO_ETHERNET)])
+ route_55_eos.add_vpp_config()
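+ # is_interface_rx with the Ethernet DPO proto means packets arriving
+ # with label 55 are handed to the tunnel's L2 receive path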
+
+ #
+ # Cross-connect the tunnel with one of the customer's L2 interfaces
+ #
+ self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
+ mpls_tun_1.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
+ self.pg1.sw_if_index,
+ enable=1)
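+ # the xconnect is programmed in both directions, so frames flow
+ # customer->core and core->customer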
+
+ #
+ # inject a packet from the core
+ #
+ pcore = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=55, ttl=64) /
+ Ether(dst="00:00:de:ad:ba:be",
+ src="00:00:de:ad:be:ef") /
+ IP(src="10.10.10.10", dst="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ tx0 = pcore * 65
+ rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
+ payload = pcore[MPLS].payload
+
+ self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
+ self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
+
+ #
+ # Inject a packet from the customer/L2 side
+ #
+ tx1 = pcore[MPLS].payload * 65
+ rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
+
+ self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
+
+ def test_vpls(self):
+ """ Virtual Private LAN Service """
+ #
+ # Create an L2 MPLS tunnel
+ #
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(42)])],
+ is_l2=1)
+ mpls_tun.add_vpp_config()
+ mpls_tun.admin_up()
+
+ #
+ # Create a label entry for 55 that does L2 input to the tunnel
+ #
+ route_55_eos = VppMplsRoute(
+ self, 55, 1,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun.sw_if_index,
+ is_interface_rx=1,
+ proto=DpoProto.DPO_PROTO_ETHERNET)])
+ route_55_eos.add_vpp_config()
+
+ #
+ # add the tunnel to the customer's bridge-domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
+ bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
+ bd_id=1)
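+ # both the tunnel and pg1 are now members of bridge-domain 1, so
+ # forwarding between them is subject to MAC learning in the BD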
+
+ #
+ # Packet from the customer interface and from the core
+ #
+ p_cust = (Ether(dst="00:00:de:ad:ba:be",
+ src="00:00:de:ad:be:ef") /
+ IP(src="10.10.10.10", dst="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+ p_core = (Ether(src="00:00:de:ad:ba:be",
+ dst="00:00:de:ad:be:ef") /
+ IP(dst="10.10.10.10", src="11.11.11.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))
+
+ #
+ # The BD is learning, so send in one of each packet to learn
+ #
+ p_core_encap = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=55, ttl=64) /
+ p_core)
+
+ self.pg1.add_stream(p_cust)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.add_stream(p_core_encap)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # we've learnt this address, so expect the packet to be forwarded
+ rx0 = self.pg1.get_capture(1)
+
+ self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
+ self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
+
+ #
+ # now a full stream from the customer side
+ #
+ self.pg1.add_stream(p_cust * 65)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx0 = self.pg0.get_capture(65)
+
+ self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
+ [VppMplsLabel(42)])
+
+ #
+ # remove the interfaces from the customer's bridge-domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
+ bd_id=1,
+ enable=0)
+ self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
+ bd_id=1,
+ enable=0)