diff --git a/test/test_mpls.py b/test/test_mpls.py
index 09e47521ee0..038ffd34f6c 100644
--- a/test/test_mpls.py
+++ b/test/test_mpls.py
@@ -1,23 +1,32 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3

 import unittest
 import socket

 from framework import VppTestCase, VppTestRunner
-from vpp_ip import DpoProto
+from vpp_ip import DpoProto, INVALID_INDEX
 from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
     VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
     MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
-    VppMplsLabel, MplsLspMode, find_mpls_route
+    VppMplsLabel, MplsLspMode, find_mpls_route, \
+    FibPathProto, FibPathType, FibPathFlags
 from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface

 import scapy.compat
 from scapy.packet import Raw
-from scapy.layers.l2 import Ether
+from scapy.layers.l2 import Ether, ARP
 from scapy.layers.inet import IP, UDP, ICMP
 from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
 from scapy.contrib.mpls import MPLS

+NUM_PKTS = 67
+
+# Scapy removed these attributes; we asked that they be restored:
+# https://github.com/secdev/scapy/pull/1878
+# Semantic names carry more meaning than bare numbers, so here they are.
+ARP.who_has = 1
+ARP.is_at = 2
+

 def verify_filter(capture, sent):
     if not len(capture) == len(sent):
@@ -52,6 +61,14 @@ def verify_mpls_stack(tst, rx, mpls_labels):
 class TestMPLS(VppTestCase):
     """ MPLS Test Case """

+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLS, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLS, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLS, self).setUp()
@@ -91,7 +108,6 @@ class TestMPLS(VppTestCase):
         for i in self.pg_interfaces:
             i.unconfig_ip4()
             i.unconfig_ip6()
-            i.ip6_disable()
             i.set_table_ip4(0)
             i.set_table_ip6(0)
             i.disable_mpls()
@@ -143,7 +159,8 @@ class TestMPLS(VppTestCase):
             pkts.append(p)
         return pkts

-    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
+    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64,
+                          ip_dscp=0, payload_size=None):
         self.reset_packet_infos()
         pkts = []
         for i in range(0, 257):
@@ -155,6 +172,8 @@ class TestMPLS(VppTestCase):
                  UDP(sport=1234, dport=1234) /
                  Raw(payload))
             info.data = p.copy()
+            if payload_size:
+                self.extend_packet(p, payload_size)
             pkts.append(p)
         return pkts
@@ -368,6 +387,30 @@ class TestMPLS(VppTestCase):
         except:
             raise

+    def verify_capture_fragmented_labelled_ip4(self, src_if, capture, sent,
+                                               mpls_labels, ip_ttl=None):
+        try:
+            capture = verify_filter(capture, sent)
+
+            for i in range(len(capture)):
+                tx = sent[0]
+                rx = capture[i]
+                tx_ip = tx[IP]
+                rx_ip = rx[IP]
+
+                verify_mpls_stack(self, rx, mpls_labels)
+
+                self.assertEqual(rx_ip.src, tx_ip.src)
+                self.assertEqual(rx_ip.dst, tx_ip.dst)
+                if not ip_ttl:
+                    # IP processing post pop has decremented the TTL
+                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+                else:
+                    self.assertEqual(rx_ip.ttl, ip_ttl)
+
+        except:
+            raise
+
     def test_swap(self):
         """ MPLS label swap tests """
@@ -452,7 +495,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip4(self.pg0, rx, tx)

         #
-        # disposed packets have an invalid IPv4 checkusm
+        # disposed packets have an invalid IPv4 checksum
         #
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
                                              dst_ip=self.pg0.remote_ip4,
                                              n=65,
                                              chksum=1)
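A pattern worth calling out before the next hunks: the old per-path proto=DpoProto hints disappear, and the payload protocol of an end-of-stack label moves onto the MPLS route itself. A condensed before/after sketch using only names this file already imports (illustrative only, not lines from the diff; nh and sw_if_index stand in for real values):

    # before: protocol hinted on each path
    VppRoutePath(nh, sw_if_index, labels=[],
                 proto=DpoProto.DPO_PROTO_IP6)

    # after: the path infers its protocol from the next-hop address,
    # and the MPLS route states what an end-of-stack label carries
    VppRoutePath(nh, sw_if_index, labels=[])
    VppMplsRoute(self, 333, 1, [...],
                 eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)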
@@ -488,8 +531,8 @@ class TestMPLS(VppTestCase):
             self, 333, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_333_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
@@ -513,8 +556,8 @@ class TestMPLS(VppTestCase):
             self, 334, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_334_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -529,8 +572,8 @@ class TestMPLS(VppTestCase):
             self, 335, 1,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
-                          proto=DpoProto.DPO_PROTO_IP6)])
+                          labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
         route_335_eos.add_vpp_config()

         tx = self.create_stream_labelled_ip6(
@@ -576,6 +619,7 @@ class TestMPLS(VppTestCase):
                                       labels=[VppMplsLabel(44),
                                               VppMplsLabel(45)])])
         route_34_eos.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh mpls fib 34"))

         tx = self.create_stream_labelled_ip4(self.pg0,
                                              [VppMplsLabel(34, ttl=3)])
@@ -765,10 +809,8 @@ class TestMPLS(VppTestCase):
             self, "2001::3", 128,
             [VppRoutePath(self.pg0.remote_ip6,
                           self.pg0.sw_if_index,
-                          proto=DpoProto.DPO_PROTO_IP6,
                           labels=[VppMplsLabel(32,
-                                               mode=MplsLspMode.UNIFORM)])],
-            is_ip6=1)
+                                               mode=MplsLspMode.UNIFORM)])])
         route_2001_3.add_vpp_config()

         tx = self.create_stream_ip6(self.pg0, "2001::3",
@@ -841,11 +883,43 @@ class TestMPLS(VppTestCase):
         route_10_0_0_2.remove_vpp_config()
         route_10_0_0_1.remove_vpp_config()

+    def test_imposition_fragmentation(self):
+        """ MPLS label imposition fragmentation test """
+
+        #
+        # Add an IPv4 non-recursive route with a single out label
+        #
+        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[VppMplsLabel(32)])])
+        route_10_0_0_1.add_vpp_config()
+
+        #
+        # a stream that matches the route for 10.0.0.1
+        # PG0 is in the default table
+        #
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+        for i in range(0, 257):
+            self.extend_packet(tx[i], 10000)
+
+        #
+        # 5 fragments per packet (257*5=1285)
+        #
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, 1285)
+        self.verify_capture_fragmented_labelled_ip4(self.pg0, rx, tx,
+                                                    [VppMplsLabel(32)])
+
+        #
+        # cleanup
+        #
+        route_10_0_0_1.remove_vpp_config()
+
     def test_tunnel_pipe(self):
         """ MPLS Tunnel Tests - Pipe """

         #
-        # Create a tunnel with a single out label
+        # Create a tunnel with two out labels
         #
         mpls_tun = VppMPLSTunnelInterface(
             self,
@@ -898,6 +972,38 @@ class TestMPLS(VppTestCase):
                                          VppMplsLabel(46),
                                          VppMplsLabel(33, ttl=255)])

+        #
+        # change tunnel's MTU to a low value
+        #
+        mpls_tun.set_l3_mtu(1200)
+
+        # send IP into the tunnel to be fragmented
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.3",
+                                    payload_size=1500)
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
+
+        fake_tx = []
+        for p in tx:
+            fake_tx.append(p)
+            fake_tx.append(p)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
+                                         [VppMplsLabel(44),
+                                          VppMplsLabel(46)])
+
+        # send MPLS into the tunnel to be fragmented
+        tx = self.create_stream_ip4(self.pg0, "10.0.0.4",
+                                    payload_size=1500)
+        rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx)*2)
+
+        fake_tx = []
+        for p in tx:
+            fake_tx.append(p)
+            fake_tx.append(p)
+        self.verify_capture_tunneled_ip4(self.pg0, rx, fake_tx,
+                                         [VppMplsLabel(44),
+                                          VppMplsLabel(46),
+                                          VppMplsLabel(33, ttl=255)])
+
     def test_tunnel_uniform(self):
         """ MPLS Tunnel Tests - Uniform """
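The len(tx)*2 expectation in the hunk above follows from IPv4 fragmentation arithmetic; a quick standalone check (a sketch, header sizes assume IPv4/UDP with no options):

    import math

    inner = 20 + 8 + 1500             # IP + UDP headers + payload_size
    mtu = 1200                        # the tunnel L3 MTU set above
    per_frag = (mtu - 20) // 8 * 8    # usable data per fragment, 8-byte aligned
    print(math.ceil((inner - 20) / per_frag))   # -> 2 fragments per packet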
@@ -958,7 +1064,7 @@ class TestMPLS(VppTestCase):
                                          VppMplsLabel(33, ttl=47)])

     def test_mpls_tunnel_many(self):
-        """ Multiple Tunnels """
+        """ MPLS Multiple Tunnels """

         for ii in range(10):
             mpls_tun = VppMPLSTunnelInterface(
@@ -1101,10 +1207,11 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have swapped to pg1
         # so as to have matched the route in table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  is_interface_rx=1)])
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          self.pg1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
         route_34_eos.add_vpp_config()

         #
@@ -1144,7 +1251,7 @@ class TestMPLS(VppTestCase):
                           labels=[VppMplsLabel(3402)]),
              VppRoutePath("0.0.0.0",
                           self.pg1.sw_if_index,
-                          is_interface_rx=1)],
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
             is_multicast=1)
         route_3400_eos.add_vpp_config()
@@ -1225,6 +1332,7 @@ class TestMPLS(VppTestCase):
                          VppMRoutePath(mpls_tun._sw_if_index,
                                        MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
         route_232_1_1_1.add_vpp_config()
+        self.logger.info(self.vapi.cli("sh ip mfib index 0"))

         self.vapi.cli("clear trace")
         tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
@@ -1263,12 +1371,14 @@ class TestMPLS(VppTestCase):
         # if the packet egresses, then we must have matched the route in
         # table 1
         #
-        route_34_eos = VppMplsRoute(self, 34, 1,
-                                    [VppRoutePath("0.0.0.0",
-                                                  self.pg1.sw_if_index,
-                                                  nh_table_id=1,
-                                                  rpf_id=55)],
-                                    is_multicast=1)
+        route_34_eos = VppMplsRoute(
+            self, 34, 1,
+            [VppRoutePath("0.0.0.0",
+                          0xffffffff,
+                          nh_table_id=1,
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)

         route_34_eos.add_vpp_config()
@@ -1281,9 +1391,10 @@ class TestMPLS(VppTestCase):
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")

         #
-        # set the RPF-ID of the enrtry to match the input packet's
+        # set the RPF-ID of the entry to match the input packet's
         #
         route_232_1_1_1.update_rpf_id(55)
+        self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))

         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1")
@@ -1291,7 +1402,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip4(self.pg1, rx, tx)

         #
-        # disposed packets have an invalid IPv4 checkusm
+        # disposed packets have an invalid IPv4 checksum
         #
         tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
                                              dst_ip="232.1.1.1", n=65,
                                              chksum=1)
@@ -1320,8 +1431,8 @@ class TestMPLS(VppTestCase):
             MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
             table_id=1,
             paths=[VppMRoutePath(self.pg1.sw_if_index,
-                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
-            is_ip6=1)
+                                 MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+                                 proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
         route_ff.add_vpp_config()

         #
@@ -1335,11 +1446,11 @@ class TestMPLS(VppTestCase):
         route_34_eos = VppMplsRoute(
             self, 34, 1,
             [VppRoutePath("::",
-                          self.pg1.sw_if_index,
+                          0xffffffff,
                           nh_table_id=1,
-                          rpf_id=55,
-                          proto=DpoProto.DPO_PROTO_IP6)],
-            is_multicast=1)
+                          rpf_id=55)],
+            is_multicast=1,
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)

         route_34_eos.add_vpp_config()
@@ -1351,7 +1462,7 @@ class TestMPLS(VppTestCase):
         self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")

         #
-        # set the RPF-ID of the enrtry to match the input packet's
+        # set the RPF-ID of the entry to match the input packet's
         #
         route_ff.update_rpf_id(55)
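For readers following the RPF-ID hunks: the MPLS multicast route above carries rpf_id=55 and deliberately no RX interface (0xffffffff), so disposition only succeeds when the mfib entry agrees. Schematically (a summary of the test logic, not new API):

    # MPLS route:  rpf_id=55, nh_table_id=1   -> tags disposed packets
    # mfib entry:  update_rpf_id(55)          -> IDs match, forwarded
    # mfib entry:  update_rpf_id(56)          -> mismatch, dropped (next hunk)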
@@ -1371,7 +1482,7 @@ class TestMPLS(VppTestCase):
         self.verify_capture_ip6_icmp(self.pg0, rx, tx)

         #
-        # set the RPF-ID of the enrtry to not match the input packet's
+        # set the RPF-ID of the entry to not match the input packet's
         #
         route_ff.update_rpf_id(56)
         tx = self.create_stream_labelled_ip6(self.pg0,
@@ -1379,10 +1490,69 @@ class TestMPLS(VppTestCase):
                                              [VppMplsLabel(34)],
                                              dst_ip="ff01::1")
         self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56")

+    def test_6pe(self):
+        """ MPLS 6PE """
+
+        #
+        # Add a non-recursive route with a single out label
+        #
+        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+                                    [VppRoutePath(self.pg0.remote_ip4,
+                                                  self.pg0.sw_if_index,
+                                                  labels=[VppMplsLabel(45)])])
+        route_10_0_0_1.add_vpp_config()
+
+        # bind a local label to the route
+        binding = VppMplsIpBind(self, 44, "10.0.0.1", 32)
+        binding.add_vpp_config()
+
+        #
+        # a labelled v6 route that resolves through the v4
+        #
+        route_2001_3 = VppIpRoute(
+            self, "2001::3", 128,
+            [VppRoutePath("10.0.0.1",
+                          INVALID_INDEX,
+                          labels=[VppMplsLabel(32)])])
+        route_2001_3.add_vpp_config()
+
+        tx = self.create_stream_ip6(self.pg0, "2001::3")
+        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+
+        self.verify_capture_labelled_ip6(self.pg0, rx, tx,
+                                         [VppMplsLabel(45),
+                                          VppMplsLabel(32)])
+
+        #
+        # and a v4 recursive via the v6
+        #
+        route_20_3 = VppIpRoute(
+            self, "20.0.0.3", 32,
+            [VppRoutePath("2001::3",
+                          INVALID_INDEX,
+                          labels=[VppMplsLabel(99)])])
+        route_20_3.add_vpp_config()
+
+        tx = self.create_stream_ip4(self.pg0, "20.0.0.3")
+        rx = self.send_and_expect(self.pg0, tx, self.pg0)
+
+        self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+                                         [VppMplsLabel(45),
+                                          VppMplsLabel(32),
+                                          VppMplsLabel(99)])
+

 class TestMPLSDisabled(VppTestCase):
     """ MPLS disabled """

+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSDisabled, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSDisabled, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLSDisabled, self).setUp()
@@ -1392,7 +1562,7 @@ class TestMPLSDisabled(VppTestCase):
         self.tbl = VppMplsTable(self, 0)
         self.tbl.add_vpp_config()

-        # PG0 is MPLS enalbed
+        # PG0 is MPLS enabled
         self.pg0.admin_up()
         self.pg0.config_ip4()
         self.pg0.resolve_arp()
@@ -1417,7 +1587,7 @@ class TestMPLSDisabled(VppTestCase):
              MPLS(label=32, ttl=64) /
              IPv6(src="2001::1", dst=self.pg0.remote_ip6) /
              UDP(sport=1234, dport=1234) /
-             Raw('\xa5' * 100))
+             Raw(b'\xa5' * 100))

         #
         # A simple MPLS xconnect - eos label in label out
@@ -1460,7 +1630,15 @@ class TestMPLSDisabled(VppTestCase):


 class TestMPLSPIC(VppTestCase):
-    """ MPLS PIC edge convergence """
+    """ MPLS Prefix-Independent Convergence (PIC) edge convergence """
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSPIC, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSPIC, cls).tearDownClass()

     def setUp(self):
         super(TestMPLSPIC, self).setUp()
@@ -1480,6 +1658,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg0.config_ip4()
         self.pg0.resolve_arp()
         self.pg0.enable_mpls()
+
         self.pg1.admin_up()
         self.pg1.config_ip4()
         self.pg1.resolve_arp()
@@ -1493,6 +1672,7 @@ class TestMPLSPIC(VppTestCase):
         self.pg2.set_table_ip6(1)
         self.pg2.config_ip6()
         self.pg2.resolve_ndp()
+
         self.pg3.admin_up()
         self.pg3.set_table_ip4(1)
         self.pg3.config_ip4()
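The PIC tests that follow repeat one ECMP bookkeeping idiom many times. Expressed once as a stand-alone helper (hypothetical name; the logic mirrors the assertions this diff adds):

    def assert_ecmp_split(test, pkts, rx0, rx1):
        # neither ECMP path may be starved...
        test.assertNotEqual(0, len(rx0))
        test.assertNotEqual(0, len(rx1))
        # ...and nothing may be dropped or duplicated across the pair
        test.assertEqual(len(pkts), len(rx0) + len(rx1))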
@@ -1513,7 +1693,7 @@ class TestMPLSPIC(VppTestCase):
         super(TestMPLSPIC, self).tearDown()

     def test_mpls_ibgp_pic(self):
-        """ MPLS iBGP PIC edge convergence
+        """ MPLS iBGP Prefix-Independent Convergence (PIC) edge convergence

         1) setup many iBGP VPN routes via a pair of iBGP peers.
         2) Check ECMP forwarding to these peers
@@ -1542,25 +1722,28 @@ class TestMPLSPIC(VppTestCase):
         #
         vpn_routes = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath("10.0.0.45",
-                                                       0xffffffff,
-                                                       labels=[145],
-                                                       is_resolve_host=1),
-                                          VppRoutePath("10.0.0.46",
-                                                       0xffffffff,
-                                                       labels=[146],
-                                                       is_resolve_host=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    "10.0.0.45",
+                    0xffffffff,
+                    labels=[145],
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+                 VppRoutePath(
+                    "10.0.0.46",
+                    0xffffffff,
+                    labels=[146],
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             pkts.append(Ether(dst=self.pg2.local_mac,
                               src=self.pg2.remote_mac) /
                         IP(src=self.pg2.remote_ip4, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))

         #
         # Send the packet stream (one pkt to each VPN route)
@@ -1570,18 +1753,21 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)

-        # not testig the LB hashing algorithm so we're not concerned
+        # not testing the LB hashing algorithm so we're not concerned
         # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
@@ -1598,7 +1784,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg0.get_capture(len(pkts))
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
         # enable the FIB walk process to converge the FIB
@@ -1612,7 +1801,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg0.get_capture(64)
+        rx0 = self.pg0.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
         # Add the IGP route back and we return to load-balancing
         #
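Note the deliberate contrast between the iBGP test above and the eBGP test below: iBGP VPN paths recurse through a remote PE address and push a VPN label, while eBGP paths resolve via the directly attached peer. Side by side (both forms appear verbatim in this diff):

    # iBGP VPN path: recursive via PE loopback, label pushed
    VppRoutePath("10.0.0.45", 0xffffffff, labels=[145],
                 flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)

    # eBGP VPN path: next-hop on a connected subnet, no label
    VppRoutePath(self.pg2.remote_ip4, 0xffffffff, nh_table_id=1,
                 flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)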
@@ -1623,15 +1815,18 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg0._get_capture(1)
-        rx1 = self.pg1._get_capture(1)
+        rx0 = self.pg0._get_capture(NUM_PKTS)
+        rx1 = self.pg1._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

     def test_mpls_ebgp_pic(self):
-        """ MPLS eBGP PIC edge convergence
+        """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence

-        1) setup many eBGP VPN routes via a pair of eBGP peers
+        1) setup many eBGP VPN routes via a pair of eBGP peers.
         2) Check ECMP forwarding to these peers
         3) withdraw one eBGP path - expect LB across remaining eBGP
         """
@@ -1643,19 +1838,22 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "192.168.1.%d" % ii
             local_label = 1600 + ii
-            vpn_routes.append(VppIpRoute(self, dst, 32,
-                                         [VppRoutePath(self.pg2.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1),
-                                          VppRoutePath(self.pg3.remote_ip4,
-                                                       0xffffffff,
-                                                       nh_table_id=1,
-                                                       is_resolve_attached=1)],
-                                         table_id=1))
+            vpn_routes.append(VppIpRoute(
+                self, dst, 32,
+                [VppRoutePath(
+                    self.pg2.remote_ip4,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                    self.pg3.remote_ip4,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
@@ -1667,21 +1865,31 @@ class TestMPLSPIC(VppTestCase):
                         MPLS(label=local_label, ttl=64) /
                         IP(src=self.pg0.remote_ip4, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))

+        #
+        # Send the packet stream (one pkt to each VPN route)
+        #  - expect a 50-50 split of the traffic
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
+
+        # not testing the LB hashing algorithm so we're not concerned
+        # with the split ratio, just as long as neither is 0
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
@@ -1697,34 +1905,48 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
         # enable the FIB walk process to converge the FIB
         #
         self.vapi.ppcli("test fib-walk-process enable")
+
+        #
+        # packets should still be forwarded through the remaining peer
+        #
         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
-        # put the connecteds back
+        # put the connected routes back
         #
         self.pg2.config_ip4()
+        self.pg2.resolve_arp()

         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

     def test_mpls_v6_ebgp_pic(self):
-        """ MPLSv6 eBGP PIC edge convergence
+        """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence

         1) setup many eBGP VPNv6 routes via a pair of eBGP peers
         2) Check ECMP forwarding to these peers
         3) withdraw one eBGP path - expect LB across remaining eBGP
         """
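Another cleanup visible in the v6 variant below: VppIpRoute and VppMplsIpBind lose their is_ip6 flags, the address family now being inferred from the prefix itself. The resulting call shape, sketched with the test's own values:

    VppIpRoute(self, "3000::1", 128, [...], table_id=1)   # v6 inferred
    VppMplsIpBind(self, 1600, "3000::1", 128, ip_table_id=1)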
@@ -1738,28 +1960,26 @@ class TestMPLSPIC(VppTestCase):
         vpn_routes = []
         vpn_bindings = []
         pkts = []
-        for ii in range(64):
+        for ii in range(NUM_PKTS):
             dst = "3000::%d" % ii
             local_label = 1600 + ii
             vpn_routes.append(VppIpRoute(
                 self, dst, 128,
-                [VppRoutePath(self.pg2.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              is_resolve_attached=1,
-                              proto=DpoProto.DPO_PROTO_IP6),
-                 VppRoutePath(self.pg3.remote_ip6,
-                              0xffffffff,
-                              nh_table_id=1,
-                              proto=DpoProto.DPO_PROTO_IP6,
-                              is_resolve_attached=1)],
-                table_id=1,
-                is_ip6=1))
+                [VppRoutePath(
+                    self.pg2.remote_ip6,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+                 VppRoutePath(
+                    self.pg3.remote_ip6,
+                    0xffffffff,
+                    nh_table_id=1,
+                    flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+                table_id=1))
             vpn_routes[ii].add_vpp_config()

             vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
-                                              ip_table_id=1,
-                                              is_ip6=1))
+                                              ip_table_id=1))
             vpn_bindings[ii].add_vpp_config()

             pkts.append(Ether(dst=self.pg0.local_mac,
@@ -1767,21 +1987,25 @@ class TestMPLSPIC(VppTestCase):
                         MPLS(label=local_label, ttl=64) /
                         IPv6(src=self.pg0.remote_ip6, dst=dst) /
                         UDP(sport=1234, dport=1234) /
-                        Raw('\xa5' * 100))
+                        Raw(b'\xa5' * 100))
+            self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))

         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))

         #
         # use a test CLI command to stop the FIB walk process, this
         # will prevent the FIB converging the VPN routes and thus allow
-        # us to probe the interim (psot-fail, pre-converge) state
+        # us to probe the interim (post-fail, pre-converge) state
         #
         self.vapi.ppcli("test fib-walk-process disable")
@@ -1799,7 +2023,10 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
         # enable the FIB walk process to converge the FIB
@@ -1809,27 +2036,43 @@ class TestMPLSPIC(VppTestCase):
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg3.get_capture(len(pkts))
+        rx0 = self.pg3.get_capture(NUM_PKTS)
+        self.assertEqual(len(pkts), len(rx0),
+                         "Expected all (%s) packets across single path. "
+                         "rx0: %s." % (len(pkts), len(rx0)))

         #
-        # put the connecteds back
+        # put the connected routes back
         #
+        self.logger.info(self.vapi.cli("sh log"))
         self.pg2.admin_up()
         self.pg2.config_ip6()
+        self.pg2.resolve_ndp()

         self.pg0.add_stream(pkts)
         self.pg_enable_capture(self.pg_interfaces)
         self.pg_start()

-        rx0 = self.pg2._get_capture(1)
-        rx1 = self.pg3._get_capture(1)
+        rx0 = self.pg2._get_capture(NUM_PKTS)
+        rx1 = self.pg3._get_capture(NUM_PKTS)
         self.assertNotEqual(0, len(rx0))
         self.assertNotEqual(0, len(rx1))
+        self.assertEqual(len(pkts), len(rx0) + len(rx1),
+                         "Expected all (%s) packets across both ECMP paths. "
+                         "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))


 class TestMPLSL2(VppTestCase):
     """ MPLS-L2 """

+    @classmethod
+    def setUpClass(cls):
+        super(TestMPLSL2, cls).setUpClass()
+
+    @classmethod
+    def tearDownClass(cls):
+        super(TestMPLSL2, cls).tearDownClass()
+
     def setUp(self):
         super(TestMPLSL2, self).setUp()
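The L2 tests below now skip pg0.resolve_arp() during setup so the VPWS test can watch VPP originate its own ARP request. For reference, the request verify_arp_req() expects could be built standalone in scapy roughly as follows (the MAC/IP values are placeholders for the fixture's):

    from scapy.layers.l2 import Ether, ARP

    smac, sip, dip = "02:fe:00:00:00:00", "10.0.0.1", "10.0.0.2"
    arp_req = (Ether(dst="ff:ff:ff:ff:ff:ff", src=smac) /
               ARP(op=1,   # who-has, per the constants restored up top
                   hwsrc=smac, psrc=sip, pdst=dip))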
@@ -1842,10 +2085,9 @@ class TestMPLSL2(VppTestCase):
             tbl.add_vpp_config()
             self.tables.append(tbl)

-        # use pg0 as the core facing interface
+        # use pg0 as the core facing interface, don't resolve ARP
         self.pg0.admin_up()
         self.pg0.config_ip4()
-        self.pg0.resolve_arp()
         self.pg0.enable_mpls()

         # use the other 2 for customer facing L2 links
@@ -1879,6 +2121,22 @@ class TestMPLSL2(VppTestCase):
             self.assertEqual(rx_eth.src, tx_eth.src)
             self.assertEqual(rx_eth.dst, tx_eth.dst)

+    def verify_arp_req(self, rx, smac, sip, dip):
+        ether = rx[Ether]
+        self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+        self.assertEqual(ether.src, smac)
+
+        arp = rx[ARP]
+        self.assertEqual(arp.hwtype, 1)
+        self.assertEqual(arp.ptype, 0x800)
+        self.assertEqual(arp.hwlen, 6)
+        self.assertEqual(arp.plen, 4)
+        self.assertEqual(arp.op, ARP.who_has)
+        self.assertEqual(arp.hwsrc, smac)
+        self.assertEqual(arp.hwdst, "00:00:00:00:00:00")
+        self.assertEqual(arp.psrc, sip)
+        self.assertEqual(arp.pdst, dip)
+
     def test_vpws(self):
         """ Virtual Private Wire Service """
@@ -1903,8 +2161,9 @@ class TestMPLSL2(VppTestCase):
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
                           mpls_tun_1.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
         route_55_eos.add_vpp_config()

         #
@@ -1927,9 +2186,9 @@ class TestMPLSL2(VppTestCase):
                  src="00:00:de:ad:be:ef") /
                  IP(src="10.10.10.10", dst="11.11.11.11") /
                  UDP(sport=1234, dport=1234) /
-                 Raw('\xa5' * 100))
+                 Raw(b'\xa5' * 100))

-        tx0 = pcore * 65
+        tx0 = pcore * NUM_PKTS
         rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
         payload = pcore[MPLS].payload
@@ -1937,100 +2196,164 @@ class TestMPLSL2(VppTestCase):
         self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
         self.assertEqual(rx0[0][Ether].src, payload[Ether].src)

         #
-        # Inject a packet from the custoer/L2 side
+        # Inject a packet from the customer/L2 side
+        # there's no resolved ARP entry so the first packet we see should be
+        # an ARP request
+        #
+        tx1 = pcore[MPLS].payload
+        rx1 = self.send_and_expect(self.pg1, [tx1], self.pg0)
+
+        self.verify_arp_req(rx1[0],
+                            self.pg0.local_mac,
+                            self.pg0.local_ip4,
+                            self.pg0.remote_ip4)
+
+        #
+        # resolve the ARP entries and send again
         #
-        tx1 = pcore[MPLS].payload * 65
+        self.pg0.resolve_arp()
+        tx1 = pcore[MPLS].payload * NUM_PKTS
         rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)

         self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])

     def test_vpls(self):
         """ Virtual Private LAN Service """
+
+        # we skipped this in the setup
+        self.pg0.resolve_arp()
+
         #
-        # Create an L2 MPLS tunnel
+        # Create two L2 MPLS tunnels
         #
-        mpls_tun = VppMPLSTunnelInterface(
+        mpls_tun1 = VppMPLSTunnelInterface(
             self,
             [VppRoutePath(self.pg0.remote_ip4,
                           self.pg0.sw_if_index,
                           labels=[VppMplsLabel(42)])],
             is_l2=1)
-        mpls_tun.add_vpp_config()
-        mpls_tun.admin_up()
+        mpls_tun1.add_vpp_config()
+        mpls_tun1.admin_up()
+
+        mpls_tun2 = VppMPLSTunnelInterface(
+            self,
+            [VppRoutePath(self.pg0.remote_ip4,
+                          self.pg0.sw_if_index,
+                          labels=[VppMplsLabel(43)])],
+            is_l2=1)
+        mpls_tun2.add_vpp_config()
+        mpls_tun2.admin_up()

         #
-        # Create a label entry to for 55 that does L2 input to the tunnel
+        # Create label entries, 55 and 56, that do L2 input to the tunnels;
+        # the latter includes a Pseudo Wire Control Word
         #
         route_55_eos = VppMplsRoute(
             self, 55, 1,
             [VppRoutePath("0.0.0.0",
-                          mpls_tun.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+                          mpls_tun1.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
+        route_56_eos = VppMplsRoute(
+            self, 56, 1,
+            [VppRoutePath("0.0.0.0",
+                          mpls_tun2.sw_if_index,
+                          type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                          flags=FibPathFlags.FIB_PATH_FLAG_POP_PW_CW,
+                          proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
+
+        # move me
+        route_56_eos.add_vpp_config()
         route_55_eos.add_vpp_config()

+        self.logger.info(self.vapi.cli("sh mpls fib 56"))
+
         #
         # add the tunnels to the customers bridge-domain
         #
         self.vapi.sw_interface_set_l2_bridge(
-            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1)
         self.vapi.sw_interface_set_l2_bridge(
             rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)

         #
-        # Packet from the customer interface and from the core
-        #
-        p_cust = (Ether(dst="00:00:de:ad:ba:be",
-                        src="00:00:de:ad:be:ef") /
-                  IP(src="10.10.10.10", dst="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
-        p_core = (Ether(src="00:00:de:ad:ba:be",
-                        dst="00:00:de:ad:be:ef") /
-                  IP(dst="10.10.10.10", src="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
+        # Packet from host on the customer interface to each host
+        # reachable over the core, and vice-versa
+        #
+        p_cust1 = (Ether(dst="00:00:de:ad:ba:b1",
+                         src="00:00:de:ad:be:ef") /
+                   IP(src="10.10.10.10", dst="11.11.11.11") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_cust2 = (Ether(dst="00:00:de:ad:ba:b2",
+                         src="00:00:de:ad:be:ef") /
+                   IP(src="10.10.10.10", dst="11.11.11.12") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_core1 = (Ether(dst=self.pg0.local_mac,
+                         src=self.pg0.remote_mac) /
+                   MPLS(label=55, ttl=64) /
+                   Ether(src="00:00:de:ad:ba:b1",
+                         dst="00:00:de:ad:be:ef") /
+                   IP(dst="10.10.10.10", src="11.11.11.11") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))
+        p_core2 = (Ether(dst=self.pg0.local_mac,
+                         src=self.pg0.remote_mac) /
+                   MPLS(label=56, ttl=64) /
+                   Raw(b'\x01' * 4) /  # PW CW
+                   Ether(src="00:00:de:ad:ba:b2",
+                         dst="00:00:de:ad:be:ef") /
+                   IP(dst="10.10.10.10", src="11.11.11.12") /
+                   UDP(sport=1234, dport=1234) /
+                   Raw(b'\xa5' * 100))

         #
         # The BD is learning, so send in one of each packet to learn
         #
-        p_core_encap = (Ether(dst=self.pg0.local_mac,
-                              src=self.pg0.remote_mac) /
-                        MPLS(label=55, ttl=64) /
-                        p_core)
-        self.pg1.add_stream(p_cust)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
-        self.pg0.add_stream(p_core_encap)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        # 2 packets due to BD flooding
+        rx = self.send_and_expect(self.pg1, p_cust1, self.pg0, n_rx=2)
+        rx = self.send_and_expect(self.pg1, p_cust2, self.pg0, n_rx=2)

-        # we've learnt this so expect it be be forwarded
-        rx0 = self.pg1.get_capture(1)
+        # we've learnt this so expect it to be forwarded, not flooded
+        rx = self.send_and_expect(self.pg0, [p_core1], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust1[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust1[Ether].dst)

-        self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
-        self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
+        rx = self.send_and_expect(self.pg0, [p_core2], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust2[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust2[Ether].dst)

         #
-        # now a stream in each direction
+        # now a stream in each direction from each host
         #
-        self.pg1.add_stream(p_cust * 65)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        rx = self.send_and_expect(self.pg1, p_cust1 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(rx, p_cust1 * NUM_PKTS,
+                                              [VppMplsLabel(42)])

-        rx0 = self.pg0.get_capture(65)
+        rx = self.send_and_expect(self.pg1, p_cust2 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(rx, p_cust2 * NUM_PKTS,
+                                              [VppMplsLabel(43)])

-        self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
-                                              [VppMplsLabel(42)])
+        rx = self.send_and_expect(self.pg0, p_core1 * NUM_PKTS, self.pg1)
+        rx = self.send_and_expect(self.pg0, p_core2 * NUM_PKTS, self.pg1)

         #
         # remove interfaces from customers bridge-domain
         #
         self.vapi.sw_interface_set_l2_bridge(
-            rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1, enable=0)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1, enable=0)
         self.vapi.sw_interface_set_l2_bridge(
             rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)

+
 if __name__ == '__main__':
     unittest.main(testRunner=VppTestRunner)
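For completeness: in a VPP source tree these suites are normally driven through the test framework's make target, e.g. (exact invocation can differ between releases):

    make test TEST=test_mpls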