from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \
- VppMplsLabel, MplsLspMode
+ VppMplsLabel, MplsLspMode, find_mpls_route, \
+ FibPathProto, FibPathType, FibPathFlags
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
+import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS
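+
+# the number of packets sent in each test stream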
+NUM_PKTS = 67
+
def verify_filter(capture, sent):
if not len(capture) == len(sent):
class TestMPLS(VppTestCase):
""" MPLS Test Case """
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLS, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMPLS, cls).tearDownClass()
+
def setUp(self):
super(TestMPLS, self).setUp()
# ICMP sourced from the interface's address
self.assertEqual(rx_ip.src, src_if.local_ip6)
- # hop-limit reset to 255 for IMCP packet
+ # hop-limit reset to 255 for ICMP packet
- self.assertEqual(rx_ip.hlim, 254)
+ self.assertEqual(rx_ip.hlim, 255)
icmp = rx[ICMPv6TimeExceeded]
labels=[VppMplsLabel(33)])])
route_32_eos.add_vpp_config()
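+ # verify the route and its out-going label stack are now
+ # present in MPLS FIB table 0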
+ self.assertTrue(
+ find_mpls_route(self, 0, 32, 1,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(33)])]))
+
#
# a stream that matches the route for 10.0.0.1
# PG0 is in the default table
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=31, exp=1)])
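+ # check the route's 'to' counters account for the whole stream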
+ self.assertEqual(route_32_eos.get_stats_to()['packets'], 257)
+
#
# A simple MPLS xconnect - non-eos label in label out
#
self.verify_capture_labelled(self.pg0, rx, tx,
[VppMplsLabel(33, ttl=20, exp=7),
VppMplsLabel(99)])
+ self.assertEqual(route_32_neos.get_stats_to()['packets'], 257)
#
# A simple MPLS xconnect - non-eos label in label out, uniform mode
self.verify_capture_ip4(self.pg0, rx, tx)
#
- # disposed packets have an invalid IPv4 checkusm
+ # disposed packets have an invalid IPv4 checksum
#
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)],
dst_ip=self.pg0.remote_ip4,
self, 333, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_333_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)])
self, 334, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[VppMplsLabel(3)],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[VppMplsLabel(3)])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_334_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(self.pg0,
self, 335, 1,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)],
- proto=DpoProto.DPO_PROTO_IP6)])
+ labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_335_eos.add_vpp_config()
tx = self.create_stream_labelled_ip6(
labels=[VppMplsLabel(44),
VppMplsLabel(45)])])
route_34_eos.add_vpp_config()
+ self.logger.info(self.vapi.cli("sh mpls fib 34"))
tx = self.create_stream_labelled_ip4(self.pg0,
[VppMplsLabel(34, ttl=3)])
VppMplsLabel(44),
VppMplsLabel(45, ttl=2)])
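+ # counters are updated on both the EOS route and the non-EOS
+ # route through which it recurses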
+ self.assertEqual(route_34_eos.get_stats_to()['packets'], 257)
+ self.assertEqual(route_32_neos.get_stats_via()['packets'], 257)
+
#
# A recursive EOS x-connect, which resolves through another x-connect
# in uniform mode
VppMplsLabel(44),
VppMplsLabel(46),
VppMplsLabel(55)])
+ self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257)
ip_10_0_0_1.remove_vpp_config()
route_34_neos.remove_vpp_config()
self, "2001::3", 128,
[VppRoutePath(self.pg0.remote_ip6,
self.pg0.sw_if_index,
- proto=DpoProto.DPO_PROTO_IP6,
labels=[VppMplsLabel(32,
- mode=MplsLspMode.UNIFORM)])],
- is_ip6=1)
+ mode=MplsLspMode.UNIFORM)])])
route_2001_3.add_vpp_config()
tx = self.create_stream_ip6(self.pg0, "2001::3",
[VppMplsLabel(32),
VppMplsLabel(44)])
+ self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257)
+
#
# add a recursive path, with 2 labels, via the 3 label route
#
VppMplsLabel(44),
VppMplsLabel(45)])
+ self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257)
+
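+ # send the stream again; the counters accumulate over both sends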
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx,
+ [VppMplsLabel(32),
+ VppMplsLabel(33),
+ VppMplsLabel(34),
+ VppMplsLabel(44),
+ VppMplsLabel(45)])
+
+ self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514)
+
#
# cleanup
#
VppMplsLabel(46, ttl=47),
VppMplsLabel(33, ttl=47)])
+ def test_mpls_tunnel_many(self):
+ """ MPLS Multiple Tunnels """
+
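+ #
+ # build a series of tunnels so that several MPLS tunnel
+ # interfaces co-exist
+ #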
+ for ii in range(10):
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[VppMplsLabel(44, ttl=32),
+ VppMplsLabel(46, MplsLspMode.UNIFORM)])])
+ mpls_tun.add_vpp_config()
+ mpls_tun.admin_up()
+
def test_v4_exp_null(self):
""" MPLS V4 Explicit NULL test """
# if the packet egresses, then we must have swapped to pg1
# so as to have matched the route in table 1
#
- route_34_eos = VppMplsRoute(self, 34, 1,
- [VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- is_interface_rx=1)])
+ route_34_eos = VppMplsRoute(
+ self, 34, 1,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)])
route_34_eos.add_vpp_config()
#
labels=[VppMplsLabel(3402)]),
VppRoutePath("0.0.0.0",
self.pg1.sw_if_index,
- is_interface_rx=1)],
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX)],
is_multicast=1)
route_3400_eos.add_vpp_config()
VppMRoutePath(mpls_tun._sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
+ self.logger.info(self.vapi.cli("sh ip mfib index 0"))
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "232.1.1.1")
# if the packet egresses, then we must have matched the route in
# table 1
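+ # (the path specifies no next-hop interface, hence sw_if_index
+ # 0xffffffff; packets are looked up in table 1 carrying the
+ # RPF-ID)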
#
- route_34_eos = VppMplsRoute(self, 34, 1,
- [VppRoutePath("0.0.0.0",
- self.pg1.sw_if_index,
- nh_table_id=1,
- rpf_id=55)],
- is_multicast=1)
+ route_34_eos = VppMplsRoute(
+ self, 34, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=1,
+ rpf_id=55)],
+ is_multicast=1,
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)
route_34_eos.add_vpp_config()
self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none")
#
- # set the RPF-ID of the enrtry to match the input packet's
+ # set the RPF-ID of the entry to match the input packet's
#
route_232_1_1_1.update_rpf_id(55)
+ self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1"))
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1")
self.verify_capture_ip4(self.pg1, rx, tx)
#
- # disposed packets have an invalid IPv4 checkusm
+ # disposed packets have an invalid IPv4 checksum
#
tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)],
dst_ip="232.1.1.1", n=65,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
table_id=1,
paths=[VppMRoutePath(self.pg1.sw_if_index,
- MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
- is_ip6=1)
+ MRouteItfFlags.MFIB_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
route_ff.add_vpp_config()
#
route_34_eos = VppMplsRoute(
self, 34, 1,
[VppRoutePath("::",
- self.pg1.sw_if_index,
+ 0xffffffff,
nh_table_id=1,
- rpf_id=55,
- proto=DpoProto.DPO_PROTO_IP6)],
- is_multicast=1)
+ rpf_id=55)],
+ is_multicast=1,
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)
route_34_eos.add_vpp_config()
self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss")
#
- # set the RPF-ID of the enrtry to match the input packet's
+ # set the RPF-ID of the entry to match the input packet's
#
route_ff.update_rpf_id(55)
self.verify_capture_ip6_icmp(self.pg0, rx, tx)
#
- # set the RPF-ID of the enrtry to not match the input packet's
+ # set the RPF-ID of the entry to not match the input packet's
#
route_ff.update_rpf_id(56)
tx = self.create_stream_labelled_ip6(self.pg0,
class TestMPLSDisabled(VppTestCase):
""" MPLS disabled """
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLSDisabled, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMPLSDisabled, cls).tearDownClass()
+
def setUp(self):
super(TestMPLSDisabled, self).setUp()
self.tbl = VppMplsTable(self, 0)
self.tbl.add_vpp_config()
- # PG0 is MPLS enalbed
+ # PG0 is MPLS enabled
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
class TestMPLSPIC(VppTestCase):
- """ MPLS PIC edge convergence """
+ """ MPLS Prefix-Independent Convergence (PIC) edge convergence """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLSPIC, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMPLSPIC, cls).tearDownClass()
def setUp(self):
super(TestMPLSPIC, self).setUp()
self.pg0.config_ip4()
self.pg0.resolve_arp()
self.pg0.enable_mpls()
+
self.pg1.admin_up()
self.pg1.config_ip4()
self.pg1.resolve_arp()
self.pg2.set_table_ip6(1)
self.pg2.config_ip6()
self.pg2.resolve_ndp()
+
self.pg3.admin_up()
self.pg3.set_table_ip4(1)
self.pg3.config_ip4()
super(TestMPLSPIC, self).tearDown()
def test_mpls_ibgp_pic(self):
- """ MPLS iBGP PIC edge convergence
+ """ MPLS iBGP Prefix-Independent Convergence (PIC) edge convergence
1) setup many iBGP VPN routes via a pair of iBGP peers.
- 2) Check EMCP forwarding to these peers
+ 2) Check ECMP forwarding to these peers
#
vpn_routes = []
pkts = []
- for ii in range(64):
+ for ii in range(NUM_PKTS):
dst = "192.168.1.%d" % ii
- vpn_routes.append(VppIpRoute(self, dst, 32,
- [VppRoutePath("10.0.0.45",
- 0xffffffff,
- labels=[145],
- is_resolve_host=1),
- VppRoutePath("10.0.0.46",
- 0xffffffff,
- labels=[146],
- is_resolve_host=1)],
- table_id=1))
+ vpn_routes.append(VppIpRoute(
+ self, dst, 32,
+ [VppRoutePath(
+ "10.0.0.45",
+ 0xffffffff,
+ labels=[145],
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST),
+ VppRoutePath(
+ "10.0.0.46",
+ 0xffffffff,
+ labels=[146],
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg2.local_mac,
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg0._get_capture(1)
- rx1 = self.pg1._get_capture(1)
+ rx0 = self.pg0._get_capture(NUM_PKTS)
+ rx1 = self.pg1._get_capture(NUM_PKTS)
- # not testig the LB hashing algorithm so we're not concerned
+ # not testing the LB hashing algorithm so we're not concerned
# with the split ratio, just as long as neither is 0
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
- # us to probe the interim (psot-fail, pre-converge) state
+ # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg0.get_capture(len(pkts))
+ rx0 = self.pg0.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
# enable the FIB walk process to converge the FIB
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg0.get_capture(64)
+ rx0 = self.pg0.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
# Add the IGP route back and we return to load-balancing
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg0._get_capture(1)
- rx1 = self.pg1._get_capture(1)
+ rx0 = self.pg0._get_capture(NUM_PKTS)
+ rx1 = self.pg1._get_capture(NUM_PKTS)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
def test_mpls_ebgp_pic(self):
- """ MPLS eBGP PIC edge convergence
+ """ MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence
- 1) setup many eBGP VPN routes via a pair of eBGP peers
+ 1) setup many eBGP VPN routes via a pair of eBGP peers.
- 2) Check EMCP forwarding to these peers
+ 2) Check ECMP forwarding to these peers
3) withdraw one eBGP path - expect LB across remaining eBGP
"""
vpn_routes = []
vpn_bindings = []
pkts = []
- for ii in range(64):
+ for ii in range(NUM_PKTS):
dst = "192.168.1.%d" % ii
local_label = 1600 + ii
- vpn_routes.append(VppIpRoute(self, dst, 32,
- [VppRoutePath(self.pg2.remote_ip4,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1),
- VppRoutePath(self.pg3.remote_ip4,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1)],
- table_id=1))
+ vpn_routes.append(VppIpRoute(
+ self, dst, 32,
+ [VppRoutePath(
+ self.pg2.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip4,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
+ #
+ # Send the packet stream (one pkt to each VPN route)
+ # - expect a 50-50 split of the traffic
+ #
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg2._get_capture(1)
- rx1 = self.pg3._get_capture(1)
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
+
+ # not testing the LB hashing algorithm so we're not concerned
+ # with the split ratio, just as long as neither is 0
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
- # us to probe the interim (psot-fail, pre-converge) state
+ # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg3.get_capture(len(pkts))
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
# enable the FIB walk process to converge the FIB
#
self.vapi.ppcli("test fib-walk-process enable")
+
+ #
+ # packets should still be forwarded through the remaining peer
+ #
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg3.get_capture(len(pkts))
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
- # put the connecteds back
+ # put the connected routes back
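+ # (and re-resolve ARP so the attached next-hops are
+ # reachable again)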
#
self.pg2.config_ip4()
+ self.pg2.resolve_arp()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg2._get_capture(1)
- rx1 = self.pg3._get_capture(1)
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
def test_mpls_v6_ebgp_pic(self):
- """ MPLSv6 eBGP PIC edge convergence
+ """ MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence
- 1) setup many eBGP VPNv6 routes via a pair of eBGP peers
+ 1) setup many eBGP VPNv6 routes via a pair of eBGP peers.
- 2) Check EMCP forwarding to these peers
+ 2) Check ECMP forwarding to these peers
vpn_routes = []
vpn_bindings = []
pkts = []
- for ii in range(64):
+ for ii in range(NUM_PKTS):
dst = "3000::%d" % ii
local_label = 1600 + ii
vpn_routes.append(VppIpRoute(
self, dst, 128,
- [VppRoutePath(self.pg2.remote_ip6,
- 0xffffffff,
- nh_table_id=1,
- is_resolve_attached=1,
- proto=DpoProto.DPO_PROTO_IP6),
- VppRoutePath(self.pg3.remote_ip6,
- 0xffffffff,
- nh_table_id=1,
- proto=DpoProto.DPO_PROTO_IP6,
- is_resolve_attached=1)],
- table_id=1,
- is_ip6=1))
+ [VppRoutePath(
+ self.pg2.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED),
+ VppRoutePath(
+ self.pg3.remote_ip6,
+ 0xffffffff,
+ nh_table_id=1,
+ flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED)],
+ table_id=1))
vpn_routes[ii].add_vpp_config()
vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
- ip_table_id=1,
- is_ip6=1))
+ ip_table_id=1))
vpn_bindings[ii].add_vpp_config()
pkts.append(Ether(dst=self.pg0.local_mac,
IPv6(src=self.pg0.remote_ip6, dst=dst) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
+ self.logger.info(self.vapi.cli("sh ip6 fib %s" % dst))
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg2._get_capture(1)
- rx1 = self.pg3._get_capture(1)
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
#
# use a test CLI command to stop the FIB walk process, this
# will prevent the FIB converging the VPN routes and thus allow
- # us to probe the interim (psot-fail, pre-converge) state
+ # us to probe the interim (post-fail, pre-converge) state
#
self.vapi.ppcli("test fib-walk-process disable")
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg3.get_capture(len(pkts))
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
# enable the FIB walk process to converge the FIB
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg3.get_capture(len(pkts))
+ rx0 = self.pg3.get_capture(NUM_PKTS)
+ self.assertEqual(len(pkts), len(rx0),
+ "Expected all (%s) packets across single path. "
+ "rx0: %s." % (len(pkts), len(rx0)))
#
- # put the connecteds back
+ # put the connected routes back
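+ # (and re-resolve NDP so the attached next-hops are
+ # reachable again)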
#
self.pg2.admin_up()
self.pg2.config_ip6()
+ self.pg2.resolve_ndp()
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg2._get_capture(1)
- rx1 = self.pg3._get_capture(1)
+ rx0 = self.pg2._get_capture(NUM_PKTS)
+ rx1 = self.pg3._get_capture(NUM_PKTS)
self.assertNotEqual(0, len(rx0))
self.assertNotEqual(0, len(rx1))
+ self.assertEqual(len(pkts), len(rx0) + len(rx1),
+ "Expected all (%s) packets across both ECMP paths. "
+ "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)))
class TestMPLSL2(VppTestCase):
""" MPLS-L2 """
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLSL2, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMPLSL2, cls).tearDownClass()
+
def setUp(self):
super(TestMPLSL2, self).setUp()
verify_mpls_stack(self, rx, mpls_labels)
tx_eth = tx[Ether]
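+ # scapy.compat.raw() yields the payload bytes under both
+ # python2 and python3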
- rx_eth = Ether(str(rx[MPLS].payload))
+ rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload))
self.assertEqual(rx_eth.src, tx_eth.src)
self.assertEqual(rx_eth.dst, tx_eth.dst)
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun_1.sw_if_index,
- is_interface_rx=1,
- proto=DpoProto.DPO_PROTO_ETHERNET)])
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
route_55_eos.add_vpp_config()
#
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
- tx0 = pcore * 65
+ tx0 = pcore * NUM_PKTS
rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
payload = pcore[MPLS].payload
self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
#
- # Inject a packet from the custoer/L2 side
+ # Inject a packet from the customer/L2 side
#
- tx1 = pcore[MPLS].payload * 65
+ tx1 = pcore[MPLS].payload * NUM_PKTS
rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
self, 55, 1,
[VppRoutePath("0.0.0.0",
mpls_tun.sw_if_index,
- is_interface_rx=1,
- proto=DpoProto.DPO_PROTO_ETHERNET)])
+ type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)],
+ eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET)
route_55_eos.add_vpp_config()
#
- # add to tunnel to the customers bridge-domain
+ # add the tunnel to the customer's bridge-domain
#
- self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
- bd_id=1)
- self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
- bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
#
# Packet from the customer interface and from the core
#
# now a stream in each direction
#
- self.pg1.add_stream(p_cust * 65)
+ self.pg1.add_stream(p_cust * NUM_PKTS)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
- rx0 = self.pg0.get_capture(65)
+ rx0 = self.pg0.get_capture(NUM_PKTS)
- self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
+ self.verify_capture_tunneled_ethernet(rx0, p_cust*NUM_PKTS,
[VppMplsLabel(42)])
#
- # remove interfaces from customers bridge-domain
+ # remove interfaces from the customer's bridge-domain
#
- self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
- bd_id=1,
- enable=0)
- self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
- bd_id=1,
- enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=mpls_tun.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)