X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=test%2Ftest_mpls.py;h=846179765dc7babe7c18e148c58231ea3a1ffb4f;hb=8a0a9d2600ef4da1da0b884e991a990644658963;hp=2595b039760cfd591ad33871d1b1e5d097dfc708;hpb=f5fa5ae2b021f946fbb8ec56e692459cd34bc7fb;p=vpp.git diff --git a/test/test_mpls.py b/test/test_mpls.py index 2595b039760..846179765dc 100644 --- a/test/test_mpls.py +++ b/test/test_mpls.py @@ -1,22 +1,52 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import unittest import socket +from framework import tag_fixme_vpp_workers from framework import VppTestCase, VppTestRunner -from vpp_ip import DpoProto -from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \ - VppMplsIpBind, VppIpMRoute, VppMRoutePath, \ - MRouteItfFlags, MRouteEntryFlags, VppIpTable, VppMplsTable, \ - VppMplsLabel, MplsLspMode +from vpp_ip import DpoProto, INVALID_INDEX +from vpp_ip_route import ( + VppIpRoute, + VppRoutePath, + VppMplsRoute, + VppMplsIpBind, + VppIpMRoute, + VppMRoutePath, + VppIpTable, + VppMplsTable, + VppMplsLabel, + MplsLspMode, + find_mpls_route, + FibPathProto, + FibPathType, + FibPathFlags, + VppMplsLabel, + MplsLspMode, +) from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface +from vpp_papi import VppEnum +import scapy.compat from scapy.packet import Raw -from scapy.layers.l2 import Ether -from scapy.layers.inet import IP, UDP, ICMP -from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded +from scapy.layers.l2 import Ether, ARP +from scapy.layers.inet import IP, UDP, ICMP, icmptypes, icmpcodes +from scapy.layers.inet6 import ( + IPv6, + ICMPv6TimeExceeded, + ICMPv6EchoRequest, + ICMPv6PacketTooBig, +) from scapy.contrib.mpls import MPLS +NUM_PKTS = 67 + +# scapy removed these attributes. +# we asked that they be restored: https://github.com/secdev/scapy/pull/1878 +# semantic names have more meaning than numbers. so here they are. 
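+# (ARP opcode values per RFC 826: 1 = request / "who-has", 2 = reply / "is-at")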
+ARP.who_has = 1 +ARP.is_at = 2 + def verify_filter(capture, sent): if not len(capture) == len(sent): @@ -48,8 +78,17 @@ def verify_mpls_stack(tst, rx, mpls_labels): rx_mpls = rx_mpls[MPLS].payload +@tag_fixme_vpp_workers class TestMPLS(VppTestCase): - """ MPLS Test Case """ + """MPLS Test Case""" + + @classmethod + def setUpClass(cls): + super(TestMPLS, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestMPLS, cls).tearDownClass() def setUp(self): super(TestMPLS, self).setUp() @@ -90,7 +129,6 @@ class TestMPLS(VppTestCase): for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() - i.ip6_disable() i.set_table_ip4(0) i.set_table_ip6(0) i.disable_mpls() @@ -99,15 +137,16 @@ class TestMPLS(VppTestCase): # the default of 64 matches the IP packet TTL default def create_stream_labelled_ip4( - self, - src_if, - mpls_labels, - ping=0, - ip_itf=None, - dst_ip=None, - chksum=None, - ip_ttl=64, - n=257): + self, + src_if, + mpls_labels, + ping=0, + ip_itf=None, + dst_ip=None, + chksum=None, + ip_ttl=64, + n=257, + ): self.reset_packet_infos() pkts = [] for i in range(0, n): @@ -116,25 +155,32 @@ class TestMPLS(VppTestCase): p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) for ii in range(len(mpls_labels)): - p = p / MPLS(label=mpls_labels[ii].value, - ttl=mpls_labels[ii].ttl, - cos=mpls_labels[ii].exp) + p = p / MPLS( + label=mpls_labels[ii].value, + ttl=mpls_labels[ii].ttl, + cos=mpls_labels[ii].exp, + ) if not ping: if not dst_ip: - p = (p / IP(src=src_if.local_ip4, - dst=src_if.remote_ip4, - ttl=ip_ttl) / - UDP(sport=1234, dport=1234) / - Raw(payload)) + p = ( + p + / IP(src=src_if.local_ip4, dst=src_if.remote_ip4, ttl=ip_ttl) + / UDP(sport=1234, dport=1234) + / Raw(payload) + ) else: - p = (p / IP(src=src_if.local_ip4, dst=dst_ip, ttl=ip_ttl) / - UDP(sport=1234, dport=1234) / - Raw(payload)) + p = ( + p + / IP(src=src_if.local_ip4, dst=dst_ip, ttl=ip_ttl) + / UDP(sport=1234, dport=1234) + / Raw(payload) + ) else: - p = (p / IP(src=ip_itf.remote_ip4, - dst=ip_itf.local_ip4, - ttl=ip_ttl) / - ICMP()) + p = ( + p + / IP(src=ip_itf.remote_ip4, dst=ip_itf.local_ip4, ttl=ip_ttl) + / ICMP() + ) if chksum: p[IP].chksum = chksum @@ -142,18 +188,23 @@ class TestMPLS(VppTestCase): pkts.append(p) return pkts - def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0): + def create_stream_ip4( + self, src_if, dst_ip, ip_ttl=64, ip_dscp=0, payload_size=None + ): self.reset_packet_infos() pkts = [] for i in range(0, 257): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) - p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / - IP(src=src_if.remote_ip4, dst=dst_ip, - ttl=ip_ttl, tos=ip_dscp) / - UDP(sport=1234, dport=1234) / - Raw(payload)) + p = ( + Ether(dst=src_if.local_mac, src=src_if.remote_mac) + / IP(src=src_if.remote_ip4, dst=dst_ip, ttl=ip_ttl, tos=ip_dscp) + / UDP(sport=1234, dport=1234) + / Raw(payload) + ) info.data = p.copy() + if payload_size: + self.extend_packet(p, payload_size) pkts.append(p) return pkts @@ -163,17 +214,19 @@ class TestMPLS(VppTestCase): for i in range(0, 257): info = self.create_packet_info(src_if, src_if) payload = self.info_to_payload(info) - p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / - IPv6(src=src_if.remote_ip6, dst=dst_ip, - hlim=ip_ttl, tc=ip_dscp) / - UDP(sport=1234, dport=1234) / - Raw(payload)) + p = ( + Ether(dst=src_if.local_mac, src=src_if.remote_mac) + / IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=ip_ttl, tc=ip_dscp) + / UDP(sport=1234, dport=1234) + / Raw(payload) + ) 
info.data = p.copy() pkts.append(p) return pkts - def create_stream_labelled_ip6(self, src_if, mpls_labels, - hlim=64, dst_ip=None): + def create_stream_labelled_ip6( + self, src_if, mpls_labels, hlim=64, dst_ip=None, ping=0, ip_itf=None + ): if dst_ip is None: dst_ip = src_if.remote_ip6 self.reset_packet_infos() @@ -185,15 +238,24 @@ class TestMPLS(VppTestCase): for l in mpls_labels: p = p / MPLS(label=l.value, ttl=l.ttl, cos=l.exp) - p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) / - UDP(sport=1234, dport=1234) / - Raw(payload)) + if ping: + p = p / ( + IPv6(src=ip_itf.remote_ip6, dst=ip_itf.local_ip6) + / ICMPv6EchoRequest() + ) + else: + p = p / ( + IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) + / UDP(sport=1234, dport=1234) + / Raw(payload) + ) info.data = p.copy() pkts.append(p) return pkts - def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0, - ip_ttl=None, ip_dscp=0): + def verify_capture_ip4( + self, src_if, capture, sent, ping_resp=0, ip_ttl=None, ip_dscp=0 + ): try: capture = verify_filter(capture, sent) @@ -226,8 +288,9 @@ class TestMPLS(VppTestCase): except: raise - def verify_capture_labelled_ip4(self, src_if, capture, sent, - mpls_labels, ip_ttl=None): + def verify_capture_labelled_ip4( + self, src_if, capture, sent, mpls_labels, ip_ttl=None + ): try: capture = verify_filter(capture, sent) @@ -252,8 +315,9 @@ class TestMPLS(VppTestCase): except: raise - def verify_capture_labelled_ip6(self, src_if, capture, sent, - mpls_labels, ip_ttl=None): + def verify_capture_labelled_ip6( + self, src_if, capture, sent, mpls_labels, ip_ttl=None + ): try: capture = verify_filter(capture, sent) @@ -300,8 +364,7 @@ class TestMPLS(VppTestCase): except: raise - def verify_capture_labelled(self, src_if, capture, sent, - mpls_labels): + def verify_capture_labelled(self, src_if, capture, sent, mpls_labels): try: capture = verify_filter(capture, sent) @@ -313,8 +376,9 @@ class TestMPLS(VppTestCase): except: raise - def verify_capture_ip6(self, src_if, capture, sent, - ip_hlim=None, ip_dscp=0): + def verify_capture_ip6( + self, src_if, capture, sent, ip_hlim=None, ip_dscp=0, ping_resp=0 + ): try: self.assertEqual(len(capture), len(sent)) @@ -329,21 +393,25 @@ class TestMPLS(VppTestCase): tx_ip = tx[IPv6] rx_ip = rx[IPv6] - self.assertEqual(rx_ip.src, tx_ip.src) - self.assertEqual(rx_ip.dst, tx_ip.dst) - self.assertEqual(rx_ip.tc, ip_dscp) - # IP processing post pop has decremented the TTL - if not ip_hlim: - self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) + if not ping_resp: + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + self.assertEqual(rx_ip.tc, ip_dscp) + # IP processing post pop has decremented the TTL + if not ip_hlim: + self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) + else: + self.assertEqual(rx_ip.hlim, ip_hlim) else: - self.assertEqual(rx_ip.hlim, ip_hlim) - + self.assertEqual(rx_ip.src, tx_ip.dst) + self.assertEqual(rx_ip.dst, tx_ip.src) except: raise def verify_capture_ip6_icmp(self, src_if, capture, sent): try: - self.assertEqual(len(capture), len(sent)) + # rate limited ICMP + self.assertTrue(len(capture) <= len(sent)) for i in range(len(capture)): tx = sent[i] @@ -360,84 +428,174 @@ class TestMPLS(VppTestCase): # ICMP sourced from the interface's address self.assertEqual(rx_ip.src, src_if.local_ip6) # hop-limit reset to 255 for IMCP packet - self.assertEqual(rx_ip.hlim, 254) + self.assertEqual(rx_ip.hlim, 255) icmp = rx[ICMPv6TimeExceeded] except: raise + def verify_capture_fragmented_labelled_ip4( + self, src_if, capture, 
sent, mpls_labels, ip_ttl=None + ): + try: + capture = verify_filter(capture, sent) + + for i in range(len(capture)): + tx = sent[0] + rx = capture[i] + tx_ip = tx[IP] + rx_ip = rx[IP] + + verify_mpls_stack(self, rx, mpls_labels) + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + if not ip_ttl: + # IP processing post pop has decremented the TTL + self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl) + else: + self.assertEqual(rx_ip.ttl, ip_ttl) + + except: + raise + + def verify_capture_fragmented_labelled_ip6( + self, src_if, capture, sent, mpls_labels, ip_ttl=None + ): + try: + capture = verify_filter(capture, sent) + + for i in range(len(capture)): + tx = sent[0] + rx = capture[i] + tx_ip = tx[IPv6] + rx.show() + rx_ip = IPv6(rx[MPLS].payload) + rx_ip.show() + + verify_mpls_stack(self, rx, mpls_labels) + + self.assertEqual(rx_ip.src, tx_ip.src) + self.assertEqual(rx_ip.dst, tx_ip.dst) + if not ip_ttl: + # IP processing post pop has decremented the hop-limit + self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim) + else: + self.assertEqual(rx_ip.hlim, ip_ttl) + except: + raise + def test_swap(self): - """ MPLS label swap tests """ + """MPLS label swap tests""" # # A simple MPLS xconnect - eos label in label out # - route_32_eos = VppMplsRoute(self, 32, 1, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(33)])]) + route_32_eos = VppMplsRoute( + self, + 32, + 1, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(33)] + ) + ], + ) route_32_eos.add_vpp_config() + self.assertTrue( + find_mpls_route( + self, + 0, + 32, + 1, + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(33)], + ) + ], + ) + ) + # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(32, ttl=32, exp=1)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(32, ttl=32, exp=1)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(33, ttl=31, exp=1)]) + self.verify_capture_labelled( + self.pg0, rx, tx, [VppMplsLabel(33, ttl=31, exp=1)] + ) - self.assertEqual(route_32_eos.get_stats_to()['packets'], 257) + self.assertEqual(route_32_eos.get_stats_to()["packets"], 257) # # A simple MPLS xconnect - non-eos label in label out # - route_32_neos = VppMplsRoute(self, 32, 0, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(33)])]) + route_32_neos = VppMplsRoute( + self, + 32, + 0, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(33)] + ) + ], + ) route_32_neos.add_vpp_config() # # a stream that matches the route for 10.0.0.1 # PG0 is in the default table # - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(32, ttl=21, exp=7), - VppMplsLabel(99)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(32, ttl=21, exp=7), VppMplsLabel(99)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(33, ttl=20, exp=7), - VppMplsLabel(99)]) - self.assertEqual(route_32_neos.get_stats_to()['packets'], 257) + self.verify_capture_labelled( + self.pg0, rx, tx, [VppMplsLabel(33, ttl=20, exp=7), VppMplsLabel(99)] + ) + self.assertEqual(route_32_neos.get_stats_to()["packets"], 257) # # A simple MPLS xconnect - non-eos label in label out, uniform mode # route_42_neos = VppMplsRoute( - self, 42, 0, - 
[VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(43, MplsLspMode.UNIFORM)])]) + self, + 42, + 0, + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(43, MplsLspMode.UNIFORM)], + ) + ], + ) route_42_neos.add_vpp_config() - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(42, ttl=21, exp=7), - VppMplsLabel(99)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(42, ttl=21, exp=7), VppMplsLabel(99)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(43, ttl=20, exp=7), - VppMplsLabel(99)]) + self.verify_capture_labelled( + self.pg0, rx, tx, [VppMplsLabel(43, ttl=20, exp=7), VppMplsLabel(99)] + ) # # An MPLS xconnect - EOS label in IP out # - route_33_eos = VppMplsRoute(self, 33, 1, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[])]) + route_33_eos = VppMplsRoute( + self, + 33, + 1, + [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[])], + ) route_33_eos.add_vpp_config() tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)]) @@ -445,32 +603,38 @@ class TestMPLS(VppTestCase): self.verify_capture_ip4(self.pg0, rx, tx) # - # disposed packets have an invalid IPv4 checkusm + # disposed packets have an invalid IPv4 checksum # - tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(33)], - dst_ip=self.pg0.remote_ip4, - n=65, - chksum=1) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(33)], dst_ip=self.pg0.remote_ip4, n=65, chksum=1 + ) self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum") # # An MPLS xconnect - EOS label in IP out, uniform mode # route_3333_eos = VppMplsRoute( - self, 3333, 1, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)])]) + self, + 3333, + 1, + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)], + ) + ], + ) route_3333_eos.add_vpp_config() tx = self.create_stream_labelled_ip4( - self.pg0, - [VppMplsLabel(3333, ttl=55, exp=3)]) + self.pg0, [VppMplsLabel(3333, ttl=55, exp=3)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=54, ip_dscp=0x60) tx = self.create_stream_labelled_ip4( - self.pg0, - [VppMplsLabel(3333, ttl=66, exp=4)]) + self.pg0, [VppMplsLabel(3333, ttl=66, exp=4)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) self.verify_capture_ip4(self.pg0, rx, tx, ip_ttl=65, ip_dscp=0x80) @@ -478,11 +642,12 @@ class TestMPLS(VppTestCase): # An MPLS xconnect - EOS label in IPv6 out # route_333_eos = VppMplsRoute( - self, 333, 1, - [VppRoutePath(self.pg0.remote_ip6, - self.pg0.sw_if_index, - labels=[], - proto=DpoProto.DPO_PROTO_IP6)]) + self, + 333, + 1, + [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, labels=[])], + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) route_333_eos.add_vpp_config() tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(333)]) @@ -492,26 +657,29 @@ class TestMPLS(VppTestCase): # # disposed packets have an TTL expired # - tx = self.create_stream_labelled_ip6(self.pg0, - [VppMplsLabel(333, ttl=64)], - dst_ip=self.pg1.remote_ip6, - hlim=1) - rx = self.send_and_expect(self.pg0, tx, self.pg0) + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(333, ttl=64)], dst_ip=self.pg1.remote_ip6, hlim=1 + ) + rx = self.send_and_expect_some(self.pg0, tx, self.pg0) self.verify_capture_ip6_icmp(self.pg0, rx, 
tx) # # An MPLS xconnect - EOS label in IPv6 out w imp-null # route_334_eos = VppMplsRoute( - self, 334, 1, - [VppRoutePath(self.pg0.remote_ip6, - self.pg0.sw_if_index, - labels=[VppMplsLabel(3)], - proto=DpoProto.DPO_PROTO_IP6)]) + self, + 334, + 1, + [ + VppRoutePath( + self.pg0.remote_ip6, self.pg0.sw_if_index, labels=[VppMplsLabel(3)] + ) + ], + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) route_334_eos.add_vpp_config() - tx = self.create_stream_labelled_ip6(self.pg0, - [VppMplsLabel(334, ttl=64)]) + tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334, ttl=64)]) rx = self.send_and_expect(self.pg0, tx, self.pg0) self.verify_capture_ip6(self.pg0, rx, tx) @@ -519,129 +687,169 @@ class TestMPLS(VppTestCase): # An MPLS xconnect - EOS label in IPv6 out w imp-null in uniform mode # route_335_eos = VppMplsRoute( - self, 335, 1, - [VppRoutePath(self.pg0.remote_ip6, - self.pg0.sw_if_index, - labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)], - proto=DpoProto.DPO_PROTO_IP6)]) + self, + 335, + 1, + [ + VppRoutePath( + self.pg0.remote_ip6, + self.pg0.sw_if_index, + labels=[VppMplsLabel(3, MplsLspMode.UNIFORM)], + ) + ], + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) route_335_eos.add_vpp_config() tx = self.create_stream_labelled_ip6( - self.pg0, - [VppMplsLabel(335, ttl=27, exp=4)]) + self.pg0, [VppMplsLabel(335, ttl=27, exp=4)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) self.verify_capture_ip6(self.pg0, rx, tx, ip_hlim=26, ip_dscp=0x80) # # disposed packets have an TTL expired # - tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(334)], - dst_ip=self.pg1.remote_ip6, - hlim=0) - rx = self.send_and_expect(self.pg0, tx, self.pg0) + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(334)], dst_ip=self.pg1.remote_ip6, hlim=0 + ) + rx = self.send_and_expect_some(self.pg0, tx, self.pg0) self.verify_capture_ip6_icmp(self.pg0, rx, tx) # # An MPLS xconnect - non-EOS label in IP out - an invalid configuration # so this traffic should be dropped. 
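         # (the bottom-of-stack bit is clear, so an IP header cannot follow this label)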
# - route_33_neos = VppMplsRoute(self, 33, 0, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[])]) + route_33_neos = VppMplsRoute( + self, + 33, + 0, + [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[])], + ) route_33_neos.add_vpp_config() - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(33), - VppMplsLabel(99)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(33), VppMplsLabel(99)] + ) self.send_and_assert_no_replies( - self.pg0, tx, - "MPLS non-EOS packets popped and forwarded") + self.pg0, tx, "MPLS non-EOS packets popped and forwarded" + ) # # A recursive EOS x-connect, which resolves through another x-connect # in pipe mode # - route_34_eos = VppMplsRoute(self, 34, 1, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_via_label=32, - labels=[VppMplsLabel(44), - VppMplsLabel(45)])]) + route_34_eos = VppMplsRoute( + self, + 34, + 1, + [ + VppRoutePath( + "0.0.0.0", + 0xFFFFFFFF, + nh_via_label=32, + labels=[VppMplsLabel(44), VppMplsLabel(45)], + ) + ], + ) route_34_eos.add_vpp_config() + self.logger.info(self.vapi.cli("sh mpls fib 34")) - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(34, ttl=3)]) + tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34, ttl=3)]) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(33), - VppMplsLabel(44), - VppMplsLabel(45, ttl=2)]) + self.verify_capture_labelled( + self.pg0, + rx, + tx, + [VppMplsLabel(33), VppMplsLabel(44), VppMplsLabel(45, ttl=2)], + ) - self.assertEqual(route_34_eos.get_stats_to()['packets'], 257) - self.assertEqual(route_32_neos.get_stats_via()['packets'], 257) + self.assertEqual(route_34_eos.get_stats_to()["packets"], 257) + self.assertEqual(route_32_neos.get_stats_via()["packets"], 257) # # A recursive EOS x-connect, which resolves through another x-connect # in uniform mode # route_35_eos = VppMplsRoute( - self, 35, 1, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_via_label=42, - labels=[VppMplsLabel(44)])]) + self, + 35, + 1, + [ + VppRoutePath( + "0.0.0.0", 0xFFFFFFFF, nh_via_label=42, labels=[VppMplsLabel(44)] + ) + ], + ) route_35_eos.add_vpp_config() - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(35, ttl=3)]) + tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(35, ttl=3)]) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(43, ttl=2), - VppMplsLabel(44, ttl=2)]) + self.verify_capture_labelled( + self.pg0, rx, tx, [VppMplsLabel(43, ttl=2), VppMplsLabel(44, ttl=2)] + ) # # A recursive non-EOS x-connect, which resolves through another # x-connect # - route_34_neos = VppMplsRoute(self, 34, 0, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_via_label=32, - labels=[VppMplsLabel(44), - VppMplsLabel(46)])]) + route_34_neos = VppMplsRoute( + self, + 34, + 0, + [ + VppRoutePath( + "0.0.0.0", + 0xFFFFFFFF, + nh_via_label=32, + labels=[VppMplsLabel(44), VppMplsLabel(46)], + ) + ], + ) route_34_neos.add_vpp_config() - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(34, ttl=45), - VppMplsLabel(99)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34, ttl=45), VppMplsLabel(99)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) # it's the 2nd (counting from 0) label in the stack that is swapped - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(33), - VppMplsLabel(44), - VppMplsLabel(46, ttl=44), - VppMplsLabel(99)]) + 
self.verify_capture_labelled( + self.pg0, + rx, + tx, + [ + VppMplsLabel(33), + VppMplsLabel(44), + VppMplsLabel(46, ttl=44), + VppMplsLabel(99), + ], + ) # # an recursive IP route that resolves through the recursive non-eos # x-connect # - ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_via_label=34, - labels=[VppMplsLabel(55)])]) + ip_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + [ + VppRoutePath( + "0.0.0.0", 0xFFFFFFFF, nh_via_label=34, labels=[VppMplsLabel(55)] + ) + ], + ) ip_10_0_0_1.add_vpp_config() tx = self.create_stream_ip4(self.pg0, "10.0.0.1") rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(33), - VppMplsLabel(44), - VppMplsLabel(46), - VppMplsLabel(55)]) - self.assertEqual(ip_10_0_0_1.get_stats_to()['packets'], 257) + self.verify_capture_labelled_ip4( + self.pg0, + rx, + tx, + [VppMplsLabel(33), VppMplsLabel(44), VppMplsLabel(46), VppMplsLabel(55)], + ) + self.assertEqual(ip_10_0_0_1.get_stats_to()["packets"], 257) ip_10_0_0_1.remove_vpp_config() route_34_neos.remove_vpp_config() @@ -652,15 +860,21 @@ class TestMPLS(VppTestCase): route_32_eos.remove_vpp_config() def test_bind(self): - """ MPLS Local Label Binding test """ + """MPLS Local Label Binding test""" # # Add a non-recursive route with a single out label # - route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(45)])]) + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(45)] + ) + ], + ) route_10_0_0_1.add_vpp_config() # bind a local label to the route @@ -668,19 +882,18 @@ class TestMPLS(VppTestCase): binding.add_vpp_config() # non-EOS stream - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(44), - VppMplsLabel(99)]) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(44), VppMplsLabel(99)] + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(45, ttl=63), - VppMplsLabel(99)]) + self.verify_capture_labelled( + self.pg0, rx, tx, [VppMplsLabel(45, ttl=63), VppMplsLabel(99)] + ) # EOS stream tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(44)]) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled(self.pg0, rx, tx, - [VppMplsLabel(45, ttl=63)]) + self.verify_capture_labelled(self.pg0, rx, tx, [VppMplsLabel(45, ttl=63)]) # IP stream tx = self.create_stream_ip4(self.pg0, "10.0.0.1") @@ -694,15 +907,21 @@ class TestMPLS(VppTestCase): route_10_0_0_1.remove_vpp_config() def test_imposition(self): - """ MPLS label imposition test """ + """MPLS label imposition test""" # # Add a non-recursive route with a single out label # - route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(32)])]) + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(32)] + ) + ], + ) route_10_0_0_1.add_vpp_config() # @@ -716,67 +935,86 @@ class TestMPLS(VppTestCase): # # Add a non-recursive route with a 3 out labels # - route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(32), - VppMplsLabel(33), - VppMplsLabel(34)])]) + route_10_0_0_2 = VppIpRoute( + self, + "10.0.0.2", + 32, + [ + 
VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(32), VppMplsLabel(33), VppMplsLabel(34)], + ) + ], + ) route_10_0_0_2.add_vpp_config() - tx = self.create_stream_ip4(self.pg0, "10.0.0.2", - ip_ttl=44, ip_dscp=0xff) + tx = self.create_stream_ip4(self.pg0, "10.0.0.2", ip_ttl=44, ip_dscp=0xFF) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(32), - VppMplsLabel(33), - VppMplsLabel(34)], - ip_ttl=43) + self.verify_capture_labelled_ip4( + self.pg0, + rx, + tx, + [VppMplsLabel(32), VppMplsLabel(33), VppMplsLabel(34)], + ip_ttl=43, + ) # # Add a non-recursive route with a single out label in uniform mode # route_10_0_0_3 = VppIpRoute( - self, "10.0.0.3", 32, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(32, - mode=MplsLspMode.UNIFORM)])]) + self, + "10.0.0.3", + 32, + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(32, mode=MplsLspMode.UNIFORM)], + ) + ], + ) route_10_0_0_3.add_vpp_config() - tx = self.create_stream_ip4(self.pg0, "10.0.0.3", - ip_ttl=54, ip_dscp=0xbe) + tx = self.create_stream_ip4(self.pg0, "10.0.0.3", ip_ttl=54, ip_dscp=0xBE) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(32, ttl=53, exp=5)]) + self.verify_capture_labelled_ip4( + self.pg0, rx, tx, [VppMplsLabel(32, ttl=53, exp=5)] + ) # # Add a IPv6 non-recursive route with a single out label in # uniform mode # route_2001_3 = VppIpRoute( - self, "2001::3", 128, - [VppRoutePath(self.pg0.remote_ip6, - self.pg0.sw_if_index, - proto=DpoProto.DPO_PROTO_IP6, - labels=[VppMplsLabel(32, - mode=MplsLspMode.UNIFORM)])], - is_ip6=1) + self, + "2001::3", + 128, + [ + VppRoutePath( + self.pg0.remote_ip6, + self.pg0.sw_if_index, + labels=[VppMplsLabel(32, mode=MplsLspMode.UNIFORM)], + ) + ], + ) route_2001_3.add_vpp_config() - tx = self.create_stream_ip6(self.pg0, "2001::3", - ip_ttl=54, ip_dscp=0xbe) + tx = self.create_stream_ip6(self.pg0, "2001::3", ip_ttl=54, ip_dscp=0xBE) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip6(self.pg0, rx, tx, - [VppMplsLabel(32, ttl=53, exp=5)]) + self.verify_capture_labelled_ip6( + self.pg0, rx, tx, [VppMplsLabel(32, ttl=53, exp=5)] + ) # # add a recursive path, with output label, via the 1 label route # - route_11_0_0_1 = VppIpRoute(self, "11.0.0.1", 32, - [VppRoutePath("10.0.0.1", - 0xffffffff, - labels=[VppMplsLabel(44)])]) + route_11_0_0_1 = VppIpRoute( + self, + "11.0.0.1", + 32, + [VppRoutePath("10.0.0.1", 0xFFFFFFFF, labels=[VppMplsLabel(44)])], + ) route_11_0_0_1.add_vpp_config() # @@ -785,20 +1023,25 @@ class TestMPLS(VppTestCase): # tx = self.create_stream_ip4(self.pg0, "11.0.0.1") rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(32), - VppMplsLabel(44)]) + self.verify_capture_labelled_ip4( + self.pg0, rx, tx, [VppMplsLabel(32), VppMplsLabel(44)] + ) - self.assertEqual(route_11_0_0_1.get_stats_to()['packets'], 257) + self.assertEqual(route_11_0_0_1.get_stats_to()["packets"], 257) # # add a recursive path, with 2 labels, via the 3 label route # - route_11_0_0_2 = VppIpRoute(self, "11.0.0.2", 32, - [VppRoutePath("10.0.0.2", - 0xffffffff, - labels=[VppMplsLabel(44), - VppMplsLabel(45)])]) + route_11_0_0_2 = VppIpRoute( + self, + "11.0.0.2", + 32, + [ + VppRoutePath( + "10.0.0.2", 0xFFFFFFFF, labels=[VppMplsLabel(44), VppMplsLabel(45)] + ) + ], + ) 
route_11_0_0_2.add_vpp_config() # @@ -807,24 +1050,36 @@ class TestMPLS(VppTestCase): # tx = self.create_stream_ip4(self.pg0, "11.0.0.2") rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(32), - VppMplsLabel(33), - VppMplsLabel(34), - VppMplsLabel(44), - VppMplsLabel(45)]) - - self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 257) + self.verify_capture_labelled_ip4( + self.pg0, + rx, + tx, + [ + VppMplsLabel(32), + VppMplsLabel(33), + VppMplsLabel(34), + VppMplsLabel(44), + VppMplsLabel(45), + ], + ) + + self.assertEqual(route_11_0_0_2.get_stats_to()["packets"], 257) rx = self.send_and_expect(self.pg0, tx, self.pg0) - self.verify_capture_labelled_ip4(self.pg0, rx, tx, - [VppMplsLabel(32), - VppMplsLabel(33), - VppMplsLabel(34), - VppMplsLabel(44), - VppMplsLabel(45)]) + self.verify_capture_labelled_ip4( + self.pg0, + rx, + tx, + [ + VppMplsLabel(32), + VppMplsLabel(33), + VppMplsLabel(34), + VppMplsLabel(44), + VppMplsLabel(45), + ], + ) - self.assertEqual(route_11_0_0_2.get_stats_to()['packets'], 514) + self.assertEqual(route_11_0_0_2.get_stats_to()["packets"], 514) # # cleanup @@ -834,27 +1089,110 @@ class TestMPLS(VppTestCase): route_10_0_0_2.remove_vpp_config() route_10_0_0_1.remove_vpp_config() + def test_imposition_fragmentation(self): + """MPLS label imposition fragmentation test""" + + # + # Add a ipv4 non-recursive route with a single out label + # + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(32)] + ) + ], + ) + route_10_0_0_1.add_vpp_config() + route_1000_1 = VppIpRoute( + self, + "1000::1", + 128, + [ + VppRoutePath( + self.pg0.remote_ip6, self.pg0.sw_if_index, labels=[VppMplsLabel(32)] + ) + ], + ) + route_1000_1.add_vpp_config() + + # + # a stream that matches the route for 10.0.0.1 + # PG0 is in the default table + # + tx = self.create_stream_ip4(self.pg0, "10.0.0.1") + for i in range(0, 257): + self.extend_packet(tx[i], 10000) + + # + # 5 fragments per packet (257*5=1285) + # + rx = self.send_and_expect(self.pg0, tx, self.pg0, 1285) + self.verify_capture_fragmented_labelled_ip4( + self.pg0, rx, tx, [VppMplsLabel(32)] + ) + + # packets with DF bit set generate ICMP + for t in tx: + t[IP].flags = "DF" + rxs = self.send_and_expect_some(self.pg0, tx, self.pg0) + + for rx in rxs: + self.assertEqual(icmptypes[rx[ICMP].type], "dest-unreach") + self.assertEqual( + icmpcodes[rx[ICMP].type][rx[ICMP].code], "fragmentation-needed" + ) + # the link MTU is 9000, the MPLS over head is 4 bytes + self.assertEqual(rx[ICMP].nexthopmtu, 9000 - 4) + + self.assertEqual( + self.statistics.get_err_counter("/err/mpls-frag/dont_fragment_set"), + len(tx), + ) + # + # a stream that matches the route for 1000::1/128 + # PG0 is in the default table + # + tx = self.create_stream_ip6(self.pg0, "1000::1") + for i in range(0, 257): + self.extend_packet(tx[i], 10000) + + rxs = self.send_and_expect_some(self.pg0, tx, self.pg0) + for rx in rxs: + self.assertEqual(rx[ICMPv6PacketTooBig].mtu, 9000 - 4) + + # + # cleanup + # + route_10_0_0_1.remove_vpp_config() + def test_tunnel_pipe(self): - """ MPLS Tunnel Tests - Pipe """ + """MPLS Tunnel Tests - Pipe""" # - # Create a tunnel with a single out label + # Create a tunnel with two out labels # mpls_tun = VppMPLSTunnelInterface( self, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(44), - VppMplsLabel(46)])]) + [ + VppRoutePath( + self.pg0.remote_ip4, + 
self.pg0.sw_if_index, + labels=[VppMplsLabel(44), VppMplsLabel(46)], + ) + ], + ) mpls_tun.add_vpp_config() mpls_tun.admin_up() # # add an unlabelled route through the new tunnel # - route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32, - [VppRoutePath("0.0.0.0", - mpls_tun._sw_if_index)]) + route_10_0_0_3 = VppIpRoute( + self, "10.0.0.3", 32, [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index)] + ) route_10_0_0_3.add_vpp_config() self.vapi.cli("clear trace") @@ -865,17 +1203,19 @@ class TestMPLS(VppTestCase): self.pg_start() rx = self.pg0.get_capture() - self.verify_capture_tunneled_ip4(self.pg0, rx, tx, - [VppMplsLabel(44), - VppMplsLabel(46)]) + self.verify_capture_tunneled_ip4( + self.pg0, rx, tx, [VppMplsLabel(44), VppMplsLabel(46)] + ) # # add a labelled route through the new tunnel # - route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32, - [VppRoutePath("0.0.0.0", - mpls_tun._sw_if_index, - labels=[33])]) + route_10_0_0_4 = VppIpRoute( + self, + "10.0.0.4", + 32, + [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index, labels=[33])], + ) route_10_0_0_4.add_vpp_config() self.vapi.cli("clear trace") @@ -886,13 +1226,47 @@ class TestMPLS(VppTestCase): self.pg_start() rx = self.pg0.get_capture() - self.verify_capture_tunneled_ip4(self.pg0, rx, tx, - [VppMplsLabel(44), - VppMplsLabel(46), - VppMplsLabel(33, ttl=255)]) + self.verify_capture_tunneled_ip4( + self.pg0, + rx, + tx, + [VppMplsLabel(44), VppMplsLabel(46), VppMplsLabel(33, ttl=255)], + ) + + # + # change tunnel's MTU to a low value + # + mpls_tun.set_l3_mtu(1200) + + # send IP into the tunnel to be fragmented + tx = self.create_stream_ip4(self.pg0, "10.0.0.3", payload_size=1500) + rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx) * 2) + + fake_tx = [] + for p in tx: + fake_tx.append(p) + fake_tx.append(p) + self.verify_capture_tunneled_ip4( + self.pg0, rx, fake_tx, [VppMplsLabel(44), VppMplsLabel(46)] + ) + + # send MPLS into the tunnel to be fragmented + tx = self.create_stream_ip4(self.pg0, "10.0.0.4", payload_size=1500) + rx = self.send_and_expect(self.pg0, tx, self.pg0, len(tx) * 2) + + fake_tx = [] + for p in tx: + fake_tx.append(p) + fake_tx.append(p) + self.verify_capture_tunneled_ip4( + self.pg0, + rx, + fake_tx, + [VppMplsLabel(44), VppMplsLabel(46), VppMplsLabel(33, ttl=255)], + ) def test_tunnel_uniform(self): - """ MPLS Tunnel Tests - Uniform """ + """MPLS Tunnel Tests - Uniform""" # # Create a tunnel with a single out label @@ -900,19 +1274,26 @@ class TestMPLS(VppTestCase): # mpls_tun = VppMPLSTunnelInterface( self, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(44, ttl=32), - VppMplsLabel(46, MplsLspMode.UNIFORM)])]) + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[ + VppMplsLabel(44, ttl=32), + VppMplsLabel(46, MplsLspMode.UNIFORM), + ], + ) + ], + ) mpls_tun.add_vpp_config() mpls_tun.admin_up() # # add an unlabelled route through the new tunnel # - route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32, - [VppRoutePath("0.0.0.0", - mpls_tun._sw_if_index)]) + route_10_0_0_3 = VppIpRoute( + self, "10.0.0.3", 32, [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index)] + ) route_10_0_0_3.add_vpp_config() self.vapi.cli("clear trace") @@ -923,18 +1304,23 @@ class TestMPLS(VppTestCase): self.pg_start() rx = self.pg0.get_capture() - self.verify_capture_tunneled_ip4(self.pg0, rx, tx, - [VppMplsLabel(44, ttl=32), - VppMplsLabel(46, ttl=23)]) + self.verify_capture_tunneled_ip4( + self.pg0, rx, tx, [VppMplsLabel(44, ttl=32), VppMplsLabel(46, ttl=23)] + ) # # add a labelled route 
through the new tunnel # route_10_0_0_4 = VppIpRoute( - self, "10.0.0.4", 32, - [VppRoutePath("0.0.0.0", - mpls_tun._sw_if_index, - labels=[VppMplsLabel(33, ttl=47)])]) + self, + "10.0.0.4", + 32, + [ + VppRoutePath( + "0.0.0.0", mpls_tun._sw_if_index, labels=[VppMplsLabel(33, ttl=47)] + ) + ], + ) route_10_0_0_4.add_vpp_config() self.vapi.cli("clear trace") @@ -945,35 +1331,63 @@ class TestMPLS(VppTestCase): self.pg_start() rx = self.pg0.get_capture() - self.verify_capture_tunneled_ip4(self.pg0, rx, tx, - [VppMplsLabel(44, ttl=32), - VppMplsLabel(46, ttl=47), - VppMplsLabel(33, ttl=47)]) + self.verify_capture_tunneled_ip4( + self.pg0, + rx, + tx, + [ + VppMplsLabel(44, ttl=32), + VppMplsLabel(46, ttl=47), + VppMplsLabel(33, ttl=47), + ], + ) def test_mpls_tunnel_many(self): - """ Multiple Tunnels """ + """MPLS Multiple Tunnels""" - for ii in range(10): + for ii in range(100): + mpls_tun = VppMPLSTunnelInterface( + self, + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[ + VppMplsLabel(44, ttl=32), + VppMplsLabel(46, MplsLspMode.UNIFORM), + ], + ) + ], + ) + mpls_tun.add_vpp_config() + mpls_tun.admin_up() + for ii in range(100): mpls_tun = VppMPLSTunnelInterface( self, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(44, ttl=32), - VppMplsLabel(46, MplsLspMode.UNIFORM)])]) + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[ + VppMplsLabel(44, ttl=32), + VppMplsLabel(46, MplsLspMode.UNIFORM), + ], + ) + ], + is_l2=1, + ) mpls_tun.add_vpp_config() mpls_tun.admin_up() def test_v4_exp_null(self): - """ MPLS V4 Explicit NULL test """ + """MPLS V4 Explicit NULL test""" # # The first test case has an MPLS TTL of 0 # all packet should be dropped # - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(0, ttl=0)]) - self.send_and_assert_no_replies(self.pg0, tx, - "MPLS TTL=0 packets forwarded") + tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(0, ttl=0)]) + self.send_and_assert_no_replies(self.pg0, tx, "MPLS TTL=0 packets forwarded") # # a stream with a non-zero MPLS TTL @@ -993,7 +1407,7 @@ class TestMPLS(VppTestCase): self.verify_capture_ip4(self.pg1, rx, tx) def test_v6_exp_null(self): - """ MPLS V6 Explicit NULL test """ + """MPLS V6 Explicit NULL test""" # # a stream with a non-zero MPLS TTL @@ -1013,36 +1427,41 @@ class TestMPLS(VppTestCase): self.verify_capture_ip6(self.pg0, rx, tx) def test_deag(self): - """ MPLS Deagg """ + """MPLS Deagg""" # # A de-agg route - next-hop lookup in default table # - route_34_eos = VppMplsRoute(self, 34, 1, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_table_id=0)]) + route_34_eos = VppMplsRoute( + self, 34, 1, [VppRoutePath("0.0.0.0", 0xFFFFFFFF, nh_table_id=0)] + ) route_34_eos.add_vpp_config() # # ping an interface in the default table # PG0 is in the default table # - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(34)], - ping=1, - ip_itf=self.pg0) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], ping=1, ip_itf=self.pg0 + ) rx = self.send_and_expect(self.pg0, tx, self.pg0) self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1) # # A de-agg route - next-hop lookup in non-default table # - route_35_eos = VppMplsRoute(self, 35, 1, - [VppRoutePath("0.0.0.0", - 0xffffffff, - nh_table_id=1)]) + route_35_eos = VppMplsRoute( + self, 35, 1, [VppRoutePath("0.0.0.0", 0xFFFFFFFF, nh_table_id=1)] + ) route_35_eos.add_vpp_config() + route_356_eos = VppMplsRoute( + self, + 356, + 1, + [VppRoutePath("0::0", 0xFFFFFFFF, 
nh_table_id=1)], + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) + route_356_eos.add_vpp_config() # # ping an interface in the non-default table @@ -1050,22 +1469,25 @@ class TestMPLS(VppTestCase): # default table and egress unlabelled in the non-default # tx = self.create_stream_labelled_ip4( - self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1) + self.pg0, [VppMplsLabel(35)], ping=1, ip_itf=self.pg1 + ) rx = self.send_and_expect(self.pg0, tx, self.pg1) self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1) + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(356)], ping=1, ip_itf=self.pg1 + ) + rx = self.send_and_expect(self.pg0, tx, self.pg1) + self.verify_capture_ip6(self.pg1, rx, tx, ping_resp=1) # # Double pop # - route_36_neos = VppMplsRoute(self, 36, 0, - [VppRoutePath("0.0.0.0", - 0xffffffff)]) + route_36_neos = VppMplsRoute(self, 36, 0, [VppRoutePath("0.0.0.0", 0xFFFFFFFF)]) route_36_neos.add_vpp_config() - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(36), - VppMplsLabel(35)], - ping=1, ip_itf=self.pg1) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(36), VppMplsLabel(35)], ping=1, ip_itf=self.pg1 + ) rx = self.send_and_expect(self.pg0, tx, self.pg1) self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1) @@ -1074,16 +1496,19 @@ class TestMPLS(VppTestCase): route_34_eos.remove_vpp_config() def test_interface_rx(self): - """ MPLS Interface Receive """ + """MPLS Interface Receive""" # # Add a non-recursive route that will forward the traffic # post-interface-rx # - route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, - table_id=1, - paths=[VppRoutePath(self.pg1.remote_ip4, - self.pg1.sw_if_index)]) + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + table_id=1, + paths=[VppRoutePath(self.pg1.remote_ip4, self.pg1.sw_if_index)], + ) route_10_0_0_1.add_vpp_config() # @@ -1094,33 +1519,44 @@ class TestMPLS(VppTestCase): # if the packet egresses, then we must have swapped to pg1 # so as to have matched the route in table 1 # - route_34_eos = VppMplsRoute(self, 34, 1, - [VppRoutePath("0.0.0.0", - self.pg1.sw_if_index, - is_interface_rx=1)]) + route_34_eos = VppMplsRoute( + self, + 34, + 1, + [ + VppRoutePath( + "0.0.0.0", + self.pg1.sw_if_index, + type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX, + ) + ], + ) route_34_eos.add_vpp_config() # # ping an interface in the default table # PG0 is in the default table # - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(34)], - dst_ip="10.0.0.1") + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], dst_ip="10.0.0.1" + ) rx = self.send_and_expect(self.pg0, tx, self.pg1) self.verify_capture_ip4(self.pg1, rx, tx) def test_mcast_mid_point(self): - """ MPLS Multicast Mid Point """ + """MPLS Multicast Mid Point""" # # Add a non-recursive route that will forward the traffic # post-interface-rx # - route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32, - table_id=1, - paths=[VppRoutePath(self.pg1.remote_ip4, - self.pg1.sw_if_index)]) + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + table_id=1, + paths=[VppRoutePath(self.pg1.remote_ip4, self.pg1.sw_if_index)], + ) route_10_0_0_1.add_vpp_config() # @@ -1128,17 +1564,28 @@ class TestMPLS(VppTestCase): # and replicate to a interface-rx (like a bud node would) # route_3400_eos = VppMplsRoute( - self, 3400, 1, - [VppRoutePath(self.pg2.remote_ip4, - self.pg2.sw_if_index, - labels=[VppMplsLabel(3401)]), - VppRoutePath(self.pg3.remote_ip4, - self.pg3.sw_if_index, - labels=[VppMplsLabel(3402)]), - 
VppRoutePath("0.0.0.0", - self.pg1.sw_if_index, - is_interface_rx=1)], - is_multicast=1) + self, + 3400, + 1, + [ + VppRoutePath( + self.pg2.remote_ip4, + self.pg2.sw_if_index, + labels=[VppMplsLabel(3401)], + ), + VppRoutePath( + self.pg3.remote_ip4, + self.pg3.sw_if_index, + labels=[VppMplsLabel(3402)], + ), + VppRoutePath( + "0.0.0.0", + self.pg1.sw_if_index, + type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX, + ), + ], + is_multicast=1, + ) route_3400_eos.add_vpp_config() # @@ -1146,10 +1593,9 @@ class TestMPLS(VppTestCase): # PG0 is in the default table # self.vapi.cli("clear trace") - tx = self.create_stream_labelled_ip4(self.pg0, - [VppMplsLabel(3400, ttl=64)], - n=257, - dst_ip="10.0.0.1") + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(3400, ttl=64)], n=257, dst_ip="10.0.0.1" + ) self.pg0.add_stream(tx) self.pg_enable_capture(self.pg_interfaces) @@ -1159,36 +1605,40 @@ class TestMPLS(VppTestCase): self.verify_capture_ip4(self.pg1, rx, tx) rx = self.pg2.get_capture(257) - self.verify_capture_labelled(self.pg2, rx, tx, - [VppMplsLabel(3401, ttl=63)]) + self.verify_capture_labelled(self.pg2, rx, tx, [VppMplsLabel(3401, ttl=63)]) rx = self.pg3.get_capture(257) - self.verify_capture_labelled(self.pg3, rx, tx, - [VppMplsLabel(3402, ttl=63)]) + self.verify_capture_labelled(self.pg3, rx, tx, [VppMplsLabel(3402, ttl=63)]) def test_mcast_head(self): - """ MPLS Multicast Head-end """ + """MPLS Multicast Head-end""" + + MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t + MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t # # Create a multicast tunnel with two replications # mpls_tun = VppMPLSTunnelInterface( self, - [VppRoutePath(self.pg2.remote_ip4, - self.pg2.sw_if_index, - labels=[VppMplsLabel(42)]), - VppRoutePath(self.pg3.remote_ip4, - self.pg3.sw_if_index, - labels=[VppMplsLabel(43)])], - is_multicast=1) + [ + VppRoutePath( + self.pg2.remote_ip4, self.pg2.sw_if_index, labels=[VppMplsLabel(42)] + ), + VppRoutePath( + self.pg3.remote_ip4, self.pg3.sw_if_index, labels=[VppMplsLabel(43)] + ), + ], + is_multicast=1, + ) mpls_tun.add_vpp_config() mpls_tun.admin_up() # # add an unlabelled route through the new tunnel # - route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32, - [VppRoutePath("0.0.0.0", - mpls_tun._sw_if_index)]) + route_10_0_0_3 = VppIpRoute( + self, "10.0.0.3", 32, [VppRoutePath("0.0.0.0", mpls_tun._sw_if_index)] + ) route_10_0_0_3.add_vpp_config() self.vapi.cli("clear trace") @@ -1211,13 +1661,20 @@ class TestMPLS(VppTestCase): route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", - "232.1.1.1", 32, - MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, - [VppMRoutePath(self.pg0.sw_if_index, - MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), - VppMRoutePath(mpls_tun._sw_if_index, - MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) + "232.1.1.1", + 32, + MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE, + [ + VppMRoutePath( + self.pg0.sw_if_index, MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT + ), + VppMRoutePath( + mpls_tun._sw_if_index, MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD + ), + ], + ) route_232_1_1_1.add_vpp_config() + self.logger.info(self.vapi.cli("sh ip mfib index 0")) self.vapi.cli("clear trace") tx = self.create_stream_ip4(self.pg0, "232.1.1.1") @@ -1232,7 +1689,10 @@ class TestMPLS(VppTestCase): self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [VppMplsLabel(43)]) def test_mcast_ip4_tail(self): - """ MPLS IPv4 Multicast Tail """ + """MPLS IPv4 Multicast Tail""" + + MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t + MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t # # Add a multicast route that will 
forward the traffic @@ -1241,11 +1701,16 @@ class TestMPLS(VppTestCase): route_232_1_1_1 = VppIpMRoute( self, "0.0.0.0", - "232.1.1.1", 32, - MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, + "232.1.1.1", + 32, + MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE, table_id=1, - paths=[VppMRoutePath(self.pg1.sw_if_index, - MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)]) + paths=[ + VppMRoutePath( + self.pg1.sw_if_index, MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD + ) + ], + ) route_232_1_1_1.add_vpp_config() # @@ -1256,12 +1721,14 @@ class TestMPLS(VppTestCase): # if the packet egresses, then we must have matched the route in # table 1 # - route_34_eos = VppMplsRoute(self, 34, 1, - [VppRoutePath("0.0.0.0", - self.pg1.sw_if_index, - nh_table_id=1, - rpf_id=55)], - is_multicast=1) + route_34_eos = VppMplsRoute( + self, + 34, + 1, + [VppRoutePath("0.0.0.0", 0xFFFFFFFF, nh_table_id=1, rpf_id=55)], + is_multicast=1, + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP4, + ) route_34_eos.add_vpp_config() @@ -1269,38 +1736,45 @@ class TestMPLS(VppTestCase): # Drop due to interface lookup miss # self.vapi.cli("clear trace") - tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)], - dst_ip="232.1.1.1", n=1) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], dst_ip="232.1.1.1", n=1 + ) self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop none") # - # set the RPF-ID of the enrtry to match the input packet's + # set the RPF-ID of the entry to match the input packet's # route_232_1_1_1.update_rpf_id(55) + self.logger.info(self.vapi.cli("sh ip mfib index 1 232.1.1.1")) - tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)], - dst_ip="232.1.1.1") + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], dst_ip="232.1.1.1" + ) rx = self.send_and_expect(self.pg0, tx, self.pg1) self.verify_capture_ip4(self.pg1, rx, tx) # - # disposed packets have an invalid IPv4 checkusm + # disposed packets have an invalid IPv4 checksum # - tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)], - dst_ip="232.1.1.1", n=65, - chksum=1) + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], dst_ip="232.1.1.1", n=65, chksum=1 + ) self.send_and_assert_no_replies(self.pg0, tx, "Invalid Checksum") # # set the RPF-ID of the entry to not match the input packet's # route_232_1_1_1.update_rpf_id(56) - tx = self.create_stream_labelled_ip4(self.pg0, [VppMplsLabel(34)], - dst_ip="232.1.1.1") + tx = self.create_stream_labelled_ip4( + self.pg0, [VppMplsLabel(34)], dst_ip="232.1.1.1" + ) self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56") def test_mcast_ip6_tail(self): - """ MPLS IPv6 Multicast Tail """ + """MPLS IPv6 Multicast Tail""" + + MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t + MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t # # Add a multicast route that will forward the traffic @@ -1309,12 +1783,18 @@ class TestMPLS(VppTestCase): route_ff = VppIpMRoute( self, "::", - "ff01::1", 32, - MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, + "ff01::1", + 32, + MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE, table_id=1, - paths=[VppMRoutePath(self.pg1.sw_if_index, - MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)], - is_ip6=1) + paths=[ + VppMRoutePath( + self.pg1.sw_if_index, + MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD, + proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) + ], + ) route_ff.add_vpp_config() # @@ -1326,55 +1806,150 @@ class TestMPLS(VppTestCase): # table 1 # route_34_eos = VppMplsRoute( - self, 34, 1, - [VppRoutePath("::", - self.pg1.sw_if_index, - nh_table_id=1, - rpf_id=55, 
- proto=DpoProto.DPO_PROTO_IP6)], - is_multicast=1) + self, + 34, + 1, + [VppRoutePath("::", 0xFFFFFFFF, nh_table_id=1, rpf_id=55)], + is_multicast=1, + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_IP6, + ) route_34_eos.add_vpp_config() # # Drop due to interface lookup miss # - tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)], - dst_ip="ff01::1") + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(34)], dst_ip="ff01::1" + ) self.send_and_assert_no_replies(self.pg0, tx, "RPF Miss") # - # set the RPF-ID of the enrtry to match the input packet's + # set the RPF-ID of the entry to match the input packet's # route_ff.update_rpf_id(55) - tx = self.create_stream_labelled_ip6(self.pg0, [VppMplsLabel(34)], - dst_ip="ff01::1") + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(34)], dst_ip="ff01::1" + ) rx = self.send_and_expect(self.pg0, tx, self.pg1) self.verify_capture_ip6(self.pg1, rx, tx) # # disposed packets have hop-limit = 1 # - tx = self.create_stream_labelled_ip6(self.pg0, - [VppMplsLabel(34)], - dst_ip="ff01::1", - hlim=1) - rx = self.send_and_expect(self.pg0, tx, self.pg0) + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(34)], dst_ip="ff01::1", hlim=1 + ) + rx = self.send_and_expect_some(self.pg0, tx, self.pg0) self.verify_capture_ip6_icmp(self.pg0, rx, tx) # - # set the RPF-ID of the enrtry to not match the input packet's + # set the RPF-ID of the entry to not match the input packet's # route_ff.update_rpf_id(56) - tx = self.create_stream_labelled_ip6(self.pg0, - [VppMplsLabel(34)], - dst_ip="ff01::1") + tx = self.create_stream_labelled_ip6( + self.pg0, [VppMplsLabel(34)], dst_ip="ff01::1" + ) self.send_and_assert_no_replies(self.pg0, tx, "RPF-ID drop 56") + def test_6pe(self): + """MPLS 6PE""" + + # + # Add a non-recursive route with a single out label + # + route_10_0_0_1 = VppIpRoute( + self, + "10.0.0.1", + 32, + [ + VppRoutePath( + self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(45)] + ) + ], + ) + route_10_0_0_1.add_vpp_config() + + # bind a local label to the route + binding = VppMplsIpBind(self, 44, "10.0.0.1", 32) + binding.add_vpp_config() + + # + # a labelled v6 route that resolves through the v4 + # + route_2001_3 = VppIpRoute( + self, + "2001::3", + 128, + [VppRoutePath("10.0.0.1", INVALID_INDEX, labels=[VppMplsLabel(32)])], + ) + route_2001_3.add_vpp_config() + + tx = self.create_stream_ip6(self.pg0, "2001::3") + rx = self.send_and_expect(self.pg0, tx, self.pg0) + + self.verify_capture_labelled_ip6( + self.pg0, rx, tx, [VppMplsLabel(45), VppMplsLabel(32)] + ) + + # + # and a v4 recursive via the v6 + # + route_20_3 = VppIpRoute( + self, + "20.0.0.3", + 32, + [VppRoutePath("2001::3", INVALID_INDEX, labels=[VppMplsLabel(99)])], + ) + route_20_3.add_vpp_config() + + tx = self.create_stream_ip4(self.pg0, "20.0.0.3") + rx = self.send_and_expect(self.pg0, tx, self.pg0) + + self.verify_capture_labelled_ip4( + self.pg0, rx, tx, [VppMplsLabel(45), VppMplsLabel(32), VppMplsLabel(99)] + ) + + def test_attached(self): + """Attach Routes with Local Label""" + + # + # test that if a local label is associated with an attached/connected + # prefix, that we can reach hosts in the prefix. 
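+        # (i.e. the label must resolve via the host adjacency, not the glean)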
+ # + binding = VppMplsIpBind( + self, 44, self.pg0._local_ip4_subnet, self.pg0.local_ip4_prefix_len + ) + binding.add_vpp_config() + + tx = ( + Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) + / MPLS(label=44, ttl=64) + / IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) + / UDP(sport=1234, dport=1234) + / Raw(b"\xa5" * 100) + ) + rxs = self.send_and_expect(self.pg0, [tx], self.pg0) + for rx in rxs: + # if there's an ARP then the label is linked to the glean + # which is wrong. + self.assertFalse(rx.haslayer(ARP)) + # it should be unicasted to the host + self.assertEqual(rx[Ether].dst, self.pg0.remote_mac) + self.assertEqual(rx[IP].dst, self.pg0.remote_ip4) + class TestMPLSDisabled(VppTestCase): - """ MPLS disabled """ + """MPLS disabled""" + + @classmethod + def setUpClass(cls): + super(TestMPLSDisabled, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestMPLSDisabled, cls).tearDownClass() def setUp(self): super(TestMPLSDisabled, self).setUp() @@ -1385,7 +1960,7 @@ class TestMPLSDisabled(VppTestCase): self.tbl = VppMplsTable(self, 0) self.tbl.add_vpp_config() - # PG0 is MPLS enalbed + # PG0 is MPLS enabled self.pg0.admin_up() self.pg0.config_ip4() self.pg0.resolve_arp() @@ -1403,22 +1978,29 @@ class TestMPLSDisabled(VppTestCase): super(TestMPLSDisabled, self).tearDown() def test_mpls_disabled(self): - """ MPLS Disabled """ + """MPLS Disabled""" + + self.logger.info(self.vapi.cli("show mpls interface")) + self.logger.info(self.vapi.cli("show mpls interface pg1")) + self.logger.info(self.vapi.cli("show mpls interface pg0")) - tx = (Ether(src=self.pg1.remote_mac, - dst=self.pg1.local_mac) / - MPLS(label=32, ttl=64) / - IPv6(src="2001::1", dst=self.pg0.remote_ip6) / - UDP(sport=1234, dport=1234) / - Raw('\xa5' * 100)) + tx = ( + Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) + / MPLS(label=32, ttl=64) + / IPv6(src="2001::1", dst=self.pg0.remote_ip6) + / UDP(sport=1234, dport=1234) + / Raw(b"\xa5" * 100) + ) # # A simple MPLS xconnect - eos label in label out # - route_32_eos = VppMplsRoute(self, 32, 1, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[33])]) + route_32_eos = VppMplsRoute( + self, + 32, + 1, + [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[33])], + ) route_32_eos.add_vpp_config() # @@ -1431,6 +2013,9 @@ class TestMPLSDisabled(VppTestCase): # self.pg1.enable_mpls() + self.logger.info(self.vapi.cli("show mpls interface")) + self.logger.info(self.vapi.cli("show mpls interface pg1")) + # # Now we get packets through # @@ -1453,7 +2038,15 @@ class TestMPLSDisabled(VppTestCase): class TestMPLSPIC(VppTestCase): - """ MPLS PIC edge convergence """ + """MPLS Prefix-Independent Convergence (PIC) edge convergence""" + + @classmethod + def setUpClass(cls): + super(TestMPLSPIC, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestMPLSPIC, cls).tearDownClass() def setUp(self): super(TestMPLSPIC, self).setUp() @@ -1473,6 +2066,7 @@ class TestMPLSPIC(VppTestCase): self.pg0.config_ip4() self.pg0.resolve_arp() self.pg0.enable_mpls() + self.pg1.admin_up() self.pg1.config_ip4() self.pg1.resolve_arp() @@ -1486,6 +2080,7 @@ class TestMPLSPIC(VppTestCase): self.pg2.set_table_ip6(1) self.pg2.config_ip6() self.pg2.resolve_ndp() + self.pg3.admin_up() self.pg3.set_table_ip4(1) self.pg3.config_ip4() @@ -1506,7 +2101,7 @@ class TestMPLSPIC(VppTestCase): super(TestMPLSPIC, self).tearDown() def test_mpls_ibgp_pic(self): - """ MPLS iBGP PIC edge convergence + """MPLS iBGP Prefix-Independent Convergence (PIC) 
edge convergence 1) setup many iBGP VPN routes via a pair of iBGP peers. 2) Check EMCP forwarding to these peers @@ -1517,16 +2112,20 @@ class TestMPLSPIC(VppTestCase): # # IGP+LDP core routes # - core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[45])]) + core_10_0_0_45 = VppIpRoute( + self, + "10.0.0.45", + 32, + [VppRoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[45])], + ) core_10_0_0_45.add_vpp_config() - core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32, - [VppRoutePath(self.pg1.remote_ip4, - self.pg1.sw_if_index, - labels=[46])]) + core_10_0_0_46 = VppIpRoute( + self, + "10.0.0.46", + 32, + [VppRoutePath(self.pg1.remote_ip4, self.pg1.sw_if_index, labels=[46])], + ) core_10_0_0_46.add_vpp_config() # @@ -1535,25 +2134,38 @@ class TestMPLSPIC(VppTestCase): # vpn_routes = [] pkts = [] - for ii in range(64): + for ii in range(NUM_PKTS): dst = "192.168.1.%d" % ii - vpn_routes.append(VppIpRoute(self, dst, 32, - [VppRoutePath("10.0.0.45", - 0xffffffff, - labels=[145], - is_resolve_host=1), - VppRoutePath("10.0.0.46", - 0xffffffff, - labels=[146], - is_resolve_host=1)], - table_id=1)) + vpn_routes.append( + VppIpRoute( + self, + dst, + 32, + [ + VppRoutePath( + "10.0.0.45", + 0xFFFFFFFF, + labels=[145], + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST, + ), + VppRoutePath( + "10.0.0.46", + 0xFFFFFFFF, + labels=[146], + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_HOST, + ), + ], + table_id=1, + ) + ) vpn_routes[ii].add_vpp_config() - pkts.append(Ether(dst=self.pg2.local_mac, - src=self.pg2.remote_mac) / - IP(src=self.pg2.remote_ip4, dst=dst) / - UDP(sport=1234, dport=1234) / - Raw('\xa5' * 100)) + pkts.append( + Ether(dst=self.pg2.local_mac, src=self.pg2.remote_mac) + / IP(src=self.pg2.remote_ip4, dst=dst) + / UDP(sport=1234, dport=1234) + / Raw(b"\xa5" * 100) + ) # # Send the packet stream (one pkt to each VPN route) @@ -1563,18 +2175,24 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg0._get_capture(1) - rx1 = self.pg1._get_capture(1) + rx0 = self.pg0._get_capture(NUM_PKTS) + rx1 = self.pg1._get_capture(NUM_PKTS) - # not testig the LB hashing algorithm so we're not concerned + # not testing the LB hashing algorithm so we're not concerned # with the split ratio, just as long as neither is 0 self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)), + ) # # use a test CLI command to stop the FIB walk process, this # will prevent the FIB converging the VPN routes and thus allow - # us to probe the interim (psot-fail, pre-converge) state + # us to probe the interim (post-fail, pre-converge) state # self.vapi.ppcli("test fib-walk-process disable") @@ -1591,7 +2209,13 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg0.get_capture(len(pkts)) + rx0 = self.pg0.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." 
% (len(pkts), len(rx0)), + ) # # enable the FIB walk process to converge the FIB @@ -1605,7 +2229,13 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg0.get_capture(64) + rx0 = self.pg0.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." % (len(pkts), len(rx0)), + ) # # Add the IGP route back and we return to load-balancing @@ -1616,15 +2246,21 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg0._get_capture(1) - rx1 = self.pg1._get_capture(1) + rx0 = self.pg0._get_capture(NUM_PKTS) + rx1 = self.pg1._get_capture(NUM_PKTS) self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)), + ) def test_mpls_ebgp_pic(self): - """ MPLS eBGP PIC edge convergence + """MPLS eBGP Prefix-Independent Convergence (PIC) edge convergence - 1) setup many eBGP VPN routes via a pair of eBGP peers + 1) setup many eBGP VPN routes via a pair of eBGP peers. 2) Check EMCP forwarding to these peers 3) withdraw one eBGP path - expect LB across remaining eBGP """ @@ -1636,45 +2272,72 @@ class TestMPLSPIC(VppTestCase): vpn_routes = [] vpn_bindings = [] pkts = [] - for ii in range(64): + for ii in range(NUM_PKTS): dst = "192.168.1.%d" % ii local_label = 1600 + ii - vpn_routes.append(VppIpRoute(self, dst, 32, - [VppRoutePath(self.pg2.remote_ip4, - 0xffffffff, - nh_table_id=1, - is_resolve_attached=1), - VppRoutePath(self.pg3.remote_ip4, - 0xffffffff, - nh_table_id=1, - is_resolve_attached=1)], - table_id=1)) + vpn_routes.append( + VppIpRoute( + self, + dst, + 32, + [ + VppRoutePath( + self.pg2.remote_ip4, + 0xFFFFFFFF, + nh_table_id=1, + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED, + ), + VppRoutePath( + self.pg3.remote_ip4, + 0xFFFFFFFF, + nh_table_id=1, + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED, + ), + ], + table_id=1, + ) + ) vpn_routes[ii].add_vpp_config() - vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32, - ip_table_id=1)) + vpn_bindings.append( + VppMplsIpBind(self, local_label, dst, 32, ip_table_id=1) + ) vpn_bindings[ii].add_vpp_config() - pkts.append(Ether(dst=self.pg0.local_mac, - src=self.pg0.remote_mac) / - MPLS(label=local_label, ttl=64) / - IP(src=self.pg0.remote_ip4, dst=dst) / - UDP(sport=1234, dport=1234) / - Raw('\xa5' * 100)) + pkts.append( + Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) + / MPLS(label=local_label, ttl=64) + / IP(src=self.pg0.remote_ip4, dst=dst) + / UDP(sport=1234, dport=1234) + / Raw(b"\xa5" * 100) + ) + # + # Send the packet stream (one pkt to each VPN route) + # - expect a 50-50 split of the traffic + # self.pg0.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg2._get_capture(1) - rx1 = self.pg3._get_capture(1) + rx0 = self.pg2._get_capture(NUM_PKTS) + rx1 = self.pg3._get_capture(NUM_PKTS) + + # not testing the LB hashing algorithm so we're not concerned + # with the split ratio, just as long as neither is 0 self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." 
% (len(pkts), len(rx0), len(rx1)), + ) # # use a test CLI command to stop the FIB walk process, this # will prevent the FIB converging the VPN routes and thus allow - # us to probe the interim (psot-fail, pre-converge) state + # us to probe the interim (post-fail, pre-converge) state # self.vapi.ppcli("test fib-walk-process disable") @@ -1690,34 +2353,57 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg3.get_capture(len(pkts)) + rx0 = self.pg3.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." % (len(pkts), len(rx0)), + ) # # enable the FIB walk process to converge the FIB # self.vapi.ppcli("test fib-walk-process enable") + + # + # packets should still be forwarded through the remaining peer + # self.pg0.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg3.get_capture(len(pkts)) + rx0 = self.pg3.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." % (len(pkts), len(rx0)), + ) # - # put the connecteds back + # put the connected routes back # self.pg2.config_ip4() + self.pg2.resolve_arp() self.pg0.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg2._get_capture(1) - rx1 = self.pg3._get_capture(1) + rx0 = self.pg2._get_capture(NUM_PKTS) + rx1 = self.pg3._get_capture(NUM_PKTS) self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)), + ) def test_mpls_v6_ebgp_pic(self): - """ MPLSv6 eBGP PIC edge convergence + """MPLSv6 eBGP Prefix-Independent Convergence (PIC) edge convergence 1) setup many eBGP VPNv6 routes via a pair of eBGP peers 2) Check EMCP forwarding to these peers @@ -1731,50 +2417,66 @@ class TestMPLSPIC(VppTestCase): vpn_routes = [] vpn_bindings = [] pkts = [] - for ii in range(64): + for ii in range(NUM_PKTS): dst = "3000::%d" % ii local_label = 1600 + ii - vpn_routes.append(VppIpRoute( - self, dst, 128, - [VppRoutePath(self.pg2.remote_ip6, - 0xffffffff, - nh_table_id=1, - is_resolve_attached=1, - proto=DpoProto.DPO_PROTO_IP6), - VppRoutePath(self.pg3.remote_ip6, - 0xffffffff, - nh_table_id=1, - proto=DpoProto.DPO_PROTO_IP6, - is_resolve_attached=1)], - table_id=1, - is_ip6=1)) + vpn_routes.append( + VppIpRoute( + self, + dst, + 128, + [ + VppRoutePath( + self.pg2.remote_ip6, + 0xFFFFFFFF, + nh_table_id=1, + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED, + ), + VppRoutePath( + self.pg3.remote_ip6, + 0xFFFFFFFF, + nh_table_id=1, + flags=FibPathFlags.FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED, + ), + ], + table_id=1, + ) + ) vpn_routes[ii].add_vpp_config() - vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128, - ip_table_id=1, - is_ip6=1)) + vpn_bindings.append( + VppMplsIpBind(self, local_label, dst, 128, ip_table_id=1) + ) vpn_bindings[ii].add_vpp_config() - pkts.append(Ether(dst=self.pg0.local_mac, - src=self.pg0.remote_mac) / - MPLS(label=local_label, ttl=64) / - IPv6(src=self.pg0.remote_ip6, dst=dst) / - UDP(sport=1234, dport=1234) / - Raw('\xa5' * 100)) + pkts.append( + Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) + / MPLS(label=local_label, ttl=64) + / IPv6(src=self.pg0.remote_ip6, dst=dst) + / UDP(sport=1234, dport=1234) + / Raw(b"\xa5" * 100) + ) + self.logger.info(self.vapi.cli("sh ip6 fib 
%s" % dst)) self.pg0.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg2._get_capture(1) - rx1 = self.pg3._get_capture(1) + rx0 = self.pg2._get_capture(NUM_PKTS) + rx1 = self.pg3._get_capture(NUM_PKTS) self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." % (len(pkts), len(rx0), len(rx1)), + ) # # use a test CLI command to stop the FIB walk process, this # will prevent the FIB converging the VPN routes and thus allow - # us to probe the interim (psot-fail, pre-converge) state + # us to probe the interim (post-fail, pre-converge) state # self.vapi.ppcli("test fib-walk-process disable") @@ -1792,7 +2494,13 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg3.get_capture(len(pkts)) + rx0 = self.pg3.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." % (len(pkts), len(rx0)), + ) # # enable the FIB walk process to converge the FIB @@ -1802,26 +2510,48 @@ class TestMPLSPIC(VppTestCase): self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg3.get_capture(len(pkts)) + rx0 = self.pg3.get_capture(NUM_PKTS) + self.assertEqual( + len(pkts), + len(rx0), + "Expected all (%s) packets across single path. " + "rx0: %s." % (len(pkts), len(rx0)), + ) # - # put the connecteds back + # put the connected routes back # + self.logger.info(self.vapi.cli("sh log")) self.pg2.admin_up() self.pg2.config_ip6() + self.pg2.resolve_ndp() self.pg0.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) self.pg_start() - rx0 = self.pg2._get_capture(1) - rx1 = self.pg3._get_capture(1) + rx0 = self.pg2._get_capture(NUM_PKTS) + rx1 = self.pg3._get_capture(NUM_PKTS) self.assertNotEqual(0, len(rx0)) self.assertNotEqual(0, len(rx1)) + self.assertEqual( + len(pkts), + len(rx0) + len(rx1), + "Expected all (%s) packets across both ECMP paths. " + "rx0: %s rx1: %s." 
% (len(pkts), len(rx0), len(rx1)), + ) class TestMPLSL2(VppTestCase): - """ MPLS-L2 """ + """MPLS-L2""" + + @classmethod + def setUpClass(cls): + super(TestMPLSL2, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + super(TestMPLSL2, cls).tearDownClass() def setUp(self): super(TestMPLSL2, self).setUp() @@ -1835,10 +2565,9 @@ class TestMPLSL2(VppTestCase): tbl.add_vpp_config() self.tables.append(tbl) - # use pg0 as the core facing interface + # use pg0 as the core facing interface, don't resolve ARP self.pg0.admin_up() self.pg0.config_ip4() - self.pg0.resolve_arp() self.pg0.enable_mpls() # use the other 2 for customer facing L2 links @@ -1867,13 +2596,29 @@ class TestMPLSL2(VppTestCase): verify_mpls_stack(self, rx, mpls_labels) tx_eth = tx[Ether] - rx_eth = Ether(str(rx[MPLS].payload)) + rx_eth = Ether(scapy.compat.raw(rx[MPLS].payload)) self.assertEqual(rx_eth.src, tx_eth.src) self.assertEqual(rx_eth.dst, tx_eth.dst) + def verify_arp_req(self, rx, smac, sip, dip): + ether = rx[Ether] + self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff") + self.assertEqual(ether.src, smac) + + arp = rx[ARP] + self.assertEqual(arp.hwtype, 1) + self.assertEqual(arp.ptype, 0x800) + self.assertEqual(arp.hwlen, 6) + self.assertEqual(arp.plen, 4) + self.assertEqual(arp.op, ARP.who_has) + self.assertEqual(arp.hwsrc, smac) + self.assertEqual(arp.hwdst, "00:00:00:00:00:00") + self.assertEqual(arp.psrc, sip) + self.assertEqual(arp.pdst, dip) + def test_vpws(self): - """ Virtual Private Wire Service """ + """Virtual Private Wire Service""" # # Create an MPLS tunnel that pushes 1 label @@ -1882,10 +2627,15 @@ class TestMPLSL2(VppTestCase): # mpls_tun_1 = VppMPLSTunnelInterface( self, - [VppRoutePath(self.pg0.remote_ip4, - self.pg0.sw_if_index, - labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])], - is_l2=1) + [ + VppRoutePath( + self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)], + ) + ], + is_l2=1, + ) mpls_tun_1.add_vpp_config() mpls_tun_1.admin_up() @@ -1893,36 +2643,44 @@ class TestMPLSL2(VppTestCase): # Create a label entry to for 55 that does L2 input to the tunnel # route_55_eos = VppMplsRoute( - self, 55, 1, - [VppRoutePath("0.0.0.0", - mpls_tun_1.sw_if_index, - is_interface_rx=1, - proto=DpoProto.DPO_PROTO_ETHERNET)]) + self, + 55, + 1, + [ + VppRoutePath( + "0.0.0.0", + mpls_tun_1.sw_if_index, + type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX, + proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET, + ) + ], + eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET, + ) route_55_eos.add_vpp_config() # # Cross-connect the tunnel with one of the customers L2 interfaces # - self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index, - mpls_tun_1.sw_if_index, - enable=1) - self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index, - self.pg1.sw_if_index, - enable=1) + self.vapi.sw_interface_set_l2_xconnect( + self.pg1.sw_if_index, mpls_tun_1.sw_if_index, enable=1 + ) + self.vapi.sw_interface_set_l2_xconnect( + mpls_tun_1.sw_if_index, self.pg1.sw_if_index, enable=1 + ) # # inject a packet from the core # - pcore = (Ether(dst=self.pg0.local_mac, - src=self.pg0.remote_mac) / - MPLS(label=55, ttl=64) / - Ether(dst="00:00:de:ad:ba:be", - src="00:00:de:ad:be:ef") / - IP(src="10.10.10.10", dst="11.11.11.11") / - UDP(sport=1234, dport=1234) / - Raw('\xa5' * 100)) + pcore = ( + Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) + / MPLS(label=55, ttl=64) + / Ether(dst="00:00:de:ad:ba:be", src="00:00:de:ad:be:ef") + / IP(src="10.10.10.10", dst="11.11.11.11") + / UDP(sport=1234, 
dport=1234)
+            / Raw(b"\xa5" * 100)
+        )
 
-        tx0 = pcore * 65
+        tx0 = pcore * NUM_PKTS
         rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
         payload = pcore[MPLS].payload
 
@@ -1930,102 +2688,193 @@ class TestMPLSL2(VppTestCase):
         self.assertEqual(rx0[0][Ether].src, payload[Ether].src)
 
         #
-        # Inject a packet from the custoer/L2 side
+        # Inject a packet from the customer/L2 side
+        # there's no resolved ARP entry so the first packet we see should be
+        # an ARP request
         #
-        tx1 = pcore[MPLS].payload * 65
+        tx1 = pcore[MPLS].payload
+        rx1 = self.send_and_expect(self.pg1, [tx1], self.pg0)
+
+        self.verify_arp_req(
+            rx1[0], self.pg0.local_mac, self.pg0.local_ip4, self.pg0.remote_ip4
+        )
+
+        #
+        # resolve the ARP entries and send again
+        #
+        self.pg0.resolve_arp()
+        tx1 = pcore[MPLS].payload * NUM_PKTS
         rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)
 
         self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])
 
     def test_vpls(self):
-        """ Virtual Private LAN Service """
+        """Virtual Private LAN Service"""
+
+        # we skipped this in the setup
+        self.pg0.resolve_arp()
+
         #
-        # Create an L2 MPLS tunnel
+        # Create two L2 MPLS tunnels
         #
-        mpls_tun = VppMPLSTunnelInterface(
+        mpls_tun1 = VppMPLSTunnelInterface(
             self,
-            [VppRoutePath(self.pg0.remote_ip4,
-                          self.pg0.sw_if_index,
-                          labels=[VppMplsLabel(42)])],
-            is_l2=1)
-        mpls_tun.add_vpp_config()
-        mpls_tun.admin_up()
+            [
+                VppRoutePath(
+                    self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(42)]
+                )
+            ],
+            is_l2=1,
+        )
+        mpls_tun1.add_vpp_config()
+        mpls_tun1.admin_up()
+
+        mpls_tun2 = VppMPLSTunnelInterface(
+            self,
+            [
+                VppRoutePath(
+                    self.pg0.remote_ip4, self.pg0.sw_if_index, labels=[VppMplsLabel(43)]
+                )
+            ],
+            is_l2=1,
+        )
+        mpls_tun2.add_vpp_config()
+        mpls_tun2.admin_up()
 
         #
-        # Create a label entry to for 55 that does L2 input to the tunnel
+        # Create label entries, 55 and 56, that do L2 input to the tunnels
+        # the latter includes a Pseudo Wire Control Word
         #
         route_55_eos = VppMplsRoute(
-            self, 55, 1,
-            [VppRoutePath("0.0.0.0",
-                          mpls_tun.sw_if_index,
-                          is_interface_rx=1,
-                          proto=DpoProto.DPO_PROTO_ETHERNET)])
+            self,
+            55,
+            1,
+            [
+                VppRoutePath(
+                    "0.0.0.0",
+                    mpls_tun1.sw_if_index,
+                    type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                    proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET,
+                )
+            ],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET,
+        )
+
+        route_56_eos = VppMplsRoute(
+            self,
+            56,
+            1,
+            [
+                VppRoutePath(
+                    "0.0.0.0",
+                    mpls_tun2.sw_if_index,
+                    type=FibPathType.FIB_PATH_TYPE_INTERFACE_RX,
+                    flags=FibPathFlags.FIB_PATH_FLAG_POP_PW_CW,
+                    proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET,
+                )
+            ],
+            eos_proto=FibPathProto.FIB_PATH_NH_PROTO_ETHERNET,
+        )
+
+        # move me
+        route_56_eos.add_vpp_config()
         route_55_eos.add_vpp_config()
 
-        #
-        # add to tunnel to the customers bridge-domain
-        #
-        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
-                                             bd_id=1)
-        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
-                                             bd_id=1)
+        self.logger.info(self.vapi.cli("sh mpls fib 56"))
 
         #
-        # Packet from the customer interface and from the core
+        # add to tunnel to the customers bridge-domain
         #
-        p_cust = (Ether(dst="00:00:de:ad:ba:be",
-                        src="00:00:de:ad:be:ef") /
-                  IP(src="10.10.10.10", dst="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
-        p_core = (Ether(src="00:00:de:ad:ba:be",
-                        dst="00:00:de:ad:be:ef") /
-                  IP(dst="10.10.10.10", src="11.11.11.11") /
-                  UDP(sport=1234, dport=1234) /
-                  Raw('\xa5' * 100))
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1
+        )
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1
+        )
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=self.pg1.sw_if_index, bd_id=1
+        )
+
+        #
+        # Packet from host on the customer interface to each host
+        # reachable over the core, and vice-versa
+        #
+        p_cust1 = (
+            Ether(dst="00:00:de:ad:ba:b1", src="00:00:de:ad:be:ef")
+            / IP(src="10.10.10.10", dst="11.11.11.11")
+            / UDP(sport=1234, dport=1234)
+            / Raw(b"\xa5" * 100)
+        )
+        p_cust2 = (
+            Ether(dst="00:00:de:ad:ba:b2", src="00:00:de:ad:be:ef")
+            / IP(src="10.10.10.10", dst="11.11.11.12")
+            / UDP(sport=1234, dport=1234)
+            / Raw(b"\xa5" * 100)
+        )
+        p_core1 = (
+            Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+            / MPLS(label=55, ttl=64)
+            / Ether(src="00:00:de:ad:ba:b1", dst="00:00:de:ad:be:ef")
+            / IP(dst="10.10.10.10", src="11.11.11.11")
+            / UDP(sport=1234, dport=1234)
+            / Raw(b"\xa5" * 100)
+        )
+        p_core2 = (
+            Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+            / MPLS(label=56, ttl=64)
+            / Raw(b"\x01" * 4)  # PW CW
+            / Ether(src="00:00:de:ad:ba:b2", dst="00:00:de:ad:be:ef")
+            / IP(dst="10.10.10.10", src="11.11.11.12")
+            / UDP(sport=1234, dport=1234)
+            / Raw(b"\xa5" * 100)
+        )
 
         #
         # The BD is learning, so send in one of each packet to learn
         #
-        p_core_encap = (Ether(dst=self.pg0.local_mac,
-                              src=self.pg0.remote_mac) /
-                        MPLS(label=55, ttl=64) /
-                        p_core)
-        self.pg1.add_stream(p_cust)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
-        self.pg0.add_stream(p_core_encap)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        # 2 packets due to BD flooding
+        rx = self.send_and_expect(self.pg1, p_cust1, self.pg0, n_rx=2)
+        rx = self.send_and_expect(self.pg1, p_cust2, self.pg0, n_rx=2)
 
-        # we've learnt this so expect it be be forwarded
-        rx0 = self.pg1.get_capture(1)
+        # we've learnt this so expect it to be forwarded, not flooded
+        rx = self.send_and_expect(self.pg0, [p_core1], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust1[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust1[Ether].dst)
 
-        self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
-        self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)
+        rx = self.send_and_expect(self.pg0, [p_core2], self.pg1)
+        self.assertEqual(rx[0][Ether].dst, p_cust2[Ether].src)
+        self.assertEqual(rx[0][Ether].src, p_cust2[Ether].dst)
 
         #
-        # now a stream in each direction
+        # now a stream in each direction from each host
        #
-        self.pg1.add_stream(p_cust * 65)
-        self.pg_enable_capture(self.pg_interfaces)
-        self.pg_start()
+        rx = self.send_and_expect(self.pg1, p_cust1 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(
+            rx, p_cust1 * NUM_PKTS, [VppMplsLabel(42)]
+        )
 
-        rx0 = self.pg0.get_capture(65)
+        rx = self.send_and_expect(self.pg1, p_cust2 * NUM_PKTS, self.pg0)
+        self.verify_capture_tunneled_ethernet(
+            rx, p_cust2 * NUM_PKTS, [VppMplsLabel(43)]
+        )
 
-        self.verify_capture_tunneled_ethernet(rx0, p_cust*65,
-                                              [VppMplsLabel(42)])
+        rx = self.send_and_expect(self.pg0, p_core1 * NUM_PKTS, self.pg1)
+        rx = self.send_and_expect(self.pg0, p_core2 * NUM_PKTS, self.pg1)
 
         #
         # remove interfaces from customers bridge-domain
         #
-        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
-                                             bd_id=1,
-                                             enable=0)
-        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
-                                             bd_id=1,
-                                             enable=0)
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun1.sw_if_index, bd_id=1, enable=0
+        )
+        self.vapi.sw_interface_set_l2_bridge(
+            rx_sw_if_index=mpls_tun2.sw_if_index, bd_id=1, enable=0
+        )
+        self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0 + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main(testRunner=VppTestRunner)
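The assertions added throughout TestMPLSPIC all encode the same ECMP invariant: every injected packet must appear on exactly one of the two core-facing captures, and neither capture may be empty. Below is a minimal standalone sketch of that check using plain lists in place of VPP packet-generator captures; the helper name assert_ecmp_split and the fixed 30/37 split are hypothetical and are not part of the patch.

#!/usr/bin/env python3
# A minimal, standalone sketch of the ECMP-split invariant the new
# TestMPLSPIC assertions express; not part of the patch itself.


def assert_ecmp_split(tx, rx0, rx1):
    """Every packet sent must arrive on exactly one of the two paths."""
    # neither ECMP member may be starved of traffic...
    assert len(rx0) != 0, "first ECMP path received nothing"
    assert len(rx1) != 0, "second ECMP path received nothing"
    # ...and no packet may be dropped or duplicated across the pair
    assert len(tx) == len(rx0) + len(rx1), (
        "Expected all (%s) packets across both ECMP paths. rx0: %s rx1: %s."
        % (len(tx), len(rx0), len(rx1))
    )


if __name__ == "__main__":
    NUM_PKTS = 67  # the same packet count the patch introduces
    tx = list(range(NUM_PKTS))
    # a made-up 30/37 split stands in for the flow-hash load balance
    assert_ecmp_split(tx, tx[:30], tx[30:])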