+ #
+ # IP address in different subnets are not learnt
+ #
+ self.pg2.configure_ipv4_neighbors()
+
+ for op in ["is-at", "who-has"]:
+ p1 = [(Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg2.remote_hosts[1].mac) /
+ ARP(op=op,
+ hwdst=self.pg2.local_mac,
+ hwsrc=self.pg2.remote_hosts[1].mac,
+ pdst=self.pg2.remote_hosts[1].ip4,
+ psrc=self.pg2.remote_hosts[1].ip4)),
+ (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg2.remote_hosts[1].mac) /
+ ARP(op=op,
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg2.remote_hosts[1].mac,
+ pdst=self.pg2.remote_hosts[1].ip4,
+ psrc=self.pg2.remote_hosts[1].ip4))]
+
+ self.send_and_assert_no_replies(self.pg1, p1)
+ self.assertFalse(find_nbr(self,
+ self.pg1.sw_if_index,
+ self.pg2.remote_hosts[1].ip4))
+
+ # they are all dropped because the subnet's don't match
+ self.assertEqual(4, self.statistics.get_err_counter(
+ "/err/arp-reply/IP4 destination address not local to subnet"))
+
+ def test_arp_incomplete2(self):
+ """ Incomplete Entries """
+
+ #
+ # ensure that we throttle the ARP and ND requests
+ #
+ self.pg0.generate_remote_hosts(2)
+
+ #
+ # IPv4/ARP
+ #
+ ip_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
+ [VppRoutePath(self.pg0.remote_hosts[1].ip4,
+ self.pg0.sw_if_index)])
+ ip_10_0_0_1.add_vpp_config()
+
+ p1 = (Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst="10.0.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ self.pg1.add_stream(p1 * 257)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = self.pg0._get_capture(1)
+
+ #
+ # how many we get is going to be dependent on the time for packet
+ # processing but it should be small
+ #
+ self.assertLess(len(rx), 64)
+
+ #
+ # IPv6/ND
+ #
+ ip_10_1 = VppIpRoute(self, "10::1", 128,
+ [VppRoutePath(self.pg0.remote_hosts[1].ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ ip_10_1.add_vpp_config()
+
+ p1 = (Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6,
+ dst="10::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ self.pg1.add_stream(p1 * 257)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = self.pg0._get_capture(1)
+
+ #
+ # how many we get is going to be dependent on the time for packet
+ # processing but it should be small
+ #
+ self.assertLess(len(rx), 64)
+
+ def test_arp_forus(self):
+ """ ARP for for-us """
+
+ #
+ # Test that VPP responds with ARP requests to addresses that
+ # are connected and local routes.
+ # Use one of the 'remote' addresses in the subnet as a local address
+ # The intention of this route is that it then acts like a secondary
+ # address added to an interface
+ #
+ self.pg0.generate_remote_hosts(2)
+
+ forus = VppIpRoute(
+ self, self.pg0.remote_hosts[1].ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ self.pg0.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_LOCAL)])
+ forus.add_vpp_config()
+
+ p = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst=self.pg0.local_mac,
+ hwsrc=self.pg0.remote_mac,
+ pdst=self.pg0.remote_hosts[1].ip4,
+ psrc=self.pg0.remote_ip4))
+
+ rx = self.send_and_expect(self.pg0, [p], self.pg0)
+
+ self.verify_arp_resp(rx[0],
+ self.pg0.local_mac,
+ self.pg0.remote_mac,
+ self.pg0.remote_hosts[1].ip4,
+ self.pg0.remote_ip4)
+
    def test_arp_table_swap(self):
        """ Table Swap: neighbours are flushed when the interface
            moves to a different VRF and re-learnt on demand """
        #
        # Generate some hosts on the LAN
        #
        N_NBRS = 4
        self.pg1.generate_remote_hosts(N_NBRS)

        for n in range(N_NBRS):
            # a route thru each neighbour
            VppIpRoute(self, "10.0.0.%d" % n, 32,
                       [VppRoutePath(self.pg1.remote_hosts[n].ip4,
                                     self.pg1.sw_if_index)]).add_vpp_config()

            # resolve each neighbour with a gratuitous ARP
            p1 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
                  ARP(op="is-at", hwdst=self.pg1.local_mac,
                      hwsrc="00:00:5e:00:01:09", pdst=self.pg1.local_ip4,
                      psrc=self.pg1.remote_hosts[n].ip4))

            self.send_and_assert_no_replies(self.pg1, p1, "ARP reply")

        self.logger.info(self.vapi.cli("sh ip neighbors"))

        #
        # swap the table pg1 is in
        # (the table object is kept so its config lives for the test)
        #
        table = VppIpTable(self, 100).add_vpp_config()

        self.pg1.unconfig_ip4()
        self.pg1.set_table_ip4(100)
        self.pg1.config_ip4()

        #
        # all neighbours are cleared
        #
        for n in range(N_NBRS):
            self.assertFalse(find_nbr(self,
                                      self.pg1.sw_if_index,
                                      self.pg1.remote_hosts[n].ip4))

        #
        # packets to all neighbours generate ARP requests
        #
        for n in range(N_NBRS):
            # a route thru each neighbour, now in the new table
            VppIpRoute(self, "10.0.0.%d" % n, 32,
                       [VppRoutePath(self.pg1.remote_hosts[n].ip4,
                                     self.pg1.sw_if_index)],
                       table_id=100).add_vpp_config()

            p = (Ether(src=self.pg1.remote_hosts[n].mac,
                       dst=self.pg1.local_mac) /
                 IP(src=self.pg1.remote_hosts[n].ip4,
                    dst="10.0.0.%d" % n) /
                 Raw(b'0x5' * 100))
            rxs = self.send_and_expect(self.pg1, [p], self.pg1)
            for rx in rxs:
                self.verify_arp_req(rx,
                                    self.pg1.local_mac,
                                    self.pg1.local_ip4,
                                    self.pg1.remote_hosts[n].ip4)

        # return pg1 to the default table for subsequent tests
        self.pg1.unconfig_ip4()
        self.pg1.set_table_ip4(0)
+
+ def test_glean_src_select(self):
+ """ Multi Connecteds """
+
+ #
+ # configure multiple connected subnets on an interface
+ # and ensure that ARP requests for hosts on those subnets
+ # pick up the correct source address
+ #
+ conn1 = VppIpInterfaceAddress(self, self.pg1,
+ "10.0.0.1", 24).add_vpp_config()
+ conn2 = VppIpInterfaceAddress(self, self.pg1,
+ "10.0.1.1", 24).add_vpp_config()
+
+ p1 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst="10.0.0.128") /
+ Raw(b'0x5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p1], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.0.1",
+ "10.0.0.128")
+
+ p2 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst="10.0.1.128") /
+ Raw(b'0x5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.1",
+ "10.0.1.128")
+
+ #
+ # add a local address in the same subnet
+ # the source addresses are equivalent. VPP happens to
+ # choose the last one that was added
+ conn3 = VppIpInterfaceAddress(self, self.pg1,
+ "10.0.1.2", 24).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.2",
+ "10.0.1.128")
+
+ #
+ # remove
+ #
+ conn3.remove_vpp_config()
+ rxs = self.send_and_expect(self.pg0, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.1",
+ "10.0.1.128")
+
+ #
+ # add back, this time remove the first one
+ #
+ conn3 = VppIpInterfaceAddress(self, self.pg1,
+ "10.0.1.2", 24).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.2",
+ "10.0.1.128")
+
+ conn1.remove_vpp_config()
+ rxs = self.send_and_expect(self.pg0, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.2",
+ "10.0.1.128")
+
+ # apply a connected prefix to an interface in a different table
+ VppIpRoute(self, "10.0.1.0", 24,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index)],
+ table_id=1).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg3, [p2], self.pg1)
+ for rx in rxs:
+ self.verify_arp_req(rx,
+ self.pg1.local_mac,
+ "10.0.1.2",
+ "10.0.1.128")
+
+ # cleanup
+ conn3.remove_vpp_config()
+ conn2.remove_vpp_config()
+
+
@tag_fixme_vpp_workers
class NeighborStatsTestCase(VppTestCase):
    """ ARP/ND Counters """

    @classmethod
    def setUpClass(cls):
        super(NeighborStatsTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(NeighborStatsTestCase, cls).tearDownClass()

    def setUp(self):
        super(NeighborStatsTestCase, self).setUp()

        self.create_pg_interfaces(range(2))

        # pg0 is used for input traffic, pg1 for output; both get
        # IPv4 and IPv6 addresses and resolved neighbours
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.resolve_arp()
            i.resolve_ndp()

    def tearDown(self):
        super(NeighborStatsTestCase, self).tearDown()

        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.admin_down()

    def test_arp_stats(self):
        """ ARP Counters """

        # adjacency counters are not enabled by default
        self.vapi.cli("adj counters enable")
        self.pg1.generate_remote_hosts(2)

        arp1 = VppNeighbor(self,
                           self.pg1.sw_if_index,
                           self.pg1.remote_hosts[0].mac,
                           self.pg1.remote_hosts[0].ip4)
        arp1.add_vpp_config()
        arp2 = VppNeighbor(self,
                           self.pg1.sw_if_index,
                           self.pg1.remote_hosts[1].mac,
                           self.pg1.remote_hosts[1].ip4)
        arp2.add_vpp_config()

        p1 = (Ether(dst=self.pg0.local_mac,
                    src=self.pg0.remote_mac) /
              IP(src=self.pg0.remote_ip4,
                 dst=self.pg1.remote_hosts[0].ip4) /
              UDP(sport=1234, dport=1234) /
              Raw())
        p2 = (Ether(dst=self.pg0.local_mac,
                    src=self.pg0.remote_mac) /
              IP(src=self.pg0.remote_ip4,
                 dst=self.pg1.remote_hosts[1].ip4) /
              UDP(sport=1234, dport=1234) /
              Raw())

        # NUM_PKTS through each neighbour's adjacency
        rx = self.send_and_expect(self.pg0, p1 * NUM_PKTS, self.pg1)
        rx = self.send_and_expect(self.pg0, p2 * NUM_PKTS, self.pg1)

        self.assertEqual(NUM_PKTS, arp1.get_stats()['packets'])
        self.assertEqual(NUM_PKTS, arp2.get_stats()['packets'])

        # counters accumulate across sends
        rx = self.send_and_expect(self.pg0, p1 * NUM_PKTS, self.pg1)
        self.assertEqual(NUM_PKTS*2, arp1.get_stats()['packets'])

    def test_nd_stats(self):
        """ ND Counters """

        # adjacency counters are not enabled by default
        self.vapi.cli("adj counters enable")
        self.pg0.generate_remote_hosts(3)

        nd1 = VppNeighbor(self,
                          self.pg0.sw_if_index,
                          self.pg0.remote_hosts[1].mac,
                          self.pg0.remote_hosts[1].ip6)
        nd1.add_vpp_config()
        nd2 = VppNeighbor(self,
                          self.pg0.sw_if_index,
                          self.pg0.remote_hosts[2].mac,
                          self.pg0.remote_hosts[2].ip6)
        nd2.add_vpp_config()

        p1 = (Ether(dst=self.pg1.local_mac,
                    src=self.pg1.remote_mac) /
              IPv6(src=self.pg1.remote_ip6,
                   dst=self.pg0.remote_hosts[1].ip6) /
              UDP(sport=1234, dport=1234) /
              Raw())
        p2 = (Ether(dst=self.pg1.local_mac,
                    src=self.pg1.remote_mac) /
              IPv6(src=self.pg1.remote_ip6,
                   dst=self.pg0.remote_hosts[2].ip6) /
              UDP(sport=1234, dport=1234) /
              Raw())

        rx = self.send_and_expect(self.pg1, p1 * 16, self.pg0)
        rx = self.send_and_expect(self.pg1, p2 * 16, self.pg0)

        self.assertEqual(16, nd1.get_stats()['packets'])
        self.assertEqual(16, nd2.get_stats()['packets'])

        # counters accumulate across sends
        rx = self.send_and_expect(self.pg1, p1 * NUM_PKTS, self.pg0)
        self.assertEqual(NUM_PKTS+16, nd1.get_stats()['packets'])
+
+
class NeighborAgeTestCase(VppTestCase):
    """ ARP/ND Aging """

    @classmethod
    def setUpClass(cls):
        super(NeighborAgeTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(NeighborAgeTestCase, cls).tearDownClass()

    def setUp(self):
        super(NeighborAgeTestCase, self).setUp()

        self.create_pg_interfaces(range(1))

        # the single interface, pg0, gets IPv4 and IPv6 addresses
        # and resolved neighbours
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.resolve_arp()
            i.resolve_ndp()

    def tearDown(self):
        super(NeighborAgeTestCase, self).tearDown()

        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.admin_down()

    def wait_for_no_nbr(self, intf, address,
                        n_tries=50, s_time=1):
        # poll until the neighbour disappears (True) or the tries are
        # exhausted (False)
        while (n_tries):
            if not find_nbr(self, intf, address):
                return True
            n_tries = n_tries - 1
            self.sleep(s_time)

        return False

    def verify_arp_req(self, rx, smac, sip, dip):
        # validate a broadcast ARP who-has probe sent by VPP
        ether = rx[Ether]
        self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
        self.assertEqual(ether.src, smac)

        arp = rx[ARP]
        self.assertEqual(arp.hwtype, 1)
        self.assertEqual(arp.ptype, 0x800)
        self.assertEqual(arp.hwlen, 6)
        self.assertEqual(arp.plen, 4)
        self.assertEqual(arp.op, arp_opts["who-has"])
        self.assertEqual(arp.hwsrc, smac)
        self.assertEqual(arp.hwdst, "00:00:00:00:00:00")
        self.assertEqual(arp.psrc, sip)
        self.assertEqual(arp.pdst, dip)

    def test_age(self):
        """ Aging/Recycle """

        self.vapi.cli("set logging unthrottle 0")
        self.vapi.cli("set logging size %d" % 0xffff)

        self.pg0.generate_remote_hosts(201)

        vaf = VppEnum.vl_api_address_family_t

        #
        # start listening on all interfaces
        #
        self.pg_enable_capture(self.pg_interfaces)

        #
        # Set the neighbor configuration:
        #   limit = 200
        #   age = 0 seconds (i.e. no aging)
        #   recycle = false
        #
        self.vapi.ip_neighbor_config(af=vaf.ADDRESS_IP4,
                                     max_number=200,
                                     max_age=0,
                                     recycle=False)

        self.vapi.cli("sh ip neighbor-config")

        # fill the table up to the limit of 200 neighbours
        for ii in range(200):
            VppNeighbor(self,
                        self.pg0.sw_if_index,
                        self.pg0.remote_hosts[ii].mac,
                        self.pg0.remote_hosts[ii].ip4).add_vpp_config()

        # one more neighbor over the limit should fail
        with self.vapi.assert_negative_api_retval():
            VppNeighbor(self,
                        self.pg0.sw_if_index,
                        self.pg0.remote_hosts[200].mac,
                        self.pg0.remote_hosts[200].ip4).add_vpp_config()

        #
        # change the config to allow recycling the old neighbors
        #
        self.vapi.ip_neighbor_config(af=vaf.ADDRESS_IP4,
                                     max_number=200,
                                     max_age=0,
                                     recycle=True)

        # now new additions are allowed
        VppNeighbor(self,
                    self.pg0.sw_if_index,
                    self.pg0.remote_hosts[200].mac,
                    self.pg0.remote_hosts[200].ip4).add_vpp_config()

        # check that the first neighbour we configured has been
        # recycled to make room for the new one
        self.assertFalse(find_nbr(self,
                                  self.pg0.sw_if_index,
                                  self.pg0.remote_hosts[0].ip4))
        self.assertTrue(find_nbr(self,
                                 self.pg0.sw_if_index,
                                 self.pg0.remote_hosts[200].ip4))

        #
        # change the config to age old neighbors
        #
        self.vapi.ip_neighbor_config(af=vaf.ADDRESS_IP4,
                                     max_number=200,
                                     max_age=2,
                                     recycle=True)

        self.vapi.cli("sh ip4 neighbor-sorted")

        #
        # expect probes from all these ARP entries as they age
        # 3 probes for each neighbor 3*200 = 600
        #
        rxs = self.pg0.get_capture(600, timeout=8)

        for ii in range(3):
            for jj in range(200):
                rx = rxs[ii*200 + jj]
                # rx.show()

        #
        # 3 probes sent then 1 more second to see if a reply comes, before
        # they age out
        #
        for jj in range(1, 201):
            self.wait_for_no_nbr(self.pg0.sw_if_index,
                                 self.pg0.remote_hosts[jj].ip4)

        self.assertFalse(self.vapi.ip_neighbor_dump(sw_if_index=0xffffffff,
                                                    af=vaf.ADDRESS_IP4))

        #
        # load up some neighbours again with 2s aging enabled
        # they should be removed after 10s (2s age + 4s for probes + gap)
        # check for the add and remove events
        #
        enum = VppEnum.vl_api_ip_neighbor_event_flags_t

        self.vapi.want_ip_neighbor_events_v2(enable=1)
        for ii in range(10):
            VppNeighbor(self,
                        self.pg0.sw_if_index,
                        self.pg0.remote_hosts[ii].mac,
                        self.pg0.remote_hosts[ii].ip4).add_vpp_config()

            # each add generates an ADDED event for that neighbour
            e = self.vapi.wait_for_event(1, "ip_neighbor_event_v2")
            self.assertEqual(e.flags,
                             enum.IP_NEIGHBOR_API_EVENT_FLAG_ADDED)
            self.assertEqual(str(e.neighbor.ip_address),
                             self.pg0.remote_hosts[ii].ip4)
            self.assertEqual(e.neighbor.mac_address,
                             self.pg0.remote_hosts[ii].mac)

        self.sleep(10)
        self.assertFalse(self.vapi.ip_neighbor_dump(sw_if_index=0xffffffff,
                                                    af=vaf.ADDRESS_IP4))

        evs = []
        for ii in range(10):
            e = self.vapi.wait_for_event(1, "ip_neighbor_event_v2")
            self.assertEqual(e.flags,
                             enum.IP_NEIGHBOR_API_EVENT_FLAG_REMOVED)
            evs.append(e)

        # check we got the correct mac/ip pairs - done separately
        # because we don't care about the order the remove notifications
        # arrive
        for ii in range(10):
            found = False
            mac = self.pg0.remote_hosts[ii].mac
            ip = self.pg0.remote_hosts[ii].ip4

            for e in evs:
                if (e.neighbor.mac_address == mac and
                        str(e.neighbor.ip_address) == ip):
                    found = True
                    break
            self.assertTrue(found)

        #
        # check if we can set age and recycle with empty neighbor list
        #
        self.vapi.ip_neighbor_config(af=vaf.ADDRESS_IP4,
                                     max_number=200,
                                     max_age=1000,
                                     recycle=True)

        #
        # load up some neighbours again, then disable the aging
        # they should still be there in 10 seconds time
        #
        for ii in range(10):
            VppNeighbor(self,
                        self.pg0.sw_if_index,
                        self.pg0.remote_hosts[ii].mac,
                        self.pg0.remote_hosts[ii].ip4).add_vpp_config()
        self.vapi.ip_neighbor_config(af=vaf.ADDRESS_IP4,
                                     max_number=200,
                                     max_age=0,
                                     recycle=False)

        self.sleep(10)
        self.assertTrue(find_nbr(self,
                                 self.pg0.sw_if_index,
                                 self.pg0.remote_hosts[0].ip4))
+
+
class NeighborReplaceTestCase(VppTestCase):
    """ ARP/ND Replacement """

    @classmethod
    def setUpClass(cls):
        super(NeighborReplaceTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(NeighborReplaceTestCase, cls).tearDownClass()

    def setUp(self):
        super(NeighborReplaceTestCase, self).setUp()

        self.create_pg_interfaces(range(4))

        # all four interfaces get IPv4 and IPv6 addresses and
        # resolved neighbours
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.resolve_arp()
            i.resolve_ndp()

    def tearDown(self):
        super(NeighborReplaceTestCase, self).tearDown()

        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.admin_down()

    def test_replace(self):
        """ replace """

        N_HOSTS = 16

        for i in self.pg_interfaces:
            i.generate_remote_hosts(N_HOSTS)
            i.configure_ipv4_neighbors()
            i.configure_ipv6_neighbors()

        # replace them all: a begin/end pair with no updates in
        # between flushes every neighbour
        self.vapi.ip_neighbor_replace_begin()
        self.vapi.ip_neighbor_replace_end()

        # fix: check every interface, not just pg0 repeatedly
        for i in self.pg_interfaces:
            for h in range(N_HOSTS):
                self.assertFalse(find_nbr(self,
                                          i.sw_if_index,
                                          i.remote_hosts[h].ip4))
                self.assertFalse(find_nbr(self,
                                          i.sw_if_index,
                                          i.remote_hosts[h].ip6))

        #
        # add them all back via the API
        #
        for i in self.pg_interfaces:
            for h in range(N_HOSTS):
                VppNeighbor(self,
                            i.sw_if_index,
                            i.remote_hosts[h].mac,
                            i.remote_hosts[h].ip4).add_vpp_config()
                VppNeighbor(self,
                            i.sw_if_index,
                            i.remote_hosts[h].mac,
                            i.remote_hosts[h].ip6).add_vpp_config()

        #
        # begin the replacement again, this time touch some of
        # the neighbours on pg1 and pg3 so they are not deleted
        #
        self.vapi.ip_neighbor_replace_begin()

        # update from the API all neighbours on pg1
        for h in range(N_HOSTS):
            VppNeighbor(self,
                        self.pg1.sw_if_index,
                        self.pg1.remote_hosts[h].mac,
                        self.pg1.remote_hosts[h].ip4).add_vpp_config()
            VppNeighbor(self,
                        self.pg1.sw_if_index,
                        self.pg1.remote_hosts[h].mac,
                        self.pg1.remote_hosts[h].ip6).add_vpp_config()

        # update from the data-plane all neighbours on pg3
        self.pg3.configure_ipv4_neighbors()
        self.pg3.configure_ipv6_neighbors()

        # complete the replacement
        self.logger.info(self.vapi.cli("sh ip neighbors"))
        self.vapi.ip_neighbor_replace_end()

        for i in self.pg_interfaces:
            if i == self.pg1 or i == self.pg3:
                # neighbours on pg1 and pg3 are still present
                for h in range(N_HOSTS):
                    self.assertTrue(find_nbr(self,
                                             i.sw_if_index,
                                             i.remote_hosts[h].ip4))
                    self.assertTrue(find_nbr(self,
                                             i.sw_if_index,
                                             i.remote_hosts[h].ip6))
            else:
                # all other neighbours are toast
                for h in range(N_HOSTS):
                    self.assertFalse(find_nbr(self,
                                              i.sw_if_index,
                                              i.remote_hosts[h].ip4))
                    self.assertFalse(find_nbr(self,
                                              i.sw_if_index,
                                              i.remote_hosts[h].ip6))
+
+
class NeighborFlush(VppTestCase):
    """ Neighbor Flush """

    @classmethod
    def setUpClass(cls):
        super(NeighborFlush, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(NeighborFlush, cls).tearDownClass()

    def setUp(self):
        super(NeighborFlush, self).setUp()

        self.create_pg_interfaces(range(2))

        # both interfaces get IPv4 and IPv6 addresses and resolved
        # neighbours
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.resolve_arp()
            i.resolve_ndp()

    def tearDown(self):
        super(NeighborFlush, self).tearDown()

        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.admin_down()

    def test_flush(self):
        """ Neighbour Flush """

        e = VppEnum
        # note: 'nf' is currently unused; kept for reference to the
        # neighbour-flags enum
        nf = e.vl_api_ip_neighbor_flags_t
        af = e.vl_api_address_family_t
        N_HOSTS = 16
        static = [False, True]
        self.pg0.generate_remote_hosts(N_HOSTS)
        self.pg1.generate_remote_hosts(N_HOSTS)

        # run the scenario twice: with dynamic then static neighbours
        for s in static:
            # a few v4 and v6 neighbours
            for n in range(N_HOSTS):
                VppNeighbor(self,
                            self.pg0.sw_if_index,
                            self.pg0.remote_hosts[n].mac,
                            self.pg0.remote_hosts[n].ip4,
                            is_static=s).add_vpp_config()
                VppNeighbor(self,
                            self.pg1.sw_if_index,
                            self.pg1.remote_hosts[n].mac,
                            self.pg1.remote_hosts[n].ip6,
                            is_static=s).add_vpp_config()

            # flush the interfaces individually
            self.vapi.ip_neighbor_flush(af.ADDRESS_IP4, self.pg0.sw_if_index)

            # check we haven't flushed that which we shouldn't
            for n in range(N_HOSTS):
                self.assertTrue(find_nbr(self,
                                         self.pg1.sw_if_index,
                                         self.pg1.remote_hosts[n].ip6,
                                         is_static=s))

            self.vapi.ip_neighbor_flush(af.ADDRESS_IP6, self.pg1.sw_if_index)

            for n in range(N_HOSTS):
                self.assertFalse(find_nbr(self,
                                          self.pg0.sw_if_index,
                                          self.pg0.remote_hosts[n].ip4))
                self.assertFalse(find_nbr(self,
                                          self.pg1.sw_if_index,
                                          self.pg1.remote_hosts[n].ip6))

            # add the neighbours back
            for n in range(N_HOSTS):
                VppNeighbor(self,
                            self.pg0.sw_if_index,
                            self.pg0.remote_hosts[n].mac,
                            self.pg0.remote_hosts[n].ip4,
                            is_static=s).add_vpp_config()
                VppNeighbor(self,
                            self.pg1.sw_if_index,
                            self.pg1.remote_hosts[n].mac,
                            self.pg1.remote_hosts[n].ip6,
                            is_static=s).add_vpp_config()

            self.logger.info(self.vapi.cli("sh ip neighbor"))

            # flush both interfaces at the same time (sw_if_index ~0)
            self.vapi.ip_neighbor_flush(af.ADDRESS_IP6, 0xffffffff)

            # check we haven't flushed that which we shouldn't
            for n in range(N_HOSTS):
                self.assertTrue(find_nbr(self,
                                         self.pg0.sw_if_index,
                                         self.pg0.remote_hosts[n].ip4,
                                         is_static=s))

            self.vapi.ip_neighbor_flush(af.ADDRESS_IP4, 0xffffffff)

            for n in range(N_HOSTS):
                self.assertFalse(find_nbr(self,
                                          self.pg0.sw_if_index,
                                          self.pg0.remote_hosts[n].ip4))
                self.assertFalse(find_nbr(self,
                                          self.pg1.sw_if_index,
                                          self.pg1.remote_hosts[n].ip6))