1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Interface util library."""
16 from socket import AF_INET, AF_INET6, inet_ntop
17 from time import sleep
19 from enum import IntEnum
20 from ipaddress import ip_address
21 from robot.api import logger
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.CpuUtils import CpuUtils
25 from resources.libraries.python.DUTSetup import DUTSetup
26 from resources.libraries.python.L2Util import L2Util
27 from resources.libraries.python.PapiExecutor import PapiExecutor
28 from resources.libraries.python.parsers.JsonParser import JsonParser
29 from resources.libraries.python.ssh import SSH, exec_cmd_no_error
30 from resources.libraries.python.topology import NodeType, Topology
31 from resources.libraries.python.VPPUtil import VPPUtil
34 class LinkBondLoadBalance(IntEnum):
35 """Link bonding load balance."""
41 class LinkBondMode(IntEnum):
42 """Link bonding load balance."""
50 class InterfaceUtil(object):
51 """General utilities for managing interfaces"""
53 __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
56 def pci_to_int(pci_str):
57 """Convert PCI address from string format (0000:18:0a.0) to
58 integer representation (169345024).
60 :param pci_str: PCI address in string representation.
62 :returns: Integer representation of PCI address.
65 pci = list(pci_str.split(':')[0:2])
66 pci.extend(pci_str.split(':')[2].split('.'))
68 return (int(pci[0], 16) | int(pci[1], 16) << 16 |
69 int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
72 def get_interface_index(node, interface):
73 """Get interface sw_if_index from topology file.
75 :param node: Node where the interface is.
76 :param interface: Numeric index or name string of a specific interface.
78 :type interface: str or int
79 :returns: SW interface index.
83 sw_if_index = int(interface)
85 sw_if_index = Topology.get_interface_sw_index(node, interface)
86 if sw_if_index is None:
88 Topology.get_interface_sw_index_by_name(node, interface)
89 except TypeError as err:
90 raise TypeError('Wrong interface format {ifc}: {err}'.format(
91 ifc=interface, err=err.message))
96 def set_interface_state(node, interface, state, if_type='key'):
97 """Set interface state on a node.
99 Function can be used for DUTs as well as for TGs.
101 :param node: Node where the interface is.
102 :param interface: Interface key or sw_if_index or name.
103 :param state: One of 'up' or 'down'.
104 :param if_type: Interface type
106 :type interface: str or int
110 :raises ValueError: If the interface type is unknown.
111 :raises ValueError: If the state of interface is unexpected.
112 :raises ValueError: If the node has an unknown node type.
115 if isinstance(interface, basestring):
116 sw_if_index = Topology.get_interface_sw_index(node, interface)
117 iface_name = Topology.get_interface_name(node, interface)
119 sw_if_index = interface
120 elif if_type == 'name':
121 iface_key = Topology.get_interface_by_name(node, interface)
122 if iface_key is not None:
123 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
124 iface_name = interface
126 raise ValueError('Unknown if_type: {type}'.format(type=if_type))
128 if node['type'] == NodeType.DUT:
131 elif state == 'down':
134 raise ValueError('Unexpected interface state: {state}'.format(
136 cmd = 'sw_interface_set_flags'
137 err_msg = 'Failed to set interface state on host {host}'.format(
139 args = dict(sw_if_index=sw_if_index,
140 admin_up_down=admin_up_down)
141 with PapiExecutor(node) as papi_exec:
142 papi_exec.add(cmd, **args).get_replies(err_msg).\
143 verify_reply(err_msg=err_msg)
144 elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
145 cmd = 'ip link set {ifc} {state}'.format(
146 ifc=iface_name, state=state)
147 exec_cmd_no_error(node, cmd, sudo=True)
149 raise ValueError('Node {} has unknown NodeType: "{}"'
150 .format(node['host'], node['type']))
153 def set_interface_ethernet_mtu(node, iface_key, mtu):
154 """Set Ethernet MTU for specified interface.
156 Function can be used only for TGs.
158 :param node: Node where the interface is.
159 :param iface_key: Interface key from topology file.
160 :param mtu: MTU to set.
165 :raises ValueError: If the node type is "DUT".
166 :raises ValueError: If the node has an unknown node type.
168 if node['type'] == NodeType.DUT:
169 raise ValueError('Node {}: Setting Ethernet MTU for interface '
170 'on DUT nodes not supported', node['host'])
171 elif node['type'] == NodeType.TG:
172 iface_name = Topology.get_interface_name(node, iface_key)
173 cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
174 exec_cmd_no_error(node, cmd, sudo=True)
176 raise ValueError('Node {} has unknown NodeType: "{}"'
177 .format(node['host'], node['type']))
180 def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
181 """Set default Ethernet MTU on all interfaces on node.
183 Function can be used only for TGs.
185 :param node: Node where to set default MTU.
189 for ifc in node['interfaces']:
190 InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
193 def vpp_set_interface_mtu(node, interface, mtu=9200):
194 """Set Ethernet MTU on interface.
196 :param node: VPP node.
197 :param interface: Interface to setup MTU. Default: 9200.
198 :param mtu: Ethernet MTU size in Bytes.
200 :type interface: str or int
203 if isinstance(interface, basestring):
204 sw_if_index = Topology.get_interface_sw_index(node, interface)
206 sw_if_index = interface
208 cmd = 'hw_interface_set_mtu'
209 err_msg = 'Failed to set interface MTU on host {host}'.format(
211 args = dict(sw_if_index=sw_if_index,
213 with PapiExecutor(node) as papi_exec:
214 papi_exec.add(cmd, **args).get_replies(err_msg).\
215 verify_reply(err_msg=err_msg)
218 def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
219 """Set Ethernet MTU on all interfaces.
221 :param node: VPP node.
222 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
226 for interface in node['interfaces']:
227 InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
230 def vpp_set_interfaces_mtu_on_all_duts(nodes, mtu=9200):
231 """Set Ethernet MTU on all interfaces on all DUTs.
233 :param nodes: VPP nodes.
234 :param mtu: Ethernet MTU size in Bytes. Default: 9200.
238 for node in nodes.values():
239 if node['type'] == NodeType.DUT:
240 InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
243 def vpp_node_interfaces_ready_wait(node, retries=15):
244 """Wait until all interfaces with admin-up are in link-up state.
246 :param node: Node to wait on.
247 :param retries: Number of retries to check interface status (optional,
252 :raises RuntimeError: If any interface is not in link-up state after
253 defined number of retries.
255 for _ in xrange(0, retries):
257 out = InterfaceUtil.vpp_get_interface_data(node)
258 for interface in out:
259 if interface.get('admin_up_down') == 1:
260 if interface.get('link_up_down') != 1:
261 not_ready.append(interface.get('interface_name'))
265 logger.debug('Interfaces still in link-down state:\n{ifs} '
266 '\nWaiting...'.format(ifs=not_ready))
269 err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
270 if 'not_ready' in locals() else 'No check executed!'
271 raise RuntimeError(err)
274 def all_vpp_interfaces_ready_wait(nodes, retries=15):
275 """Wait until all interfaces with admin-up are in link-up state for all
276 nodes in the topology.
278 :param nodes: Nodes in the topology.
279 :param retries: Number of retries to check interface status (optional,
285 for node in nodes.values():
286 if node['type'] == NodeType.DUT:
287 InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
290 def vpp_get_interface_data(node, interface=None):
291 """Get all interface data from a VPP node. If a name or
292 sw_interface_index is provided, return only data for the matching
295 :param node: VPP node to get interface data from.
296 :param interface: Numeric index or name string of a specific interface.
298 :type interface: int or str
299 :returns: List of dictionaries containing data for each interface, or a
300 single dictionary for the specified interface.
302 :raises TypeError: if the data type of interface is neither basestring
305 if interface is not None:
306 if isinstance(interface, basestring):
307 param = 'interface_name'
308 elif isinstance(interface, int):
309 param = 'sw_if_index'
311 raise TypeError('Wrong interface format {ifc}'.format(
316 cmd = 'sw_interface_dump'
317 cmd_reply = 'sw_interface_details'
318 args = dict(name_filter_valid=0,
320 err_msg = 'Failed to get interface dump on host {host}'.format(
322 with PapiExecutor(node) as papi_exec:
323 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
325 papi_if_dump = papi_resp.reply[0]['api_reply']
327 def process_if_dump(if_dump):
328 """Process interface dump.
330 :param if_dump: Interface dump.
332 :returns: Processed interface dump.
335 if_dump['interface_name'] = if_dump['interface_name'].rstrip('\x00')
336 if_dump['tag'] = if_dump['tag'].rstrip('\x00')
337 if_dump['l2_address'] = L2Util.bin_to_mac(if_dump['l2_address'])
338 if_dump['b_dmac'] = L2Util.bin_to_mac(if_dump['b_dmac'])
339 if_dump['b_smac'] = L2Util.bin_to_mac(if_dump['b_smac'])
342 data = list() if interface is None else dict()
343 for item in papi_if_dump:
344 if interface is None:
345 data.append(process_if_dump(item[cmd_reply]))
346 elif str(item[cmd_reply].get(param)).rstrip('\x00') == \
348 data = process_if_dump(item[cmd_reply])
351 logger.debug('Interface data:\n{if_data}'.format(if_data=data))
355 def vpp_get_interface_name(node, sw_if_index):
356 """Get interface name for the given SW interface index from actual
359 :param node: VPP node to get interface data from.
360 :param sw_if_index: SW interface index of the specific interface.
362 :type sw_if_index: int
363 :returns: Name of the given interface.
366 if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
367 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
368 if_data = InterfaceUtil.vpp_get_interface_data(
369 node, if_data['sup_sw_if_index'])
371 return if_data.get('interface_name')
374 def vpp_get_interface_sw_index(node, interface_name):
375 """Get interface name for the given SW interface index from actual
378 :param node: VPP node to get interface data from.
379 :param interface_name: Interface name.
381 :type interface_name: str
382 :returns: Name of the given interface.
385 if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
387 return if_data.get('sw_if_index')
390 def vpp_get_interface_mac(node, interface):
391 """Get MAC address for the given interface from actual interface dump.
393 :param node: VPP node to get interface data from.
394 :param interface: Numeric index or name string of a specific interface.
396 :type interface: int or str
397 :returns: MAC address.
400 if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
401 if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
402 if_data = InterfaceUtil.vpp_get_interface_data(
403 node, if_data['sup_sw_if_index'])
405 return if_data.get('l2_address')
408 def tg_set_interface_driver(node, pci_addr, driver):
409 """Set interface driver on the TG node.
411 :param node: Node to set interface driver on (must be TG node).
412 :param pci_addr: PCI address of the interface.
413 :param driver: Driver name.
417 :raises RuntimeError: If unbinding from the current driver fails.
418 :raises RuntimeError: If binding to the new driver fails.
420 old_driver = InterfaceUtil.tg_get_interface_driver(node, pci_addr)
421 if old_driver == driver:
427 # Unbind from current driver
428 if old_driver is not None:
429 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
430 .format(pci_addr, old_driver)
431 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
432 if int(ret_code) != 0:
433 raise RuntimeError("'{0}' failed on '{1}'"
434 .format(cmd, node['host']))
436 # Bind to the new driver
437 cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
438 .format(pci_addr, driver)
439 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
440 if int(ret_code) != 0:
441 raise RuntimeError("'{0}' failed on '{1}'"
442 .format(cmd, node['host']))
445 def tg_get_interface_driver(node, pci_addr):
446 """Get interface driver from the TG node.
448 :param node: Node to get interface driver on (must be TG node).
449 :param pci_addr: PCI address of the interface.
452 :returns: Interface driver or None if not found.
454 :raises RuntimeError: If PCI rescan or lspci command execution failed.
456 return DUTSetup.get_pci_dev_driver(node, pci_addr)
459 def tg_set_interfaces_udev_rules(node):
460 """Set udev rules for interfaces.
462 Create udev rules file in /etc/udev/rules.d where are rules for each
463 interface used by TG node, based on MAC interface has specific name.
464 So after unbind and bind again to kernel driver interface has same
465 name as before. This must be called after TG has set name for each
466 port in topology dictionary.
468 SUBSYSTEM=="net", ACTION=="add", ATTR{address}=="52:54:00:e1:8a:0f",
471 :param node: Node to set udev rules on (must be TG node).
473 :raises RuntimeError: If setting of udev rules fails.
478 cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
479 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
480 if int(ret_code) != 0:
481 raise RuntimeError("'{0}' failed on '{1}'"
482 .format(cmd, node['host']))
484 for interface in node['interfaces'].values():
485 rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
486 '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
487 interface['name'] + '\\"'
488 cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
489 rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
490 (ret_code, _, _) = ssh.exec_command_sudo(cmd)
491 if int(ret_code) != 0:
492 raise RuntimeError("'{0}' failed on '{1}'"
493 .format(cmd, node['host']))
495 cmd = '/etc/init.d/udev restart'
496 ssh.exec_command_sudo(cmd)
499 def tg_set_interfaces_default_driver(node):
500 """Set interfaces default driver specified in topology yaml file.
502 :param node: Node to setup interfaces driver on (must be TG node).
505 for interface in node['interfaces'].values():
506 InterfaceUtil.tg_set_interface_driver(node,
507 interface['pci_address'],
511 def update_vpp_interface_data_on_node(node):
512 """Update vpp generated interface data for a given node in DICT__nodes.
514 Updates interface names, software if index numbers and any other details
515 generated specifically by vpp that are unknown before testcase run.
516 It does this by dumping interface list from all devices using python
517 api, and pairing known information from topology (mac address) to state
520 :param node: Node selected from DICT__nodes.
523 interface_list = InterfaceUtil.vpp_get_interface_data(node)
524 interface_dict = dict()
525 for ifc in interface_list:
526 interface_dict[ifc['l2_address']] = ifc
528 for if_name, if_data in node['interfaces'].items():
529 ifc_dict = interface_dict.get(if_data['mac_address'])
530 if ifc_dict is not None:
531 if_data['name'] = ifc_dict['interface_name']
532 if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
533 if_data['mtu'] = ifc_dict['mtu'][0]
534 logger.trace('Interface {ifc} found by MAC {mac}'.format(
535 ifc=if_name, mac=if_data['mac_address']))
537 logger.trace('Interface {ifc} not found by MAC {mac}'.format(
538 ifc=if_name, mac=if_data['mac_address']))
539 if_data['vpp_sw_index'] = None
542 def update_nic_interface_names(node):
543 """Update interface names based on nic type and PCI address.
545 This method updates interface names in the same format as VPP does.
547 :param node: Node dictionary.
550 for ifc in node['interfaces'].values():
551 if_pci = ifc['pci_address'].replace('.', ':').split(':')
552 bus = '{:x}'.format(int(if_pci[1], 16))
553 dev = '{:x}'.format(int(if_pci[2], 16))
554 fun = '{:x}'.format(int(if_pci[3], 16))
555 loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
556 if ifc['model'] == 'Intel-XL710':
557 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
558 elif ifc['model'] == 'Intel-X710':
559 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
560 elif ifc['model'] == 'Intel-X520-DA2':
561 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
562 elif ifc['model'] == 'Cisco-VIC-1385':
563 ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
564 elif ifc['model'] == 'Cisco-VIC-1227':
565 ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
567 ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
570 def update_nic_interface_names_on_all_duts(nodes):
571 """Update interface names based on nic type and PCI address on all DUTs.
573 This method updates interface names in the same format as VPP does.
575 :param nodes: Topology nodes.
578 for node in nodes.values():
579 if node['type'] == NodeType.DUT:
580 InterfaceUtil.update_nic_interface_names(node)
583 def update_tg_interface_data_on_node(node, skip_tg_udev=False):
584 """Update interface name for TG/linux node in DICT__nodes.
587 # for dev in `ls /sys/class/net/`;
588 > do echo "\"`cat /sys/class/net/$dev/address`\": \"$dev\""; done
589 "52:54:00:9f:82:63": "eth0"
590 "52:54:00:77:ae:a9": "eth1"
591 "52:54:00:e1:8a:0f": "eth2"
592 "00:00:00:00:00:00": "lo"
594 :param node: Node selected from DICT__nodes.
595 :param skip_tg_udev: Skip udev rename on TG node.
597 :type skip_tg_udev: bool
598 :raises RuntimeError: If getting of interface name and MAC fails.
600 # First setup interface driver specified in yaml file
601 InterfaceUtil.tg_set_interfaces_default_driver(node)
603 # Get interface names
607 cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
608 '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
610 (ret_code, stdout, _) = ssh.exec_command(cmd)
611 if int(ret_code) != 0:
612 raise RuntimeError('Get interface name and MAC failed')
613 tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
614 interfaces = JsonParser().parse_data(tmp)
615 for interface in node['interfaces'].values():
616 name = interfaces.get(interface['mac_address'])
619 interface['name'] = name
621 # Set udev rules for interfaces
623 InterfaceUtil.tg_set_interfaces_udev_rules(node)
626 def iface_update_numa_node(node):
627 """For all interfaces from topology file update numa node based on
628 information from the node.
630 :param node: Node from topology.
633 :raises ValueError: If numa node ia less than 0.
634 :raises RuntimeError: If update of numa node failes.
637 for if_key in Topology.get_node_interfaces(node):
638 if_pci = Topology.get_interface_pci_addr(node, if_key)
640 cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
642 (ret, out, _) = ssh.exec_command(cmd)
647 if CpuUtils.cpu_node_count(node) == 1:
652 logger.trace('Reading numa location failed for: {0}'
655 Topology.set_interface_numa_node(node, if_key,
659 raise RuntimeError('Update numa node failed for: {0}'
663 def update_all_numa_nodes(nodes, skip_tg=False):
664 """For all nodes and all their interfaces from topology file update numa
665 node information based on information from the node.
667 :param nodes: Nodes in the topology.
668 :param skip_tg: Skip TG node
673 for node in nodes.values():
674 if node['type'] == NodeType.DUT:
675 InterfaceUtil.iface_update_numa_node(node)
676 elif node['type'] == NodeType.TG and not skip_tg:
677 InterfaceUtil.iface_update_numa_node(node)
680 def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
683 """Update interface names on all nodes in DICT__nodes.
685 This method updates the topology dictionary by querying interface lists
686 of all nodes mentioned in the topology dictionary.
688 :param nodes: Nodes in the topology.
689 :param skip_tg: Skip TG node.
690 :param skip_tg_udev: Skip udev rename on TG node.
691 :param numa_node: Retrieve numa_node location.
694 :type skip_tg_udev: bool
695 :type numa_node: bool
697 for node_data in nodes.values():
698 if node_data['type'] == NodeType.DUT:
699 InterfaceUtil.update_vpp_interface_data_on_node(node_data)
700 elif node_data['type'] == NodeType.TG and not skip_tg:
701 InterfaceUtil.update_tg_interface_data_on_node(
702 node_data, skip_tg_udev)
705 if node_data['type'] == NodeType.DUT:
706 InterfaceUtil.iface_update_numa_node(node_data)
707 elif node_data['type'] == NodeType.TG and not skip_tg:
708 InterfaceUtil.iface_update_numa_node(node_data)
711 def create_vlan_subinterface(node, interface, vlan):
712 """Create VLAN sub-interface on node.
714 :param node: Node to add VLAN subinterface on.
715 :param interface: Interface name on which create VLAN subinterface.
716 :param vlan: VLAN ID of the subinterface to be created.
720 :returns: Name and index of created subinterface.
722 :raises RuntimeError: if it is unable to create VLAN subinterface on the
725 iface_key = Topology.get_interface_by_name(node, interface)
726 sw_if_index = Topology.get_interface_sw_index(node, iface_key)
728 cmd = 'create_vlan_subif'
729 args = dict(sw_if_index=sw_if_index,
731 err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
733 with PapiExecutor(node) as papi_exec:
734 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
735 verify_reply(err_msg=err_msg)
737 sw_if_idx = papi_resp['sw_if_index']
738 if_key = Topology.add_new_port(node, 'vlan_subif')
739 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
740 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
741 Topology.update_interface_name(node, if_key, ifc_name)
743 return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_idx
746 def create_vxlan_interface(node, vni, source_ip, destination_ip):
747 """Create VXLAN interface and return sw if index of created interface.
749 :param node: Node where to create VXLAN interface.
750 :param vni: VXLAN Network Identifier.
751 :param source_ip: Source IP of a VXLAN Tunnel End Point.
752 :param destination_ip: Destination IP of a VXLAN Tunnel End Point.
756 :type destination_ip: str
757 :returns: SW IF INDEX of created interface.
759 :raises RuntimeError: if it is unable to create VxLAN interface on the
762 src_address = ip_address(unicode(source_ip))
763 dst_address = ip_address(unicode(destination_ip))
765 cmd = 'vxlan_add_del_tunnel'
766 args = dict(is_add=1,
767 is_ipv6=1 if src_address.version == 6 else 0,
768 instance=Constants.BITWISE_NON_ZERO,
769 src_address=src_address.packed,
770 dst_address=dst_address.packed,
771 mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
773 decap_next_index=Constants.BITWISE_NON_ZERO,
775 err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
776 format(host=node['host'])
777 with PapiExecutor(node) as papi_exec:
778 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
779 verify_reply(err_msg=err_msg)
781 sw_if_idx = papi_resp['sw_if_index']
782 if_key = Topology.add_new_port(node, 'vxlan_tunnel')
783 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
784 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
785 Topology.update_interface_name(node, if_key, ifc_name)
790 def vxlan_dump(node, interface=None):
791 """Get VxLAN data for the given interface.
793 :param node: VPP node to get interface data from.
794 :param interface: Numeric index or name string of a specific interface.
795 If None, information about all VxLAN interfaces is returned.
797 :type interface: int or str
798 :returns: Dictionary containing data for the given VxLAN interface or if
799 interface=None, the list of dictionaries with all VxLAN interfaces.
801 :raises TypeError: if the data type of interface is neither basestring
804 if interface is not None:
805 sw_if_index = InterfaceUtil.get_interface_index(node, interface)
807 sw_if_index = int(Constants.BITWISE_NON_ZERO)
809 cmd = 'vxlan_tunnel_dump'
810 cmd_reply = 'vxlan_tunnel_details'
811 args = dict(sw_if_index=sw_if_index)
812 err_msg = 'Failed to get VXLAN dump on host {host}'.format(
814 with PapiExecutor(node) as papi_exec:
815 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
817 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
819 def process_vxlan_dump(vxlan_dump):
820 """Process vxlan dump.
822 :param vxlan_dump: Vxlan interface dump.
823 :type vxlan_dump: dict
824 :returns: Processed vxlan interface dump.
827 if vxlan_dump['is_ipv6']:
828 vxlan_dump['src_address'] = \
829 inet_ntop(AF_INET6, vxlan_dump['src_address'])
830 vxlan_dump['dst_address'] = \
831 inet_ntop(AF_INET6, vxlan_dump['dst_address'])
833 vxlan_dump['src_address'] = \
834 inet_ntop(AF_INET, vxlan_dump['src_address'][0:4])
835 vxlan_dump['dst_address'] = \
836 inet_ntop(AF_INET, vxlan_dump['dst_address'][0:4])
839 data = list() if interface is None else dict()
840 for item in papi_vxlan_dump:
841 if interface is None:
842 data.append(process_vxlan_dump(item[cmd_reply]))
843 elif item[cmd_reply]['sw_if_index'] == sw_if_index:
844 data = process_vxlan_dump(item[cmd_reply])
847 logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
851 def vhost_user_dump(node):
852 """Get vhost-user data for the given node.
854 TODO: Move to VhostUser.py
856 :param node: VPP node to get interface data from.
858 :returns: List of dictionaries with all vhost-user interfaces.
861 cmd = 'sw_interface_vhost_user_dump'
862 cmd_reply = 'sw_interface_vhost_user_details'
863 err_msg = 'Failed to get vhost-user dump on host {host}'.format(
865 with PapiExecutor(node) as papi_exec:
866 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
868 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
870 def process_vhost_dump(vhost_dump):
871 """Process vhost dump.
873 :param vhost_dump: Vhost interface dump.
874 :type vhost_dump: dict
875 :returns: Processed vhost interface dump.
878 vhost_dump['interface_name'] = \
879 vhost_dump['interface_name'].rstrip('\x00')
880 vhost_dump['sock_filename'] = \
881 vhost_dump['sock_filename'].rstrip('\x00')
885 for item in papi_vxlan_dump:
886 data.append(process_vhost_dump(item[cmd_reply]))
888 logger.debug('Vhost-user data:\n{vhost_data}'.format(vhost_data=data))
892 def tap_dump(node, name=None):
893 """Get all TAP interface data from the given node, or data about
894 a specific TAP interface.
898 :param node: VPP node to get data from.
899 :param name: Optional name of a specific TAP interface.
902 :returns: Dictionary of information about a specific TAP interface, or
903 a List of dictionaries containing all TAP data for the given node.
906 cmd = 'sw_interface_tap_v2_dump'
907 cmd_reply = 'sw_interface_tap_v2_details'
908 err_msg = 'Failed to get TAP dump on host {host}'.format(
910 with PapiExecutor(node) as papi_exec:
911 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
913 papi_tap_dump = papi_resp.reply[0]['api_reply']
915 def process_tap_dump(tap_dump):
918 :param tap_dump: Tap interface dump.
920 :returns: Processed tap interface dump.
923 tap_dump['dev_name'] = tap_dump['dev_name'].rstrip('\x00')
924 tap_dump['host_if_name'] = tap_dump['host_if_name'].rstrip('\x00')
925 tap_dump['host_namespace'] = \
926 tap_dump['host_namespace'].rstrip('\x00')
927 tap_dump['host_mac_addr'] = \
928 L2Util.bin_to_mac(tap_dump['host_mac_addr'])
929 tap_dump['host_ip4_addr'] = \
930 inet_ntop(AF_INET, tap_dump['host_ip4_addr'])
931 tap_dump['host_ip6_addr'] = \
932 inet_ntop(AF_INET6, tap_dump['host_ip6_addr'])
935 data = list() if name is None else dict()
936 for item in papi_tap_dump:
938 data.append(process_tap_dump(item[cmd_reply]))
939 elif item[cmd_reply].get('dev_name').rstrip('\x00') == name:
940 data = process_tap_dump(item[cmd_reply])
943 logger.debug('TAP data:\n{tap_data}'.format(tap_data=data))
947 def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
948 inner_vlan_id=None, type_subif=None):
949 """Create sub-interface on node. It is possible to set required
950 sub-interface type and VLAN tag(s).
952 :param node: Node to add sub-interface.
953 :param interface: Interface name on which create sub-interface.
954 :param sub_id: ID of the sub-interface to be created.
955 :param outer_vlan_id: Optional outer VLAN ID.
956 :param inner_vlan_id: Optional inner VLAN ID.
957 :param type_subif: Optional type of sub-interface. Values supported by
958 VPP: [no_tags] [one_tag] [two_tags] [dot1ad] [exact_match]
961 :type interface: str or int
963 :type outer_vlan_id: int
964 :type inner_vlan_id: int
965 :type type_subif: str
966 :returns: Name and index of created sub-interface.
968 :raises RuntimeError: If it is not possible to create sub-interface.
970 subif_types = type_subif.split()
974 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
976 no_tags=1 if 'no_tags' in subif_types else 0,
977 one_tag=1 if 'one_tag' in subif_types else 0,
978 two_tags=1 if 'two_tags' in subif_types else 0,
979 dot1ad=1 if 'dot1ad' in subif_types else 0,
980 exact_match=1 if 'exact_match' in subif_types else 0,
981 default_sub=1 if 'default_sub' in subif_types else 0,
982 outer_vlan_id_any=1 if type_subif == 'default_sub' else 0,
983 inner_vlan_id_any=1 if type_subif == 'default_sub' else 0,
984 outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
985 inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0)
986 err_msg = 'Failed to create sub-interface on host {host}'.format(
988 with PapiExecutor(node) as papi_exec:
989 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
990 verify_reply(err_msg=err_msg)
992 sw_subif_idx = papi_resp['sw_if_index']
993 if_key = Topology.add_new_port(node, 'subinterface')
994 Topology.update_interface_sw_if_index(node, if_key, sw_subif_idx)
995 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_subif_idx)
996 Topology.update_interface_name(node, if_key, ifc_name)
998 return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_subif_idx
1001 def create_gre_tunnel_interface(node, source_ip, destination_ip):
1002 """Create GRE tunnel interface on node.
1004 :param node: VPP node to add tunnel interface.
1005 :param source_ip: Source of the GRE tunnel.
1006 :param destination_ip: Destination of the GRE tunnel.
1008 :type source_ip: str
1009 :type destination_ip: str
1010 :returns: Name and index of created GRE tunnel interface.
1012 :raises RuntimeError: If unable to create GRE tunnel interface.
1014 cmd = 'gre_tunnel_add_del'
1015 tunnel = dict(type=0,
1016 instance=Constants.BITWISE_NON_ZERO,
1018 dst=str(destination_ip),
1021 args = dict(is_add=1,
1023 err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
1025 with PapiExecutor(node) as papi_exec:
1026 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1027 verify_reply(err_msg=err_msg)
1029 sw_if_idx = papi_resp['sw_if_index']
1030 if_key = Topology.add_new_port(node, 'gre_tunnel')
1031 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1032 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1033 Topology.update_interface_name(node, if_key, ifc_name)
1035 return ifc_name, sw_if_idx
1038 def vpp_create_loopback(node):
1039 """Create loopback interface on VPP node.
1041 :param node: Node to create loopback interface on.
1043 :returns: SW interface index.
1045 :raises RuntimeError: If it is not possible to create loopback on the
1048 cmd = 'create_loopback'
1049 args = dict(mac_address=0)
1050 err_msg = 'Failed to create loopback interface on host {host}'.format(
1052 with PapiExecutor(node) as papi_exec:
1053 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1054 verify_reply(err_msg=err_msg)
1056 sw_if_idx = papi_resp['sw_if_index']
1057 if_key = Topology.add_new_port(node, 'loopback')
1058 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1059 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1060 Topology.update_interface_name(node, if_key, ifc_name)
1065 def vpp_create_bond_interface(node, mode, load_balance=None, mac=None):
1066 """Create bond interface on VPP node.
1068 :param node: DUT node from topology.
1069 :param mode: Link bonding mode.
1070 :param load_balance: Load balance (optional, valid for xor and lacp
1071 modes, otherwise ignored).
1072 :param mac: MAC address to assign to the bond interface (optional).
1075 :type load_balance: str
1077 :returns: Interface key (name) in topology.
1079 :raises RuntimeError: If it is not possible to create bond interface on
1083 args = dict(id=int(Constants.BITWISE_NON_ZERO),
1084 use_custom_mac=0 if mac is None else 1,
1085 mac_address=0 if mac is None else L2Util.mac_to_bin(mac),
1086 mode=getattr(LinkBondMode, '{md}'.format(
1087 md=mode.replace('-', '_').upper())).value,
1088 lb=0 if load_balance is None else getattr(
1089 LinkBondLoadBalance, '{lb}'.format(
1090 lb=load_balance.upper())).value)
1091 err_msg = 'Failed to create bond interface on host {host}'.format(
1093 with PapiExecutor(node) as papi_exec:
1094 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1095 verify_reply(err_msg=err_msg)
1097 sw_if_idx = papi_resp['sw_if_index']
1098 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1100 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
1105 def add_eth_interface(node, ifc_name=None, sw_if_idx=None, ifc_pfx=None):
1106 """Add ethernet interface to current topology.
1108 :param node: DUT node from topology.
1109 :param ifc_name: Name of the interface.
1110 :param sw_if_idx: SW interface index.
1111 :param ifc_pfx: Interface key prefix.
1114 :type sw_if_idx: int
1117 if_key = Topology.add_new_port(node, ifc_pfx)
1119 if ifc_name and sw_if_idx is None:
1120 sw_if_idx = InterfaceUtil.vpp_get_interface_sw_index(node, ifc_name)
1121 Topology.update_interface_sw_if_index(node, if_key, sw_if_idx)
1122 if sw_if_idx and ifc_name is None:
1123 ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_idx)
1124 Topology.update_interface_name(node, if_key, ifc_name)
1125 ifc_mac = InterfaceUtil.vpp_get_interface_mac(node, sw_if_idx)
1126 Topology.update_interface_mac_address(node, if_key, ifc_mac)
1129 def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
1130 """Create AVF interface on VPP node.
1132 :param node: DUT node from topology.
1133 :param vf_pci_addr: Virtual Function PCI address.
1134 :param num_rx_queues: Number of RX queues.
1136 :type vf_pci_addr: str
1137 :type num_rx_queues: int
1138 :returns: Interface key (name) in topology.
1140 :raises RuntimeError: If it is not possible to create AVF interface on
1144 args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
1146 rxq_num=int(num_rx_queues) if num_rx_queues else 0,
1149 err_msg = 'Failed to create AVF interface on host {host}'.format(
1151 with PapiExecutor(node) as papi_exec:
1152 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg).\
1153 verify_reply(err_msg=err_msg)
1155 sw_if_idx = papi_resp['sw_if_index']
1156 InterfaceUtil.add_eth_interface(node, sw_if_idx=sw_if_idx,
1158 if_key = Topology.get_interface_by_sw_index(node, sw_if_idx)
1163 def vpp_enslave_physical_interface(node, interface, bond_if):
1164 """Enslave physical interface to bond interface on VPP node.
1166 :param node: DUT node from topology.
1167 :param interface: Physical interface key from topology file.
1168 :param bond_if: Load balance
1170 :type interface: str
1172 :raises RuntimeError: If it is not possible to enslave physical
1173 interface to bond interface on the node.
1175 cmd = 'bond_enslave'
1177 sw_if_index=Topology.get_interface_sw_index(node, interface),
1178 bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
1181 err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
1182 'interface {bond} on host {host}'.format(ifc=interface,
1185 with PapiExecutor(node) as papi_exec:
1186 papi_exec.add(cmd, **args).get_replies(err_msg).\
1187 verify_reply(err_msg=err_msg)
1190 def vpp_show_bond_data_on_node(node, details=False):
1191 """Show (detailed) bond information on VPP node.
1193 :param node: DUT node from topology.
1194 :param details: If detailed information is required or not.
1198 cmd = 'sw_interface_bond_dump'
1199 cmd_reply = 'sw_interface_bond_details'
1200 err_msg = 'Failed to get bond interface dump on host {host}'.format(
1203 data = ('Bond data on node {host}:\n'.format(host=node['host']))
1204 with PapiExecutor(node) as papi_exec:
1205 papi_resp = papi_exec.add(cmd).get_dump(err_msg)
1207 papi_dump = papi_resp.reply[0]['api_reply']
1208 for item in papi_dump:
1209 data += ('{b}\n'.format(b=item[cmd_reply]['interface_name'].
1211 data += (' mode: {m}\n'.
1212 format(m=LinkBondMode(item[cmd_reply]['mode']).name.
1214 data += (' load balance: {lb}\n'.
1215 format(lb=LinkBondLoadBalance(item[cmd_reply]['lb']).name.
1217 data += (' number of active slaves: {n}\n'.
1218 format(n=item[cmd_reply]['active_slaves']))
1220 slave_data = InterfaceUtil.vpp_bond_slave_dump(
1221 node, Topology.get_interface_by_sw_index(
1222 node, item[cmd_reply]['sw_if_index']))
1223 for slave in slave_data:
1224 if not slave['is_passive']:
1225 data += (' {s}\n'.format(s=slave['interface_name']))
1226 data += (' number of slaves: {n}\n'.
1227 format(n=item[cmd_reply]['slaves']))
1229 for slave in slave_data:
1230 data += (' {s}\n'.format(s=slave['interface_name']))
1231 data += (' interface id: {i}\n'.
1232 format(i=item[cmd_reply]['id']))
1233 data += (' sw_if_index: {i}\n'.
1234 format(i=item[cmd_reply]['sw_if_index']))
1238 def vpp_bond_slave_dump(node, interface):
1239 """Get bond interface slave(s) data on VPP node.
1241 :param node: DUT node from topology.
1242 :param interface: Physical interface key from topology file.
1244 :type interface: str
1245 :returns: Bond slave interface data.
1248 cmd = 'sw_interface_slave_dump'
1249 cmd_reply = 'sw_interface_slave_details'
1250 args = dict(sw_if_index=Topology.get_interface_sw_index(
1252 err_msg = 'Failed to get slave dump on host {host}'.format(
1255 with PapiExecutor(node) as papi_exec:
1256 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1258 papi_dump = papi_resp.reply[0]['api_reply']
1260 def process_slave_dump(slave_dump):
1261 """Process slave dump.
1263 :param slave_dump: Slave interface dump.
1264 :type slave_dump: dict
1265 :returns: Processed slave interface dump.
1268 slave_dump['interface_name'] = slave_dump['interface_name'].\
1273 for item in papi_dump:
1274 data.append(process_slave_dump(item[cmd_reply]))
1276 logger.debug('Slave data:\n{slave_data}'.format(slave_data=data))
1280 def vpp_show_bond_data_on_all_nodes(nodes, details=False):
1281 """Show (detailed) bond information on all VPP nodes in DICT__nodes.
1283 :param nodes: Nodes in the topology.
1284 :param details: If detailed information is required or not.
1288 for node_data in nodes.values():
1289 if node_data['type'] == NodeType.DUT:
1290 InterfaceUtil.vpp_show_bond_data_on_node(node_data, details)
1293 def vpp_enable_input_acl_interface(node, interface, ip_version,
1295 """Enable input acl on interface.
1297 :param node: VPP node to setup interface for input acl.
1298 :param interface: Interface to setup input acl.
1299 :param ip_version: Version of IP protocol.
1300 :param table_index: Classify table index.
1302 :type interface: str or int
1303 :type ip_version: str
1304 :type table_index: int
1306 cmd = 'input_acl_set_interface'
1308 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1309 ip4_table_index=table_index if ip_version == 'ip4'
1310 else Constants.BITWISE_NON_ZERO,
1311 ip6_table_index=table_index if ip_version == 'ip6'
1312 else Constants.BITWISE_NON_ZERO,
1313 l2_table_index=table_index if ip_version == 'l2'
1314 else Constants.BITWISE_NON_ZERO,
1316 err_msg = 'Failed to enable input acl on interface {ifc}'.format(
1318 with PapiExecutor(node) as papi_exec:
1319 papi_exec.add(cmd, **args).get_replies(err_msg).\
1320 verify_reply(err_msg=err_msg)
1323 def get_interface_classify_table(node, interface):
1324 """Get name of classify table for the given interface.
1326 TODO: Move to Classify.py.
1328 :param node: VPP node to get data from.
1329 :param interface: Name or sw_if_index of a specific interface.
1331 :type interface: str or int
1332 :returns: Classify table name.
1335 if isinstance(interface, basestring):
1336 sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
1338 sw_if_index = interface
1340 cmd = 'classify_table_by_interface'
1341 args = dict(sw_if_index=sw_if_index)
1342 err_msg = 'Failed to get classify table name by interface {ifc}'.format(
1344 with PapiExecutor(node) as papi_exec:
1345 papi_resp = papi_exec.add(cmd, **args).get_replies(err_msg). \
1346 verify_reply(err_msg=err_msg)
1351 def get_sw_if_index(node, interface_name):
1352 """Get sw_if_index for the given interface from actual interface dump.
1354 :param node: VPP node to get interface data from.
1355 :param interface_name: Name of the specific interface.
1357 :type interface_name: str
1358 :returns: sw_if_index of the given interface.
1361 interface_data = InterfaceUtil.vpp_get_interface_data(
1362 node, interface=interface_name)
1363 return interface_data.get('sw_if_index')
1366 def vxlan_gpe_dump(node, interface_name=None):
1367 """Get VxLAN GPE data for the given interface.
1369 :param node: VPP node to get interface data from.
1370 :param interface_name: Name of the specific interface. If None,
1371 information about all VxLAN GPE interfaces is returned.
1373 :type interface_name: str
1374 :returns: Dictionary containing data for the given VxLAN GPE interface
1375 or if interface=None, the list of dictionaries with all VxLAN GPE
1377 :rtype: dict or list
1379 if interface_name is not None:
1380 sw_if_index = InterfaceUtil.get_interface_index(
1381 node, interface_name)
1383 sw_if_index = int(Constants.BITWISE_NON_ZERO)
1385 cmd = 'vxlan_gpe_tunnel_dump'
1386 cmd_reply = 'vxlan_gpe_tunnel_details'
1387 args = dict(sw_if_index=sw_if_index)
1388 err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
1390 with PapiExecutor(node) as papi_exec:
1391 papi_resp = papi_exec.add(cmd, **args).get_dump(err_msg)
1393 papi_vxlan_dump = papi_resp.reply[0]['api_reply']
1395 def process_vxlan_gpe_dump(vxlan_dump):
1396 """Process vxlan_gpe dump.
1398 :param vxlan_dump: Vxlan_gpe nterface dump.
1399 :type vxlan_dump: dict
1400 :returns: Processed vxlan_gpe interface dump.
1403 if vxlan_dump['is_ipv6']:
1404 vxlan_dump['local'] = \
1405 inet_ntop(AF_INET6, vxlan_dump['local'])
1406 vxlan_dump['remote'] = \
1407 inet_ntop(AF_INET6, vxlan_dump['remote'])
1409 vxlan_dump['local'] = \
1410 inet_ntop(AF_INET, vxlan_dump['local'][0:4])
1411 vxlan_dump['remote'] = \
1412 inet_ntop(AF_INET, vxlan_dump['remote'][0:4])
1415 data = list() if interface_name is None else dict()
1416 for item in papi_vxlan_dump:
1417 if interface_name is None:
1418 data.append(process_vxlan_gpe_dump(item[cmd_reply]))
1419 elif item[cmd_reply]['sw_if_index'] == sw_if_index:
1420 data = process_vxlan_gpe_dump(item[cmd_reply])
1423 logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
1424 vxlan_gpe_data=data))
1428 def assign_interface_to_fib_table(node, interface, table_id, ipv6=False):
1429 """Assign VPP interface to specific VRF/FIB table.
1431 :param node: VPP node where the FIB and interface are located.
1432 :param interface: Interface to be assigned to FIB.
1433 :param table_id: VRF table ID.
1434 :param ipv6: Assign to IPv6 table. Default False.
1436 :type interface: str or int
1440 cmd = 'sw_interface_set_table'
1442 sw_if_index=InterfaceUtil.get_interface_index(node, interface),
1443 is_ipv6=1 if ipv6 else 0,
1444 vrf_id=int(table_id))
1445 err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
1447 with PapiExecutor(node) as papi_exec:
1448 papi_exec.add(cmd, **args).get_replies(err_msg). \
1449 verify_reply(err_msg=err_msg)
1452 def set_linux_interface_mac(node, interface, mac, namespace=None,
1454 """Set MAC address for interface in linux.
1456 :param node: Node where to execute command.
1457 :param interface: Interface in namespace.
1458 :param mac: MAC to be assigned to interface.
1459 :param namespace: Execute command in namespace. Optional
1460 :param vf_id: Virtual Function id. Optional
1462 :type interface: str
1464 :type namespace: str
1467 mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
1468 if vf_id is not None else 'address {mac}'.format(mac=mac)
1469 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1471 cmd = ('{ns} ip link set {interface} {mac}'.
1472 format(ns=ns_str, interface=interface, mac=mac_str))
1473 exec_cmd_no_error(node, cmd, sudo=True)
1476 def set_linux_interface_trust_on(node, interface, namespace=None,
1478 """Set trust on (promisc) for interface in linux.
1480 :param node: Node where to execute command.
1481 :param interface: Interface in namespace.
1482 :param namespace: Execute command in namespace. Optional
1483 :param vf_id: Virtual Function id. Optional
1485 :type interface: str
1486 :type namespace: str
1489 trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
1490 if vf_id is not None else 'trust on'
1491 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1493 cmd = ('{ns} ip link set dev {interface} {trust}'.
1494 format(ns=ns_str, interface=interface, trust=trust_str))
1495 exec_cmd_no_error(node, cmd, sudo=True)
1498 def set_linux_interface_spoof_off(node, interface, namespace=None,
1500 """Set spoof off for interface in linux.
1502 :param node: Node where to execute command.
1503 :param interface: Interface in namespace.
1504 :param namespace: Execute command in namespace. Optional
1505 :param vf_id: Virtual Function id. Optional
1507 :type interface: str
1508 :type namespace: str
1511 spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
1512 if vf_id is not None else 'spoof off'
1513 ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
1515 cmd = ('{ns} ip link set dev {interface} {spoof}'.
1516 format(ns=ns_str, interface=interface, spoof=spoof_str))
1517 exec_cmd_no_error(node, cmd, sudo=True)
1520 def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
1521 """Init PCI device by creating VFs and bind them to vfio-pci for AVF
1522 driver testing on DUT.
1524 :param node: DUT node.
1525 :param ifc_key: Interface key from topology file.
1526 :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
1527 :param osi_layer: OSI Layer type to initialize TG with.
1528 Default value "L2" sets linux interface spoof off.
1532 :type osi_layer: str
1533 :returns: Virtual Function topology interface keys.
1535 :raises RuntimeError: If a reason preventing initialization is found.
1537 # Read PCI address and driver.
1538 pf_pci_addr = Topology.get_interface_pci_addr(node, ifc_key)
1539 pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
1540 uio_driver = Topology.get_uio_driver(node)
1541 kernel_driver = Topology.get_interface_driver(node, ifc_key)
1542 if kernel_driver != "i40e":
1544 "AVF needs i40e driver, not {driver} at node {host} ifc {ifc}"\
1545 .format(driver=kernel_driver, host=node["host"], ifc=ifc_key))
1546 current_driver = DUTSetup.get_pci_dev_driver(
1547 node, pf_pci_addr.replace(':', r'\:'))
1549 VPPUtil.stop_vpp_service(node)
1550 if current_driver != kernel_driver:
1551 # PCI device must be re-bound to kernel driver before creating VFs.
1552 DUTSetup.verify_kernel_module(node, kernel_driver, force_load=True)
1553 # Stop VPP to prevent deadlock.
1554 # Unbind from current driver.
1555 DUTSetup.pci_driver_unbind(node, pf_pci_addr)
1556 # Bind to kernel driver.
1557 DUTSetup.pci_driver_bind(node, pf_pci_addr, kernel_driver)
1559 # Initialize PCI VFs
1560 DUTSetup.set_sriov_numvfs(node, pf_pci_addr, numvfs)
1563 # Set MAC address and bind each virtual function to uio driver.
1564 for vf_id in range(numvfs):
1565 vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
1566 pf_mac_addr[3], pf_mac_addr[4],
1567 pf_mac_addr[5], "{:02x}".format(vf_id)])
1569 pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
1570 format(pci=pf_pci_addr)
1571 InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
1573 if osi_layer == 'L2':
1574 InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
1576 InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
1579 DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
1580 DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
1582 # Add newly created ports into topology file
1583 vf_ifc_name = '{pf_if_key}_vf'.format(pf_if_key=ifc_key)
1584 vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
1585 vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
1586 Topology.update_interface_name(node, vf_ifc_key,
1587 vf_ifc_name+str(vf_id+1))
1588 Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
1589 Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
1590 vf_ifc_keys.append(vf_ifc_key)
1595 def vpp_sw_interface_rx_placement_dump(node):
1596 """Dump VPP interface RX placement on node.
1598 :param node: Node to run command on.
1600 :returns: Thread mapping information as a list of dictionaries.
1603 cmd = 'sw_interface_rx_placement_dump'
1604 cmd_reply = 'sw_interface_rx_placement_details'
1605 err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
1606 cmd=cmd, host=node['host'])
1607 with PapiExecutor(node) as papi_exec:
1608 for ifc in node['interfaces'].values():
1609 if ifc['vpp_sw_index'] is not None:
1610 papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
1611 papi_resp = papi_exec.get_dump(err_msg)
1612 thr_mapping = [s[cmd_reply] for r in papi_resp.reply
1613 for s in r['api_reply']]
1614 return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
1617 def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
1619 """Set interface RX placement to worker on node.
1621 :param node: Node to run command on.
1622 :param sw_if_index: VPP SW interface index.
1623 :param queue_id: VPP interface queue ID.
1624 :param worker_id: VPP worker ID (indexing from 0).
1626 :type sw_if_index: int
1628 :type worker_id: int
1629 :raises RuntimeError: If failed to run command on host or if no API
1632 cmd = 'sw_interface_set_rx_placement'
1633 err_msg = "Failed to set interface RX placement to worker on host " \
1634 "{host}!".format(host=node['host'])
1635 args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
1636 worker_id=worker_id)
1637 with PapiExecutor(node) as papi_exec:
1638 papi_exec.add(cmd, **args).get_replies(err_msg).\
1639 verify_reply(err_msg=err_msg)
1642 def vpp_round_robin_rx_placement(node, prefix):
1643 """Set Round Robin interface RX placement on all worker threads
1646 :param node: Topology nodes.
1647 :param prefix: Interface name prefix.
1652 worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
1653 for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
1654 for interface in node['interfaces'].values():
1655 if placement['sw_if_index'] == interface['vpp_sw_index'] \
1656 and prefix in interface['name']:
1657 InterfaceUtil.vpp_sw_interface_set_rx_placement(
1658 node, placement['sw_if_index'], placement['queue_id'],
1659 worker_id % worker_cnt)
1663 def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
1664 """Set Round Robin interface RX placement on all worker threads
1667 :param nodes: Topology nodes.
1668 :param prefix: Interface name prefix.
1672 for node in nodes.values():
1673 if node['type'] == NodeType.DUT:
1674 InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)