-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.DUTSetup import DUTSetup
+from resources.libraries.python.PapiExecutor import PapiExecutor
from resources.libraries.python.IPUtil import convert_ipv4_netmask_prefix
from resources.libraries.python.IPUtil import IPUtil
from resources.libraries.python.parsers.JsonParser import JsonParser
InterfaceUtil.update_nic_interface_names(node)
@staticmethod
- def update_tg_interface_data_on_node(node):
+ def update_tg_interface_data_on_node(node, skip_tg_udev=False):
"""Update interface name for TG/linux node in DICT__nodes.
.. note::
"52:54:00:e1:8a:0f": "eth2"
"00:00:00:00:00:00": "lo"
- .. note:: TODO: parse lshw -json instead
-
:param node: Node selected from DICT__nodes.
+ :param skip_tg_udev: Skip udev rename on TG node.
:type node: dict
+ :type skip_tg_udev: bool
:raises RuntimeError: If getting of interface name and MAC fails.
"""
# First setup interface driver specified in yaml file
interface['name'] = name
# Set udev rules for interfaces
- InterfaceUtil.tg_set_interfaces_udev_rules(node)
+ if not skip_tg_udev:
+ InterfaceUtil.tg_set_interfaces_udev_rules(node)
@staticmethod
def iface_update_numa_node(node):
@staticmethod
def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
+ skip_tg_udev=False,
numa_node=False):
"""Update interface names on all nodes in DICT__nodes.
of all nodes mentioned in the topology dictionary.
:param nodes: Nodes in the topology.
- :param skip_tg: Skip TG node
+ :param skip_tg: Skip TG node.
+ :param skip_tg_udev: Skip udev rename on TG node.
:param numa_node: Retrieve numa_node location.
:type nodes: dict
:type skip_tg: bool
+ :type skip_tg_udev: bool
:type numa_node: bool
"""
for node_data in nodes.values():
if node_data['type'] == NodeType.DUT:
InterfaceUtil.update_vpp_interface_data_on_node(node_data)
elif node_data['type'] == NodeType.TG and not skip_tg:
- InterfaceUtil.update_tg_interface_data_on_node(node_data)
+ InterfaceUtil.update_tg_interface_data_on_node(
+ node_data, skip_tg_udev)
if numa_node:
if node_data['type'] == NodeType.DUT:
Topology.update_interface_mac_address(node, if_key, ifc_mac)
@staticmethod
- def vpp_create_avf_interface(node, vf_pci_addr):
+ def vpp_create_avf_interface(node, vf_pci_addr, num_rx_queues=None):
"""Create AVF interface on VPP node.
:param node: DUT node from topology.
:param vf_pci_addr: Virtual Function PCI address.
+ :param num_rx_queues: Number of RX queues.
:type node: dict
:type vf_pci_addr: str
+ :type num_rx_queues: int
:returns: Interface key (name) in topology.
:rtype: str
:raises RuntimeError: If it is not possible to create AVF interface on
the node.
"""
+ num_rx_queues = 'num-rx-queues {num_rx_queues}'\
+ .format(num_rx_queues=num_rx_queues) if num_rx_queues else ''
+
with VatTerminal(node, json_param=False) as vat:
vat.vat_terminal_exec_cmd_from_template('create_avf_interface.vat',
- vf_pci_addr=vf_pci_addr)
+ vf_pci_addr=vf_pci_addr,
+ num_rx_queues=num_rx_queues)
output = vat.vat_stdout
if output is not None:
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def init_avf_interface(node, ifc_key, numvfs=1, topology_type='L2'):
+ def init_avf_interface(node, ifc_key, numvfs=1, traffic_type='L2'):
"""Init PCI device by creating VFs and bind them to vfio-pci for AVF
driver testing on DUT.
:param node: DUT node.
:param ifc_key: Interface key from topology file.
:param numvfs: Number of VFs to initialize, 0 - disable the VFs.
- :param topology_type: Topology type.
+ :param traffic_type: Expected type of traffic, affects spoofing.
+ Default value "L2" sets linux interface spoof off.
+ Other values do not do that.
:type node: dict
:type ifc_key: str
:type numvfs: int
- :type topology_type: str
+ :type traffic_type: str
:returns: Virtual Function topology interface keys.
:rtype: list
"""
format(pci=pf_pci_addr)
InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
vf_id=vf_id)
- if topology_type == 'L2':
+ if traffic_type == 'L2':
InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
vf_id=vf_id)
InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
node, 'vlan_subif{nr}'.format(nr=subif_id)), bd_id=bd_id))
VatExecutor().write_and_execute_script(node, tmp_fn, commands)
+
+    @staticmethod
+    def vpp_sw_interface_rx_placement_dump(node):
+        """Dump VPP interface RX placement on node.
+
+        :param node: Node to run command on.
+        :type node: dict
+        :returns: Thread mapping information as a list of dictionaries.
+        :rtype: list
+        """
+
+        cmd = 'sw_interface_rx_placement_dump'
+        cmd_reply = 'sw_interface_rx_placement_details'
+        err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+            cmd=cmd, host=node['host'])
+        with PapiExecutor(node) as papi_exec:
+            # Query only interfaces already known to VPP, i.e. those with a
+            # sw_if_index assigned in the topology.
+            for ifc in node['interfaces'].values():
+                if ifc['vpp_sw_index'] is not None:
+                    papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
+            papi_resp = papi_exec.get_dump(err_msg)
+        # Flatten the per-request detail messages into one list of dicts.
+        thr_mapping = [s[cmd_reply] for r in papi_resp.reply
+                       for s in r['api_reply']]
+        # Sort by interface index so callers get a deterministic order.
+        return sorted(thr_mapping, key=lambda k: k['sw_if_index'])
+
+    @staticmethod
+    def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
+                                          worker_id):
+        """Set interface RX placement to worker on node.
+
+        :param node: Node to run command on.
+        :param sw_if_index: VPP SW interface index.
+        :param queue_id: VPP interface queue ID.
+        :param worker_id: VPP worker ID (indexing from 0).
+        :type node: dict
+        :type sw_if_index: int
+        :type queue_id: int
+        :type worker_id: int
+        :raises RuntimeError: If failed to run command on host or if no API
+            reply received.
+        """
+
+        cmd = 'sw_interface_set_rx_placement'
+        cmd_reply = 'sw_interface_set_rx_placement_reply'
+        err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
+            host=node['host'], cmd=cmd)
+        args = dict(sw_if_index=sw_if_index, queue_id=queue_id,
+                    worker_id=worker_id)
+        with PapiExecutor(node) as papi_exec:
+            papi_resp = papi_exec.add(cmd, **args).execute_should_pass(err_msg)
+        # A non-zero retval means VPP rejected the placement request even
+        # though the PAPI command itself executed.
+        data = papi_resp.reply[0]['api_reply'][cmd_reply]
+        if data['retval'] != 0:
+            raise RuntimeError("Failed to set interface RX placement "
+                               "to worker on host {host}".
+                               format(host=node['host']))
+
+    @staticmethod
+    def vpp_round_robin_rx_placement(node, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on node.
+
+        If the node runs no worker threads, the method is a no-op: there is
+        nothing to place RX queues on (this also avoids a modulo-by-zero).
+
+        :param node: Topology node to set RX placement on.
+        :param prefix: Interface name prefix; only interfaces whose name
+            contains this prefix are re-placed.
+        :type node: dict
+        :type prefix: str
+        """
+        worker_id = 0
+        # vpp_show_threads output includes the main thread, hence the -1
+        # to get the worker count only — TODO confirm against VPPUtil.
+        worker_cnt = len(VPPUtil.vpp_show_threads(node)) - 1
+        if worker_cnt < 1:
+            # No worker threads available, keep existing placement.
+            return
+        for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
+            for interface in node['interfaces'].values():
+                if placement['sw_if_index'] == interface['vpp_sw_index'] \
+                    and prefix in interface['name']:
+                    # Assign queues to workers in round robin fashion.
+                    InterfaceUtil.vpp_sw_interface_set_rx_placement(
+                        node, placement['sw_if_index'], placement['queue_id'],
+                        worker_id % worker_cnt)
+                    worker_id += 1
+
+    @staticmethod
+    def vpp_round_robin_rx_placement_on_all_duts(nodes, prefix):
+        """Set Round Robin interface RX placement on all worker threads
+        on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param prefix: Interface name prefix.
+        :type nodes: dict
+        :type prefix: str
+        """
+        # Only DUT nodes run VPP; TG and other node types are skipped.
+        for node in nodes.values():
+            if node['type'] == NodeType.DUT:
+                InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)