"""Classify utilities library."""
-import binascii
import re
from ipaddress import ip_address
from robot.api import logger
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.topology import Topology
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+from resources.libraries.python.topology import Topology
class Classify(object):
"""Classify utilities."""
@staticmethod
- def _build_mac_mask(dst_mac='', src_mac='', ether_type=''):
- """Build MAC ACL mask data in hexstring format.
+ def _build_mac_mask(dst_mac=u"", src_mac=u"", ether_type=u""):
+ """Build MAC ACL mask data in bytes format.
:param dst_mac: Destination MAC address <0-ffffffffffff>.
:param src_mac: Source MAC address <0-ffffffffffff>.
:type dst_mac: str
:type src_mac: str
:type ether_type: str
- :returns MAC ACL mask in hexstring format.
- :rtype: str
+ :returns: MAC ACL mask in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>12}{!s:0>12}{!s:0>4}'.format(
- dst_mac.replace(':', ''), src_mac.replace(':', ''),
- ether_type)).decode('hex').rstrip('\0')
+ return bytes.fromhex(
+ f"{dst_mac.replace(u':', u'')!s:0>12}"
+ f"{src_mac.replace(u':', u'')!s:0>12}"
+ f"{ether_type!s:0>4}"
+ ).rstrip(b'\0')
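# Illustrative sketch (not part of the patch): with only dst_mac given, the
# remaining fields zero-pad and trailing zero bytes are stripped, e.g.
#   >>> Classify._build_mac_mask(dst_mac=u"ff:ff:ff:ff:ff:ff")
#   b'\xff\xff\xff\xff\xff\xff'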
@staticmethod
- def _build_ip_mask(proto='', src_ip='', dst_ip='', src_port='',
- dst_port=''):
- """Build IP ACL mask data in hexstring format.
+ def _build_ip_mask(
+ proto=u"", src_ip=u"", dst_ip=u"", src_port=u"", dst_port=u""):
+ """Build IP ACL mask data in bytes format.
:param proto: Protocol number <0-ff>.
:param src_ip: Source ip address <0-ffffffff>.
:type dst_ip: str
:type src_port: str
- :type dst_port:src
+ :type dst_port: str
- :returns: IP mask in hexstring format.
- :rtype: str
+ :returns: IP mask in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>20}{!s:0>12}{!s:0>8}{!s:0>4}{!s:0>4}'.format(
- proto, src_ip, dst_ip, src_port, dst_port)).decode('hex').\
- rstrip('\0')
+ return bytes.fromhex(
+ f"{proto!s:0>20}{src_ip!s:0>12}{dst_ip!s:0>8}{src_port!s:0>4}"
+ f"{dst_port!s:0>4}"
+ ).rstrip(b'\0')
@staticmethod
- def _build_ip6_mask(next_hdr='', src_ip='', dst_ip='', src_port='',
- dst_port=''):
- """Build IPv6 ACL mask data in hexstring format.
+ def _build_ip6_mask(
+ next_hdr=u"", src_ip=u"", dst_ip=u"", src_port=u"", dst_port=u""):
+ """Build IPv6 ACL mask data in bytes format.
:param next_hdr: Next header number <0-ff>.
:param src_ip: Source ip address <0-ffffffff>.
:type dst_ip: str
:type src_port: str
:type dst_port: str
- :returns: IPv6 ACL mask in hexstring format.
- :rtype: str
+ :returns: IPv6 ACL mask in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>14}{!s:0>34}{!s:0>32}{!s:0>4}{!s:0>4}'.format(
- next_hdr, src_ip, dst_ip, src_port, dst_port)).decode('hex').\
- rstrip('\0')
+ return bytes.fromhex(
+ f"{next_hdr!s:0>14}{src_ip!s:0>34}{dst_ip!s:0>32}{src_port!s:0>4}"
+ f"{dst_port!s:0>4}"
+ ).rstrip(b'\0')
@staticmethod
- def _build_mac_match(dst_mac='', src_mac='', ether_type=''):
- """Build MAC ACL match data in hexstring format.
+ def _build_mac_match(dst_mac=u"", src_mac=u"", ether_type=u""):
+ """Build MAC ACL match data in bytes format.
:param dst_mac: Destination MAC address <x:x:x:x:x:x>.
:param src_mac: Source MAC address <x:x:x:x:x:x>.
:type dst_mac: str
:type src_mac: str
:type ether_type: str
- :returns: MAC ACL match data in hexstring format.
- :rtype: str
+ :returns: MAC ACL match data in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>12}{!s:0>12}{!s:0>4}'.format(
- dst_mac.replace(':', ''), src_mac.replace(':', ''),
- ether_type)).decode('hex').rstrip('\0')
+ return bytes.fromhex(
+ f"{dst_mac.replace(u':', u'')!s:0>12}"
+ f"{src_mac.replace(u':', u'')!s:0>12}"
+ f"{ether_type!s:0>4}"
+ ).rstrip(b'\0')
@staticmethod
- def _build_ip_match(proto=0, src_ip='', dst_ip='', src_port=0, dst_port=0):
- """Build IP ACL match data in byte-string format.
+ def _build_ip_match(
+ proto=0, src_ip=u"", dst_ip=u"", src_port=0, dst_port=0):
+ """Build IP ACL match data in bytes format.
:param proto: Protocol number with valid option "x".
- :param src_ip: Source ip address in packed format.
+ :param src_ip: Source ip address as a hex string.
- :returns: IP ACL match data in byte-string format.
- :rtype: str
+ :returns: IP ACL match data in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>20}{!s:0>12}{!s:0>8}{!s:0>4}{!s:0>4}'.format(
- hex(proto)[2:], src_ip, dst_ip, hex(src_port)[2:],
- hex(dst_port)[2:])).decode('hex').rstrip('\0')
+ return bytes.fromhex(
+ f"{hex(proto)[2:]!s:0>20}{src_ip!s:0>12}{dst_ip!s:0>8}"
+ f"{hex(src_port)[2:]!s:0>4}{hex(dst_port)[2:]!s:0>4}"
+ ).rstrip(b'\0')
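# Illustrative sketch (not part of the patch): hex() drops the "0x" prefix,
# so proto=6 pads into the 20-digit field with the protocol byte landing at
# IP-header offset 9, followed by the hex-string addresses and the ports:
#   >>> Classify._build_ip_match(proto=6, src_ip=u"c0a80101", src_port=80)
# returns 22 bytes ending in b'\x00P' once the trailing zeros are stripped.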
@staticmethod
- def _build_ip6_match(next_hdr=0, src_ip='', dst_ip='', src_port=0,
- dst_port=0):
+ def _build_ip6_match(
+ next_hdr=0, src_ip=u"", dst_ip=u"", src_port=0, dst_port=0):
"""Build IPv6 ACL match data in byte-string format.
:param next_hdr: Next header number with valid option "x".
:type dst_ip: str
:type src_port: int
:type dst_port: int
- :returns: IPv6 ACL match data in byte-string format.
- :rtype: str
+ :returns: IPv6 ACL match data in bytes format.
+ :rtype: bytes
"""
-
- return ('{!s:0>14}{!s:0>34}{!s:0>32}{!s:0>4}{!s:0>4}'.format(
- hex(next_hdr)[2:], src_ip, dst_ip, hex(src_port)[2:],
- hex(dst_port)[2:])).decode('hex').rstrip('\0')
+ return bytes.fromhex(
+ f"{hex(next_hdr)[2:]!s:0>14}{src_ip!s:0>34}{dst_ip!s:0>32}"
+ f"{hex(src_port)[2:]!s:0>4}{hex(dst_port)[2:]!s:0>4}"
+ ).rstrip(b'\0')
@staticmethod
def _classify_add_del_table(
(Default value = 0)
:type node: dict
:type is_add: int
- :type mask: str
+ :type mask: bytes
:type match_n_vectors: int
:type table_index: int
:type nbuckets: int
match_n: Number of match vectors.
:rtype: tuple(int, int, int)
"""
- cmd = 'classify_add_del_table'
+ cmd = u"classify_add_del_table"
args = dict(
is_add=is_add,
table_index=table_index,
mask_len=len(mask),
mask=mask
)
- err_msg = "Failed to create a classify table on host {host}".format(
- host=node['host'])
+ err_msg = f"Failed to create a classify table on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
- return int(reply["new_table_index"]), int(reply["skip_n_vectors"]),\
- int(reply["match_n_vectors"])
+ return int(reply[u"new_table_index"]), int(reply[u"skip_n_vectors"]),\
+ int(reply[u"match_n_vectors"])
@staticmethod
def _classify_add_del_session(
:type node: dict
:type is_add: int
:type table_index: int
- :type match: str
+ :type match: bytes
:type opaque_index: int
:type hit_next_index: int
:type advance: int
:type action: int
:type metadata: int
"""
- cmd = 'classify_add_del_session'
+ cmd = u"classify_add_del_session"
args = dict(
is_add=is_add,
table_index=table_index,
match_len=len(match),
match=match
)
- err_msg = "Failed to create a classify session on host {host}".format(
- host=node['host'])
+ err_msg = f"Failed to create a classify session on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type rules: list
:type tag: str
"""
- cmd = "macip_acl_add"
+ cmd = u"macip_acl_add"
args = dict(
r=rules,
count=len(rules),
tag=tag
)
- err_msg = "Failed to create a classify session on host {host}".format(
- host=node['host'])
+ err_msg = f"Failed to add MACIP ACL on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type acl_type: str
:type acls: list
"""
- cmd = "acl_interface_set_acl_list"
- n_input = len(acls) if acl_type == "input" else 0
+ cmd = u"acl_interface_set_acl_list"
+ n_input = len(acls) if acl_type == u"input" else 0
args = dict(
sw_if_index=sw_if_index,
acls=acls,
n_input=n_input,
count=len(acls)
)
- err_msg = "Failed to set acl list for interface {idx} on host {host}".\
- format(idx=sw_if_index, host=node['host'])
+ err_msg = f"Failed to set acl list for interface {sw_if_index} " \
+ f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type rules: list
:type tag: str
"""
- cmd = "acl_add_replace"
+ cmd = u"acl_add_replace"
args = dict(
tag=tag.encode("utf-8"),
acl_index=4294967295 if acl_idx is None else acl_idx,
r=rules
)
- err_msg = "Failed to add/replace acls on host {host}".format(
- host=node['host'])
+ err_msg = f"Failed to add/replace ACLs on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
ip6=Classify._build_ip6_mask
)
- if ip_version == "ip4" or ip_version == "ip6":
- netmask = binascii.hexlify(ip_address(unicode(netmask)).packed)
+ if ip_version in (u"ip4", u"ip6"):
+ netmask = ip_address(netmask).packed
else:
- raise ValueError("IP version {ver} is not supported.".format(
- ver=ip_version))
+ raise ValueError(f"IP version {ip_version} is not supported.")
- if direction == "src":
- mask = mask_f[ip_version](src_ip=netmask)
- elif direction == "dst":
- mask = mask_f[ip_version](dst_ip=netmask)
+ if direction == u"src":
+ mask = mask_f[ip_version](src_ip=netmask.hex())
+ elif direction == u"dst":
+ mask = mask_f[ip_version](dst_ip=netmask.hex())
else:
- raise ValueError("Direction {dir} is not supported.".format(
- dir=direction))
+ raise ValueError(f"Direction {direction} is not supported.")
# Add l2 ethernet header to mask
- mask = 14 * '\0' + mask
+ mask = 14 * b'\0' + mask
# Get index of the first significant mask octet
- i = len(mask) - len(mask.lstrip('\0'))
+ i = len(mask) - len(mask.lstrip(b'\0'))
# Compute skip_n parameter
skip_n = i // 16
# Remove octets to be skipped from the mask
mask = mask[skip_n*16:]
# Pad mask to an even multiple of the vector size
- mask = mask + (16 - len(mask) % 16 if len(mask) % 16 else 0) * '\0'
+ mask = mask + (16 - len(mask) % 16 if len(mask) % 16 else 0) * b'\0'
# Compute match_n parameter
match_n = len(mask) // 16
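# Worked example (illustrative): an IPv4 /24 source mask packs to
# "ffffff00", so _build_ip_mask(src_ip=u"ffffff00") is 15 bytes after the
# trailing-zero strip. Prepending the 14-byte L2 header gives 26 leading
# zero bytes, hence i = 26, skip_n = 26 // 16 = 1; the mask shrinks to
# 13 bytes, pads back up to 16, and match_n = 1.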
deny=1
)
- if ip_version == "ip4" or ip_version == "ip6":
- address = binascii.hexlify(ip_address(unicode(address)).packed)
+ if ip_version in (u"ip4", u"ip6"):
+ address = ip_address(address).packed
else:
- raise ValueError("IP version {ver} is not supported.".format(
- ver=ip_version))
+ raise ValueError(f"IP version {ip_version} is not supported.")
- if direction == "src":
+ if direction == u"src":
- match = match_f[ip_version](src_ip=address)
+ match = match_f[ip_version](src_ip=address.hex())
- elif direction == "dst":
+ elif direction == u"dst":
- match = match_f[ip_version](dst_ip=address)
+ match = match_f[ip_version](dst_ip=address.hex())
else:
- raise ValueError("Direction {dir} is not supported.".format(
- dir=direction))
+ raise ValueError(f"Direction {direction} is not supported.")
# Prepend match with l2 ethernet header part
- match = 14 * '\0' + match
+ match = 14 * b'\0' + match
# Pad match to match skip_n_vector + match_n_vector size
match = match + ((match_n + skip_n) * 16 - len(match)
if len(match) < (match_n + skip_n) * 16
- else 0) * '\0'
+ else 0) * b'\0'
Classify._classify_add_del_session(
node,
action=action[acl_method]
)
- @staticmethod
- def compute_classify_hex_mask(ip_version, protocol, direction):
- """Compute classify hex mask for TCP or UDP packet matching.
-
- :param ip_version: Version of IP protocol.
- :param protocol: Type of protocol.
- :param direction: Traffic direction.
- :type ip_version: str
- :type protocol: str
- :type direction: str
- :returns: Classify hex mask.
- :rtype: str
- :raises ValueError: If protocol is not TCP or UDP.
- :raises ValueError: If direction is not source or destination or
- source + destination.
- """
- if protocol in ('TCP', 'UDP'):
- base_mask = Classify._compute_base_mask(ip_version)
-
- if direction == 'source':
- return base_mask + 'FFFF0000'
- elif direction == 'destination':
- return base_mask + '0000FFFF'
- elif direction == 'source + destination':
- return base_mask + 'FFFFFFFF'
- else:
- raise ValueError("Invalid direction!")
- else:
- raise ValueError("Invalid protocol!")
-
- @staticmethod
- def compute_classify_hex_value(hex_mask, source_port, destination_port):
- """Compute classify hex value for TCP or UDP packet matching.
-
- :param hex_mask: Classify hex mask.
- :param source_port: Source TCP/UDP port.
- :param destination_port: Destination TCP/UDP port.
- :type hex_mask: str
- :type source_port: str
- :type destination_port: str
- :returns: Classify hex value.
- :rtype: str
- """
- source_port_hex = Classify._port_convert(source_port)
- destination_port_hex = Classify._port_convert(destination_port)
-
- return hex_mask[:-8] + source_port_hex + destination_port_hex
-
- @staticmethod
- def _port_convert(port):
- """Convert port number for classify hex table format.
-
- :param port: TCP/UDP port number.
- :type port: str
- :returns: TCP/UDP port number in 4-digit hexadecimal format.
- :rtype: str
- """
- return '{0:04x}'.format(int(port))
-
- @staticmethod
- def _compute_base_mask(ip_version):
- """Compute base classify hex mask based on IP version.
-
- :param ip_version: Version of IP protocol.
- :type ip_version: str
- :returns: Base hex mask.
- :rtype: str
- """
- if ip_version == 'ip4':
- return 68 * '0'
- # base value of classify hex table for IPv4 TCP/UDP ports
- elif ip_version == 'ip6':
- return 108 * '0'
- # base value of classify hex table for IPv6 TCP/UDP ports
- else:
- raise ValueError("Invalid IP version!")
-
@staticmethod
def get_classify_table_data(node, table_index):
"""Retrieve settings for classify table by ID.
:returns: Classify table settings.
:rtype: dict
"""
- cmd = 'classify_table_info'
- err_msg = "Failed to get 'classify_table_info' on host {host}".format(
- host=node['host'])
+ cmd = u"classify_table_info"
+ err_msg = f"Failed to get 'classify_table_info' on host {node[u'host']}"
args = dict(
table_id=int(table_index)
)
:returns: List of classify session settings.
:rtype: list or dict
"""
- cmd = "classify_session_dump"
+ cmd = u"classify_session_dump"
args = dict(
table_id=int(table_index)
)
:rtype: str
"""
return PapiSocketExecutor.run_cli_cmd(
- node, "show classify tables verbose")
+ node, u"show classify tables verbose"
+ )
@staticmethod
def vpp_log_plugin_acl_settings(node):
:param node: VPP node.
:type node: dict
"""
- PapiSocketExecutor.dump_and_log(node, ["acl_dump", ])
+ PapiSocketExecutor.dump_and_log(node, [u"acl_dump", ])
@staticmethod
def vpp_log_plugin_acl_interface_assignment(node):
:param node: VPP node.
:type node: dict
"""
- PapiSocketExecutor.dump_and_log(node, ["acl_interface_list_dump", ])
+ PapiSocketExecutor.dump_and_log(node, [u"acl_interface_list_dump", ])
@staticmethod
def set_acl_list_for_interface(node, interface, acl_type, acl_idx=None):
:type acl_type: str
:type acl_idx: list
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = int(interface)
acls = acl_idx if isinstance(acl_idx, list) else list()
- Classify._acl_interface_set_acl_list(node=node,
- sw_if_index=sw_if_index,
- acl_type=acl_type,
- acls=acls)
+ Classify._acl_interface_set_acl_list(
+ node=node, sw_if_index=sw_if_index, acl_type=acl_type, acls=acls
+ )
@staticmethod
- def add_replace_acl_multi_entries(node, acl_idx=None, rules=None, tag=""):
+ def add_replace_acl_multi_entries(node, acl_idx=None, rules=None, tag=u""):
"""Add a new ACL or replace the existing one. To replace an existing
ACL, pass the ID of this ACL.
:type rules: str
:type tag: str
"""
- reg_ex_src_ip = re.compile(r'(src [0-9a-fA-F.:/\d{1,2}]*)')
- reg_ex_dst_ip = re.compile(r'(dst [0-9a-fA-F.:/\d{1,2}]*)')
- reg_ex_sport = re.compile(r'(sport \d{1,5})')
- reg_ex_dport = re.compile(r'(dport \d{1,5})')
- reg_ex_proto = re.compile(r'(proto \d{1,5})')
+ reg_ex_src_ip = re.compile(r"(src [0-9a-fA-F.:/\d{1,2}]*)")
+ reg_ex_dst_ip = re.compile(r"(dst [0-9a-fA-F.:/\d{1,2}]*)")
+ reg_ex_sport = re.compile(r"(sport \d{1,5})")
+ reg_ex_dport = re.compile(r"(dport \d{1,5})")
+ reg_ex_proto = re.compile(r"(proto \d{1,5})")
acl_rules = list()
- for rule in rules.split(", "):
+ for rule in rules.split(u", "):
acl_rule = dict()
- acl_rule["is_permit"] = 1 if "permit" in rule else 0
- acl_rule["is_ipv6"] = 1 if "ipv6" in rule else 0
+ acl_rule[u"is_permit"] = 1 if u"permit" in rule else 0
+ acl_rule[u"is_ipv6"] = 1 if u"ipv6" in rule else 0
groups = re.search(reg_ex_src_ip, rule)
if groups:
- grp = groups.group(1).split(' ')[1].split('/')
- acl_rule["src_ip_addr"] = ip_address(unicode(grp[0])).packed
- acl_rule["src_ip_prefix_len"] = int(grp[1])
+ grp = groups.group(1).split(u" ")[1].split(u"/")
+ acl_rule[u"src_ip_addr"] = ip_address(grp[0]).packed
+ acl_rule[u"src_ip_prefix_len"] = int(grp[1])
groups = re.search(reg_ex_dst_ip, rule)
if groups:
- grp = groups.group(1).split(' ')[1].split('/')
- acl_rule["dst_ip_addr"] = ip_address(unicode(grp[0])).packed
- acl_rule["dst_ip_prefix_len"] = int(grp[1])
+ grp = groups.group(1).split(u" ")[1].split(u"/")
+ acl_rule[u"dst_ip_addr"] = ip_address(grp[0]).packed
+ acl_rule[u"dst_ip_prefix_len"] = int(grp[1])
groups = re.search(reg_ex_sport, rule)
if groups:
- port = int(groups.group(1).split(' ')[1])
- acl_rule["srcport_or_icmptype_first"] = port
- acl_rule["srcport_or_icmptype_last"] = port
+ port = int(groups.group(1).split(u" ")[1])
+ acl_rule[u"srcport_or_icmptype_first"] = port
+ acl_rule[u"srcport_or_icmptype_last"] = port
else:
- acl_rule["srcport_or_icmptype_first"] = 0
- acl_rule["srcport_or_icmptype_last"] = 65535
+ acl_rule[u"srcport_or_icmptype_first"] = 0
+ acl_rule[u"srcport_or_icmptype_last"] = 65535
groups = re.search(reg_ex_dport, rule)
if groups:
- port = int(groups.group(1).split(' ')[1])
- acl_rule["dstport_or_icmpcode_first"] = port
- acl_rule["dstport_or_icmpcode_last"] = port
+ port = int(groups.group(1).split(u" ")[1])
+ acl_rule[u"dstport_or_icmpcode_first"] = port
+ acl_rule[u"dstport_or_icmpcode_last"] = port
else:
- acl_rule["dstport_or_icmpcode_first"] = 0
- acl_rule["dstport_or_icmpcode_last"] = 65535
+ acl_rule[u"dstport_or_icmpcode_first"] = 0
+ acl_rule[u"dstport_or_icmpcode_last"] = 65535
groups = re.search(reg_ex_proto, rule)
if groups:
- proto = int(groups.group(1).split(' ')[1])
+ proto = int(groups.group(1).split(u" ")[1])
- acl_rule["proto"] = proto
+ acl_rule[u"proto"] = proto
else:
- acl_rule["proto"] = 0
+ acl_rule[u"proto"] = 0
acl_rules.append(acl_rule)
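# Illustrative sketch of the rule grammar the regexes above assume: a rule
#   u"ipv4 permit src 192.168.0.0/24 dst 10.0.0.0/8 sport 80 dport 80 proto 6"
# parses into an acl_rule dict with is_permit=1, is_ipv6=0,
# src_ip_addr=b'\xc0\xa8\x00\x00', src_ip_prefix_len=24 and proto=6; absent
# ports fall back to the full 0-65535 range.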
node, acl_idx=acl_idx, rules=acl_rules, tag=tag)
@staticmethod
- def add_macip_acl_multi_entries(node, rules=""):
+ def add_macip_acl_multi_entries(node, rules=u""):
"""Add a new MACIP ACL.
:param node: VPP node to set MACIP ACL on.
:type node: dict
:type rules: str
"""
- reg_ex_ip = re.compile(r'(ip [0-9a-fA-F.:/\d{1,2}]*)')
- reg_ex_mac = re.compile(r'(mac \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)')
- reg_ex_mask = re.compile(r'(mask \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)')
+ reg_ex_ip = re.compile(r"(ip [0-9a-fA-F.:/\d{1,2}]*)")
+ reg_ex_mac = re.compile(r"(mac \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)")
+ reg_ex_mask = re.compile(r"(mask \S\S:\S\S:\S\S:\S\S:\S\S:\S\S)")
acl_rules = list()
- for rule in rules.split(", "):
+ for rule in rules.split(u", "):
acl_rule = dict()
- acl_rule["is_permit"] = 1 if "permit" in rule else 0
- acl_rule["is_ipv6"] = 1 if "ipv6" in rule else 0
+ acl_rule[u"is_permit"] = 1 if u"permit" in rule else 0
+ acl_rule[u"is_ipv6"] = 1 if u"ipv6" in rule else 0
groups = re.search(reg_ex_mac, rule)
if groups:
- mac = groups.group(1).split(' ')[1].replace(':', '')
- acl_rule["src_mac"] = binascii.unhexlify(unicode(mac))
+ mac = groups.group(1).split(u" ")[1].replace(u":", u"")
+ acl_rule[u"src_mac"] = bytes.fromhex(mac)
groups = re.search(reg_ex_mask, rule)
if groups:
- mask = groups.group(1).split(' ')[1].replace(':', '')
- acl_rule["src_mac_mask"] = binascii.unhexlify(unicode(mask))
+ mask = groups.group(1).split(u" ")[1].replace(u":", u"")
+ acl_rule[u"src_mac_mask"] = bytes.fromhex(mask)
groups = re.search(reg_ex_ip, rule)
if groups:
- grp = groups.group(1).split(' ')[1].split('/')
- acl_rule["src_ip_addr"] = ip_address(unicode(grp[0])).packed
- acl_rule["src_ip_prefix_len"] = int(grp[1])
+ grp = groups.group(1).split(u" ")[1].split(u"/")
+ acl_rule[u"src_ip_addr"] = ip_address((grp[0])).packed
+ acl_rule[u"src_ip_prefix_len"] = int(grp[1])
acl_rules.append(acl_rule)
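# Illustrative sketch (an assumption about the rule grammar): a rule such as
#   u"ipv4 permit ip 192.168.1.0/24 mac 02:fe:00:00:00:01 mask ff:ff:ff:ff:ff:ff"
# yields src_mac=b'\x02\xfe\x00\x00\x00\x01', an all-ones src_mac_mask and
# src_ip_addr=ip_address(u"192.168.1.0").packed with prefix length 24.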
:param node: VPP node.
:type node: dict
"""
- PapiSocketExecutor.dump_and_log(node, ["macip_acl_dump", ])
+ PapiSocketExecutor.dump_and_log(node, [u"macip_acl_dump", ])
@staticmethod
def add_del_macip_acl_interface(node, interface, action, acl_idx):
:type acl_idx: str or int
:raises RuntimeError: If unable to set MACIP ACL for the interface.
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
- is_add = 1 if action == "add" else 0
+ is_add = 1 if action == u"add" else 0
- cmd = 'macip_acl_interface_add_del'
- err_msg = "Failed to get 'macip_acl_interface' on host {host}".format(
- host=node['host'])
+ cmd = u"macip_acl_interface_add_del"
+ err_msg = f"Failed to get 'macip_acl_interface' on host {node[u'host']}"
args = dict(
is_add=is_add,
sw_if_index=int(sw_if_index),
:param node: VPP node.
:type node: dict
"""
- cmd = 'macip_acl_interface_get'
- err_msg = "Failed to get 'macip_acl_interface' on host {host}".format(
- host=node['host'])
+ cmd = u"macip_acl_interface_get"
+ err_msg = f"Failed to get 'macip_acl_interface' on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
logger.info(reply)
:returns: The value read, or default value.
:rtype: str
"""
- prefixes = ("FDIO_CSIT_", "CSIT_", "")
+ prefixes = (u"FDIO_CSIT_", u"CSIT_", u"")
if not isinstance(env_var_names, (list, tuple)):
env_var_names = [env_var_names]
for name in env_var_names:
:returns: The value read, or default value.
:rtype: int
"""
- env_str = get_str_from_env(env_var_names, "")
+ env_str = get_str_from_env(env_var_names, u"")
try:
return int(env_str)
except ValueError:
:returns: The value read, or default value.
:rtype: float
"""
- env_str = get_str_from_env(env_var_names, "")
+ env_str = get_str_from_env(env_var_names, u"")
try:
return float(env_str)
except ValueError:
:returns: The value read, or False.
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, "").lower()
- return True if env_str in ("true", "yes", "y", "1") else False
+ env_str = get_str_from_env(env_var_names, u"").lower()
+ return env_str in (u"true", u"yes", u"y", u"1")
def get_optimistic_bool_from_env(env_var_names):
:rtype: bool
"""
- env_str = get_str_from_env(env_var_names, "").lower()
- return False if env_str in ("false", "no", "n", "0") else True
+ env_str = get_str_from_env(env_var_names, u"").lower()
+ return env_str not in (u"false", u"no", u"n", u"0")
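# Illustrative behaviour of the two bool helpers (not part of the patch):
#   >>> get_pessimistic_bool_from_env(u"UNSET_VAR")  # missing/garbage
#   False
#   >>> get_optimistic_bool_from_env(u"UNSET_VAR")   # missing/garbage
#   True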
class Constants(object):
"""
# OpenVPP testing directory location at topology nodes
- REMOTE_FW_DIR = '/tmp/openvpp-testing'
+ REMOTE_FW_DIR = u"/tmp/openvpp-testing"
# shell scripts location
- RESOURCES_LIB_SH = 'resources/libraries/bash'
+ RESOURCES_LIB_SH = u"resources/libraries/bash"
# Python API provider location
- RESOURCES_PAPI_PROVIDER = 'resources/tools/papi/vpp_papi_provider.py'
+ RESOURCES_PAPI_PROVIDER = u"resources/tools/papi/vpp_papi_provider.py"
# vat templates location
- RESOURCES_TPL_VAT = 'resources/templates/vat'
+ RESOURCES_TPL_VAT = u"resources/templates/vat"
# Kubernetes templates location
- RESOURCES_TPL_K8S = 'resources/templates/kubernetes'
+ RESOURCES_TPL_K8S = u"resources/templates/kubernetes"
# KernelVM templates location
- RESOURCES_TPL_VM = 'resources/templates/vm'
+ RESOURCES_TPL_VM = u"resources/templates/vm"
# Container templates location
- RESOURCES_TPL_CONTAINER = 'resources/templates/container'
+ RESOURCES_TPL_CONTAINER = u"resources/templates/container"
# HTTP Server www root directory
- RESOURCES_TP_WRK_WWW = 'resources/traffic_profiles/wrk/www'
+ RESOURCES_TP_WRK_WWW = u"resources/traffic_profiles/wrk/www"
# OpenVPP VAT binary name
- VAT_BIN_NAME = 'vpp_api_test'
+ VAT_BIN_NAME = u"vpp_api_test"
# VPP service unit name
- VPP_UNIT = 'vpp'
+ VPP_UNIT = u"vpp"
# Number of system CPU cores.
CPU_CNT_SYSTEM = 1
CPU_CNT_MAIN = 1
# QEMU binary path
- QEMU_BIN_PATH = '/usr/bin'
+ QEMU_BIN_PATH = u"/usr/bin"
# QEMU VM kernel image path
- QEMU_VM_KERNEL = '/opt/boot/vmlinuz'
+ QEMU_VM_KERNEL = u"/opt/boot/vmlinuz"
# QEMU VM kernel initrd path
- QEMU_VM_KERNEL_INITRD = '/opt/boot/initrd.img'
+ QEMU_VM_KERNEL_INITRD = u"/opt/boot/initrd.img"
# QEMU VM nested image path
- QEMU_VM_IMAGE = '/var/lib/vm/vhost-nested.img'
+ QEMU_VM_IMAGE = u"/var/lib/vm/vhost-nested.img"
# QEMU VM DPDK path
- QEMU_VM_DPDK = '/opt/dpdk-19.02'
+ QEMU_VM_DPDK = u"/opt/dpdk-19.02"
# Docker container SUT image
- DOCKER_SUT_IMAGE_UBUNTU = 'snergster/csit-sut:latest'
+ DOCKER_SUT_IMAGE_UBUNTU = u"snergster/csit-sut:latest"
# Docker container arm SUT image
- DOCKER_SUT_IMAGE_UBUNTU_ARM = 'snergster/csit-arm-sut:latest'
+ DOCKER_SUT_IMAGE_UBUNTU_ARM = u"snergster/csit-arm-sut:latest"
# TRex install directory
- TREX_INSTALL_DIR = '/opt/trex-core-2.61'
+ TREX_INSTALL_DIR = u"/opt/trex-core-2.61"
# Honeycomb directory location at topology nodes:
- REMOTE_HC_DIR = '/opt/honeycomb'
+ REMOTE_HC_DIR = u"/opt/honeycomb"
# Honeycomb persistence files location
- REMOTE_HC_PERSIST = '/var/lib/honeycomb/persist'
+ REMOTE_HC_PERSIST = u"/var/lib/honeycomb/persist"
# Honeycomb log file location
- REMOTE_HC_LOG = '/var/log/honeycomb/honeycomb.log'
+ REMOTE_HC_LOG = u"/var/log/honeycomb/honeycomb.log"
# Honeycomb templates location
- RESOURCES_TPL_HC = 'resources/templates/honeycomb'
+ RESOURCES_TPL_HC = u"resources/templates/honeycomb"
# ODL Client Restconf listener port
ODL_PORT = 8181
# Sysctl kernel.core_pattern
- KERNEL_CORE_PATTERN = '/tmp/%p-%u-%g-%s-%t-%h-%e.core'
+ KERNEL_CORE_PATTERN = u"/tmp/%p-%u-%g-%s-%t-%h-%e.core"
# Core dump directory
- CORE_DUMP_DIR = '/tmp'
+ CORE_DUMP_DIR = u"/tmp"
# Equivalent to ~0 used in vpp code
BITWISE_NON_ZERO = 0xffffffff
# Default path to VPP API socket.
- SOCKSVR_PATH = "/run/vpp/api.sock"
+ SOCKSVR_PATH = u"/run/vpp/api.sock"
# Number of trials to execute in MRR test.
- PERF_TRIAL_MULTIPLICITY = get_int_from_env("PERF_TRIAL_MULTIPLICITY", 10)
+ PERF_TRIAL_MULTIPLICITY = get_int_from_env(u"PERF_TRIAL_MULTIPLICITY", 10)
# Duration of one trial in MRR test.
- PERF_TRIAL_DURATION = get_float_from_env("PERF_TRIAL_DURATION", 1.0)
+ PERF_TRIAL_DURATION = get_float_from_env(u"PERF_TRIAL_DURATION", 1.0)
# UUID string of DUT1 /tmp volume created outside of the
# DUT1 docker in case of vpp-device test. ${EMPTY} value means that
# /tmp directory is inside the DUT1 docker.
- DUT1_UUID = get_str_from_env("DUT1_UUID", "")
+ DUT1_UUID = get_str_from_env(u"DUT1_UUID", u"")
# Default path to VPP API Stats socket.
- SOCKSTAT_PATH = "/run/vpp/stats.sock"
+ SOCKSTAT_PATH = u"/run/vpp/stats.sock"
# Global "kill switch" for CRC checking during runtime.
- FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env("FAIL_ON_CRC_MISMATCH")
+ FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env(
+ u"FAIL_ON_CRC_MISMATCH")
# Mapping from NIC name to its bps limit.
NIC_NAME_TO_BPS_LIMIT = {
- "Cisco-VIC-1227": 10000000000,
- "Cisco-VIC-1385": 24500000000,
- "Intel-X520-DA2": 10000000000,
- "Intel-X553": 10000000000,
- "Intel-X710": 10000000000,
- "Intel-XL710": 24500000000,
- "Intel-XXV710": 24500000000,
- "Mellanox-CX556A": 100000000000,
- "virtual": 100000000,
+ u"Cisco-VIC-1227": 10000000000,
+ u"Cisco-VIC-1385": 24500000000,
+ u"Intel-X520-DA2": 10000000000,
+ u"Intel-X553": 10000000000,
+ u"Intel-X710": 10000000000,
+ u"Intel-XL710": 24500000000,
+ u"Intel-XXV710": 24500000000,
+ u"Mellanox-CX556A": 100000000000,
+ u"virtual": 100000000,
}
# Mapping from NIC name to its pps limit.
NIC_NAME_TO_PPS_LIMIT = {
- "Cisco-VIC-1227": 14880952,
- "Cisco-VIC-1385": 18750000,
- "Intel-X520-DA2": 14880952,
- "Intel-X553": 14880952,
- "Intel-X710": 14880952,
- "Intel-XL710": 18750000,
- "Intel-XXV710": 18750000,
- "Mellanox-CX556A": 60000000, #148809523,
- "virtual": 14880952,
+ u"Cisco-VIC-1227": 14880952,
+ u"Cisco-VIC-1385": 18750000,
+ u"Intel-X520-DA2": 14880952,
+ u"Intel-X553": 14880952,
+ u"Intel-X710": 14880952,
+ u"Intel-XL710": 18750000,
+ u"Intel-XXV710": 18750000,
+ u"Mellanox-CX556A": 60000000, #148809523,
+ u"virtual": 14880952,
}
# Suite file names use codes for NICs.
NIC_NAME_TO_CODE = {
- "Cisco-VIC-1227": "10ge2p1vic1227",
- "Cisco-VIC-1385": "40ge2p1vic1385",
- "Intel-X520-DA2": "10ge2p1x520",
- "Intel-X553": "10ge2p1x553",
- "Intel-X710": "10ge2p1x710",
- "Intel-XL710": "40ge2p1xl710",
- "Intel-XXV710": "25ge2p1xxv710",
- "Mellanox-CX556A": "100ge2p1cx556a",
+ u"Cisco-VIC-1227": u"10ge2p1vic1227",
+ u"Cisco-VIC-1385": u"40ge2p1vic1385",
+ u"Intel-X520-DA2": u"10ge2p1x520",
+ u"Intel-X553": u"10ge2p1x553",
+ u"Intel-X710": u"10ge2p1x710",
+ u"Intel-XL710": u"40ge2p1xl710",
+ u"Intel-XXV710": u"25ge2p1xxv710",
+ u"Mellanox-CX556A": u"100ge2p1cx556a",
}
# TODO CSIT-1481: Crypto HW should be read from topology file instead.
NIC_NAME_TO_CRYPTO_HW = {
- "Intel-X553": "HW_C3xxx",
- "Intel-X710": "HW_DH895xcc",
- "Intel-XL710": "HW_DH895xcc",
+ u"Intel-X553": u"HW_C3xxx",
+ u"Intel-X710": u"HW_DH895xcc",
+ u"Intel-XL710": u"HW_DH895xcc",
}
PERF_TYPE_TO_KEYWORD = {
- "mrr": "Traffic should pass with maximum rate",
- "ndrpdr": "Find NDR and PDR intervals using optimized search",
- "soak": "Find critical load using PLRsearch",
+ u"mrr": u"Traffic should pass with maximum rate",
+ u"ndrpdr": u"Find NDR and PDR intervals using optimized search",
+ u"soak": u"Find critical load using PLRsearch",
}
PERF_TYPE_TO_SUITE_DOC_VER = {
- "mrr" : '''fication:* In MaxReceivedRate tests TG sends traffic\\
+ u"mrr": u'''fication:* In MaxReceivedRate tests TG sends traffic\\
| ... | at line rate and reports total received packets over trial period.\\''',
# TODO: Figure out how to include the full "*[Ver] TG verification:*"
# while keeping this readable and without breaking line length limit.
- "ndrpdr": '''fication:* TG finds and reports throughput NDR (Non Drop\\
-| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\\
-| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\\
-| ... | of packets transmitted. NDR and PDR are discovered for different\\
-| ... | Ethernet L2 frame sizes using MLRsearch library.\\''',
- "soak": '''fication:* TG sends traffic at dynamically computed\\
+ u"ndrpdr": u'''fication:* TG finds and reports throughput NDR (Non\\
+| ... | Drop Rate) with zero packet loss tolerance and throughput PDR (Partial\\
+| ... | Drop Rate) with non-zero packet loss tolerance (LT) expressed in\\
+| ... | percentage of packets transmitted. NDR and PDR are discovered for\\
+| ... | different Ethernet L2 frame sizes using MLRsearch library.\\''',
+ u"soak": u'''fication:* TG sends traffic at dynamically computed\\
| ... | rate as PLRsearch algorithm gathers data and improves its estimate\\
| ... | of a rate at which a prescribed small fraction of packets\\
| ... | would be lost. After set time, the search stops\\
}
PERF_TYPE_TO_TEMPLATE_DOC_VER = {
- "mrr": '''Measure MaxReceivedRate for ${frame_size}B frames\\
+ u"mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames\\
| | ... | using burst trials throughput test.\\''',
- "ndrpdr": '''Measure NDR and PDR values using MLRsearch algorithm.\\''',
- "soak": '''Estimate critical rate using PLRsearch algorithm.\\''',
+ u"ndrpdr": u"Measure NDR and PDR values using MLRsearch algorithm.\\",
+ u"soak": u"Estimate critical rate using PLRsearch algorithm.\\",
}
-
"""Library to manipulate Containers."""
-from string import Template
from collections import OrderedDict, Counter
+from io import open
+from string import Template
-from resources.libraries.python.ssh import SSH
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
-__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
+__all__ = [
+ u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
+]
-SUPERVISOR_CONF = '/etc/supervisor/supervisord.conf'
+SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager(object):
try:
self.engine = globals()[engine]()
except KeyError:
- raise NotImplementedError('{engine} is not implemented.'.
- format(engine=engine))
+ raise NotImplementedError(f"{engine} is not implemented.")
self.containers = OrderedDict()
def get_container_by_name(self, name):
try:
return self.containers[name]
except KeyError:
- raise RuntimeError('Failed to get container with name: {name}'.
- format(name=name))
+ raise RuntimeError(f"Failed to get container with name: {name}")
def construct_container(self, **kwargs):
"""Construct container object on node with specified parameters.
setattr(self.engine.container, key, kwargs[key])
# Set additional environmental variables
- setattr(self.engine.container, 'env',
- 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
+ setattr(
+ self.engine.container, u"env",
+ f"MICROSERVICE_LABEL={kwargs[u'name']}"
+ )
# Store container instance
- self.containers[kwargs['name']] = self.engine.container
+ self.containers[kwargs[u"name"]] = self.engine.container
:param kwargs: Named parameters.
:param kwargs: dict
"""
- name = kwargs['name']
- for i in range(kwargs['count']):
+ name = kwargs[u"name"]
+ for i in range(kwargs[u"count"]):
# Name will contain ordinal suffix
- kwargs['name'] = ''.join([name, str(i+1)])
+ kwargs[u"name"] = u"".join([name, str(i+1)])
# Create container
self.construct_container(i=i, **kwargs)
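# Illustrative example (not part of the patch): name=u"DUT1_CNF", count=2
# constructs containers named u"DUT1_CNF1" and u"DUT1_CNF2", each with
# MICROSERVICE_LABEL set to its own suffixed name.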
:param kwargs: dict
"""
# Count number of DUTs based on node's host information
- dut_cnt = len(Counter([self.containers[container].node['host']
- for container in self.containers]))
- mod = len(self.containers)/dut_cnt
+ dut_cnt = len(
+ Counter(
+ [self.containers[container].node[u"host"]
+ for container in self.containers]
+ )
+ )
+ mod = len(self.containers) // dut_cnt
for i, container in enumerate(self.containers):
mid1 = i % mod + 1
sid1 = i % mod * 2 + 1
sid2 = i % mod * 2 + 2
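# Illustrative arithmetic (assuming 4 containers over 2 DUTs): mod = 2,
# so i=0 gives mid1=1, sid1=1, sid2=2 and i=1 gives mid1=2, sid1=3,
# sid2=4; the ids then restart for the containers on the second DUT.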
self.engine.container = self.containers[container]
- guest_dir = self.engine.container.mnt[0].split(':')[1]
-
- if chain_topology == 'chain':
- self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
- sid1=sid1, sid2=sid2,
- guest_dir=guest_dir,
- **kwargs)
- elif chain_topology == 'cross_horiz':
- self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
- sid1=sid1, sid2=sid2,
- guest_dir=guest_dir,
- **kwargs)
- elif chain_topology == 'chain_functional':
- self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
- sid1=sid1, sid2=sid2,
- guest_dir=guest_dir,
- **kwargs)
- elif chain_topology == 'chain_ip4':
- self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
- sid1=sid1, sid2=sid2,
- guest_dir=guest_dir,
- **kwargs)
- elif chain_topology == 'pipeline_ip4':
- self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
- sid1=sid1, sid2=sid2,
- guest_dir=guest_dir,
- **kwargs)
+ guest_dir = self.engine.container.mnt[0].split(u":")[1]
+
+ if chain_topology == u"chain":
+ self._configure_vpp_chain_l2xc(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
+ elif chain_topology == u"cross_horiz":
+ self._configure_vpp_cross_horiz(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
+ elif chain_topology == u"chain_functional":
+ self._configure_vpp_chain_functional(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
+ elif chain_topology == u"chain_ip4":
+ self._configure_vpp_chain_ip4(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
+ elif chain_topology == u"pipeline_ip4":
+ self._configure_vpp_pipeline_ip4(
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir, **kwargs
+ )
else:
- raise RuntimeError('Container topology {name} not implemented'.
- format(name=chain_topology))
+ raise RuntimeError(
+ f"Container topology {chain_topology} not implemented"
+ )
def _configure_vpp_chain_l2xc(self, **kwargs):
"""Configure VPP in chain topology with l2xc.
"""
self.engine.create_vpp_startup_config()
self.engine.create_vpp_exec_config(
- 'memif_create_chain_l2xc.exec',
- mid1=kwargs['mid1'], mid2=kwargs['mid2'],
- sid1=kwargs['sid1'], sid2=kwargs['sid2'],
- socket1='{guest_dir}/memif-{c.name}-{sid1}'.
- format(c=self.engine.container, **kwargs),
- socket2='{guest_dir}/memif-{c.name}-{sid2}'.
- format(c=self.engine.container, **kwargs))
+ u"memif_create_chain_l2xc.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
+ socket1=u"{guest_dir}/memif-{c.name}-{sid1}".format(
+ c=self.engine.container, **kwargs),
+ socket2=u"{guest_dir}/memif-{c.name}-{sid2}".format(
+ c=self.engine.container, **kwargs)
+ )
def _configure_vpp_cross_horiz(self, **kwargs):
"""Configure VPP in cross horizontal topology (single memif).
:param kwargs: Named parameters.
:param kwargs: dict
"""
- if 'DUT1' in self.engine.container.name:
+ if u"DUT1" in self.engine.container.name:
if_pci = Topology.get_interface_pci_addr(
- self.engine.container.node, kwargs['dut1_if'])
+ self.engine.container.node, kwargs[u"dut1_if"])
if_name = Topology.get_interface_name(
- self.engine.container.node, kwargs['dut1_if'])
- if 'DUT2' in self.engine.container.name:
+ self.engine.container.node, kwargs[u"dut1_if"])
+ if u"DUT2" in self.engine.container.name:
if_pci = Topology.get_interface_pci_addr(
- self.engine.container.node, kwargs['dut2_if'])
+ self.engine.container.node, kwargs[u"dut2_if"])
if_name = Topology.get_interface_name(
- self.engine.container.node, kwargs['dut2_if'])
+ self.engine.container.node, kwargs[u"dut2_if"])
self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
self.engine.create_vpp_exec_config(
- 'memif_create_cross_horizon.exec',
- mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
- socket1='{guest_dir}/memif-{c.name}-{sid1}'.
- format(c=self.engine.container, **kwargs))
+ u"memif_create_cross_horizon.exec",
+ mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
+ socket1=u"{guest_dir}/memif-{c.name}-{sid1}".format(
+ c=self.engine.container, **kwargs)
+ )
def _configure_vpp_chain_functional(self, **kwargs):
"""Configure VPP in chain topology with l2xc (functional).
"""
self.engine.create_vpp_startup_config_func_dev()
self.engine.create_vpp_exec_config(
- 'memif_create_chain_functional.exec',
- mid1=kwargs['mid1'], mid2=kwargs['mid2'],
- sid1=kwargs['sid1'], sid2=kwargs['sid2'],
- socket1='{guest_dir}/memif-{c.name}-{sid1}'.
- format(c=self.engine.container, **kwargs),
- socket2='{guest_dir}/memif-{c.name}-{sid2}'.
- format(c=self.engine.container, **kwargs),
- rx_mode='interrupt')
+ u"memif_create_chain_functional.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
+ socket1=u"{guest_dir}/memif-{c.name}-{sid1}".format(
+ c=self.engine.container, **kwargs),
+ socket2=u"{guest_dir}/memif-{c.name}-{sid2}".format(
+ c=self.engine.container, **kwargs),
+ rx_mode=u"interrupt"
+ )
def _configure_vpp_chain_ip4(self, **kwargs):
"""Configure VPP in chain topology with ip4.
"""
self.engine.create_vpp_startup_config()
- vif1_mac = kwargs['tg_if1_mac'] \
- if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
- else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
- vif2_mac = kwargs['tg_if2_mac'] \
- if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
- else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
+ vif1_mac = kwargs[u"tg_if1_mac"] \
+ if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
+ else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
+ vif2_mac = kwargs[u"tg_if2_mac"] \
+ if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
+ else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
self.engine.create_vpp_exec_config(
- 'memif_create_chain_ip4.exec',
- mid1=kwargs['mid1'], mid2=kwargs['mid2'],
- sid1=kwargs['sid1'], sid2=kwargs['sid2'],
- socket1='{guest_dir}/memif-{c.name}-{sid1}'.
- format(c=self.engine.container, **kwargs),
- socket2='{guest_dir}/memif-{c.name}-{sid2}'.
- format(c=self.engine.container, **kwargs),
- mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
- mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
- vif1_mac=vif1_mac, vif2_mac=vif2_mac)
+ u"memif_create_chain_ip4.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
+ socket1=u"{guest_dir}/memif-{c.name}-{sid1}".format(
+ c=self.engine.container, **kwargs),
+ socket2=u"{guest_dir}/memif-{c.name}-{sid2}".format(
+ c=self.engine.container, **kwargs),
+ mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
+ mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
+ vif1_mac=vif1_mac, vif2_mac=vif2_mac
+ )
def _configure_vpp_pipeline_ip4(self, **kwargs):
"""Configure VPP in pipeline topology with ip4.
:param kwargs: dict
"""
self.engine.create_vpp_startup_config()
- node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
- mid1 = kwargs['mid1']
- mid2 = kwargs['mid2']
- role1 = 'master'
- role2 = 'master' \
- if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
- else 'slave'
- kwargs['mid2'] = kwargs['mid2'] \
- if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
- else kwargs['mid2'] + 1
- vif1_mac = kwargs['tg_if1_mac'] \
- if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
- else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
- vif2_mac = kwargs['tg_if2_mac'] \
- if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
- else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
- socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
- format(c=self.engine.container, **kwargs) \
- if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
- format(c=self.engine.container, **kwargs)
- socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
- format(c=self.engine.container, **kwargs) \
- if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
- else '{guest_dir}/memif-pipe-{mid2}'.\
- format(c=self.engine.container, **kwargs)
+ node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
+ mid1 = kwargs[u"mid1"]
+ mid2 = kwargs[u"mid2"]
+ role1 = u"master"
+ role2 = u"master" if node == kwargs[u"nodes"] or node == 1 else u"slave"
+ kwargs[u"mid2"] = kwargs[u"mid2"] \
+ if node == kwargs[u"nodes"] or node == 1 else kwargs[u"mid2"] + 1
+ vif1_mac = kwargs[u"tg_if1_mac"] \
+ if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
+ else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
+ vif2_mac = kwargs[u"tg_if2_mac"] \
+ if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
+ else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
+ socket1 = u"{guest_dir}/memif-{c.name}-{sid1}".format(
+ c=self.engine.container, **kwargs) \
+ if node == 1 else u"{guest_dir}/memif-pipe-{mid1}".format(
+ c=self.engine.container, **kwargs)
+ socket2 = u"{guest_dir}/memif-{c.name}-{sid2}".format(
+ c=self.engine.container, **kwargs) \
+ if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
+ else u"{guest_dir}/memif-pipe-{mid2}".format(
+ c=self.engine.container, **kwargs)
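# Socket naming sketch (illustrative): with nodes=2, the first container
# (node=1) uses its per-container memif socket towards TG and a
# memif-pipe-{mid} socket towards its neighbour; the last container mirrors
# this, so adjacent containers meet on a shared pipe socket.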
self.engine.create_vpp_exec_config(
- 'memif_create_pipeline_ip4.exec',
- mid1=kwargs['mid1'], mid2=kwargs['mid2'],
- sid1=kwargs['sid1'], sid2=kwargs['sid2'],
+ u"memif_create_pipeline_ip4.exec",
+ mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
+ sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
socket1=socket1, socket2=socket2, role1=role1, role2=role2,
- mac1='52:54:00:00:{0:02X}:01'.format(mid1),
- mac2='52:54:00:00:{0:02X}:02'.format(mid2),
- vif1_mac=vif1_mac, vif2_mac=vif2_mac)
+ mac1=f"52:54:00:00:{mid1:02X}:01",
+ mac2=f"52:54:00:00:{mid2:02X}:02",
+ vif1_mac=vif1_mac, vif2_mac=vif2_mac
+ )
def stop_all_containers(self):
"""Stop all containers."""
def install_supervisor(self):
"""Install supervisord inside a container."""
if isinstance(self, LXC):
- self.execute('sleep 3; apt-get update')
- self.execute('apt-get install -y supervisor')
- self.execute('echo "{config}" > {config_file} && '
- 'supervisord -c {config_file}'.
- format(
- config='[unix_http_server]\n'
- 'file = /tmp/supervisor.sock\n\n'
- '[rpcinterface:supervisor]\n'
- 'supervisor.rpcinterface_factory = supervisor.'
- 'rpcinterface:make_main_rpcinterface\n\n'
- '[supervisorctl]\n'
- 'serverurl = unix:///tmp/supervisor.sock\n\n'
- '[supervisord]\n'
- 'pidfile = /tmp/supervisord.pid\n'
- 'identifier = supervisor\n'
- 'directory = /tmp\n'
- 'logfile = /tmp/supervisord.log\n'
- 'loglevel = debug\n'
- 'nodaemon = false\n\n',
- config_file=SUPERVISOR_CONF))
+ self.execute(u"sleep 3; apt-get update")
+ self.execute(u"apt-get install -y supervisor")
+ config = \
+ u"[unix_http_server]\n" \
+ u"file = /tmp/supervisor.sock\n\n" \
+ u"[rpcinterface:supervisor]\n" \
+ u"supervisor.rpcinterface_factory = " \
+ u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
+ u"[supervisorctl]\n" \
+ u"serverurl = unix:///tmp/supervisor.sock\n\n" \
+ u"[supervisord]\n" \
+ u"pidfile = /tmp/supervisord.pid\n" \
+ u"identifier = supervisor\n" \
+ u"directory = /tmp\n" \
+ u"logfile = /tmp/supervisord.log\n" \
+ u"loglevel = debug\n" \
+ u"nodaemon = false\n\n"
+ self.execute(
+ f"echo '{config}' > {SUPERVISOR_CONF} && "
+ f"supervisord -c {SUPERVISOR_CONF}"
+ )
def start_vpp(self):
"""Start VPP inside a container."""
- self.execute('echo "{config}" >> {config_file} && '
- 'supervisorctl reload'.
- format(
- config='[program:vpp]\n'
- 'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n'
- 'autostart = false\n'
- 'autorestart = false\n'
- 'redirect_stderr = true\n'
- 'priority = 1',
- config_file=SUPERVISOR_CONF))
+
+ config = \
+ u"[program:vpp]\n" \
+ u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
+ u"autostart = false\n" \
+ u"autorestart = false\n" \
+ u"redirect_stderr = true\n" \
+ u"priority = 1"
+ self.execute(
+ f"echo '{config}' >> {SUPERVISOR_CONF} && supervisorctl reload"
+ )
- self.execute('supervisorctl start vpp')
+ self.execute(u"supervisorctl start vpp")
from robot.libraries.BuiltIn import BuiltIn
topo_instance = BuiltIn().get_library_instance(
- 'resources.libraries.python.topology.Topology')
+ u"resources.libraries.python.topology.Topology"
+ )
topo_instance.add_new_socket(
self.container.node,
SocketType.PAPI,
self.container.name,
- '{root}/tmp/vpp_sockets/{name}/api.sock'.
- format(root=self.container.root, name=self.container.name))
+ f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
+ f"api.sock"
+ )
topo_instance.add_new_socket(
self.container.node,
SocketType.STATS,
self.container.name,
- '{root}/tmp/vpp_sockets/{name}/stats.sock'.
- format(root=self.container.root, name=self.container.name))
+ f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
+ f"stats.sock"
+ )
def restart_vpp(self):
"""Restart VPP service inside a container."""
- self.execute('supervisorctl restart vpp')
- self.execute('cat /tmp/supervisord.log')
+ self.execute(u"supervisorctl restart vpp")
+ self.execute(u"cat /tmp/supervisord.log")
def create_base_vpp_startup_config(self):
"""Create base startup configuration of VPP on container.
vpp_config.set_node(self.container.node)
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
- vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_unix_exec(u"/tmp/running.exec")
vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
- vpp_config.add_statseg_per_node_counters(value='on')
+ vpp_config.add_statseg_per_node_counters(value=u"on")
# We will pop the first core from the list to be a main core
vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
# If more cores in the list, the rest will be used as workers.
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
return vpp_config
"""Create startup configuration of VPP without DPDK on container.
"""
vpp_config = self.create_base_vpp_startup_config()
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+ vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"])
# Apply configuration
- self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
- .format(config=vpp_config.get_config_str()))
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f"echo '{vpp_config.get_config_str()}' | tee /etc/vpp/startup.conf"
+ )
def create_vpp_startup_config_dpdk_dev(self, *devices):
"""Create startup configuration of VPP with DPDK on container.
vpp_config = self.create_base_vpp_startup_config()
vpp_config.add_dpdk_dev(*devices)
vpp_config.add_dpdk_no_tx_checksum_offload()
- vpp_config.add_dpdk_log_level('debug')
- vpp_config.add_plugin('disable', 'default')
- vpp_config.add_plugin('enable', 'dpdk_plugin.so')
- vpp_config.add_plugin('enable', 'memif_plugin.so')
+ vpp_config.add_dpdk_log_level(u"debug")
+ vpp_config.add_plugin(u"disable", [u"default"])
+ vpp_config.add_plugin(u"enable", [u"dpdk_plugin.so"])
+ vpp_config.add_plugin(u"enable", [u"memif_plugin.so"])
# Apply configuration
- self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
- .format(config=vpp_config.get_config_str()))
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f"echo '{vpp_config.get_config_str()}' | tee /etc/vpp/startup.conf"
+ )
def create_vpp_startup_config_func_dev(self):
"""Create startup configuration of VPP on container for functional
vpp_config.set_node(self.container.node)
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
- vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_unix_exec(u"/tmp/running.exec")
vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
- vpp_config.add_statseg_per_node_counters(value='on')
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+ vpp_config.add_statseg_per_node_counters(value=u"on")
+ vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"])
# Apply configuration
- self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
- .format(config=vpp_config.get_config_str()))
+ self.execute(u"mkdir -p /etc/vpp/")
+ self.execute(
+ f"echo '{vpp_config.get_config_str()}' | tee /etc/vpp/startup.conf"
+ )
def create_vpp_exec_config(self, template_file, **kwargs):
"""Create VPP exec configuration on container.
:type template_file: str
:type kwargs: dict
"""
- running = '/tmp/running.exec'
+ running = u"/tmp/running.exec"
- template = '{res}/{tpl}'.format(
- res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)
+ template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"
- with open(template, 'r') as src_file:
+ with open(template, u"r") as src_file:
src = Template(src_file.read())
- self.execute('echo "{out}" > {running}'.format(
- out=src.safe_substitute(**kwargs), running=running))
+ self.execute(
+ u"echo '{out}' > {running}".format(
+ out=src.safe_substitute(**kwargs), running=running)
+ )
def is_container_running(self):
"""Check if container is running."""
:raises RuntimeError: If applying cgroup settings via cgset failed.
"""
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset -r cpuset.cpu_exclusive=0 /')
+ u"cgset -r cpuset.cpu_exclusive=0 /"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to apply cgroup settings.')
+ raise RuntimeError(u"Failed to apply cgroup settings.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset -r cpuset.mem_exclusive=0 /')
+ u"cgset -r cpuset.mem_exclusive=0 /"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to apply cgroup settings.')
+ raise RuntimeError(u"Failed to apply cgroup settings.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgcreate -g cpuset:/{name}'.format(name=name))
+ f"cgcreate -g cpuset:/{name}"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to copy cgroup settings from root.')
+ raise RuntimeError(u"Failed to copy cgroup settings from root.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
+ f"cgset -r cpuset.cpu_exclusive=0 /{name}"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to apply cgroup settings.')
+ raise RuntimeError(u"Failed to apply cgroup settings.")
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
+ f"cgset -r cpuset.mem_exclusive=0 /{name}"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to apply cgroup settings.')
+ raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
else:
return
- target_arch = 'arm64' \
- if Topology.get_node_arch(self.container.node) == 'aarch64' \
- else 'amd64'
+ target_arch = u"arm64" \
+ if Topology.get_node_arch(self.container.node) == u"aarch64" \
+ else u"amd64"
- image = self.container.image if self.container.image else\
- "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
+ image = self.container.image if self.container.image \
+ else f"-d ubuntu -r bionic -a {target_arch}"
- cmd = 'lxc-create -t download --name {c.name} -- {image} '\
- '--no-validate'.format(c=self.container, image=image)
+ cmd = f"lxc-create -t download --name {self.container.name} " \
+ f"-- {image} --no-validate"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
if int(ret) != 0:
- raise RuntimeError('Failed to create container.')
+ raise RuntimeError(u"Failed to create container.")
- self._configure_cgroup('lxc')
+ self._configure_cgroup(u"lxc")
def create(self):
"""Create/deploy an application inside a container on system.
if self.container.mnt:
# LXC fix for tmpfs
# https://github.com/lxc/lxc/issues/434
+ mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
ret, _, _ = self.container.ssh.exec_command_sudo(
- "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
- format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
- c=self.container))
+ f"sh -c 'echo \"{mnt_e}\" >> "
+ f"/var/lib/lxc/{self.container.name}/config'"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to write {c.name} config.'.
- format(c=self.container))
+ raise RuntimeError(
+ f"Failed to write {self.container.name} config."
+ )
for mount in self.container.mnt:
- host_dir, guest_dir = mount.split(':')
- options = 'bind,create=dir' \
- if guest_dir.endswith('/') else 'bind,create=file'
- entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
- '{options} 0 0'.format(
- host_dir=host_dir, guest_dir=guest_dir[1:],
- options=options)
+ host_dir, guest_dir = mount.split(u":")
+ options = u"bind,create=dir" if guest_dir.endswith(u"/") \
+ else u"bind,create=file"
+ entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
+ f"none {options} 0 0"
self.container.ssh.exec_command_sudo(
- "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir))
+ f"sh -c 'mkdir -p {host_dir}'"
+ )
ret, _, _ = self.container.ssh.exec_command_sudo(
- "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
- format(e=entry, c=self.container))
+ f"sh -c 'echo \"{entry}\" "
+ f">> /var/lib/lxc/{self.container.name}/config'"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to write {c.name} config.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to write {self.container.name} config."
+ )
- cpuset_cpus = '{0}'.format(
- ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
- if self.container.cpuset_cpus else ''
+ cpuset_cpus = u",".join(
+ f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
+ if self.container.cpuset_cpus else u""
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'lxc-start --name {c.name} --daemon'.format(c=self.container))
+ f"lxc-start --name {self.container.name} --daemon"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to start container {c.name}.'.
- format(c=self.container))
- self._lxc_wait('RUNNING')
+ raise RuntimeError(
+ f"Failed to start container {self.container.name}."
+ )
+ self._lxc_wait(u"RUNNING")
# Workaround for LXC to be able to allocate all cpus including isolated.
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'cgset --copy-from / lxc/')
+ u"cgset --copy-from / lxc/"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to copy cgroup to LXC')
+ raise RuntimeError(u"Failed to copy cgroup to LXC")
ret, _, _ = self.container.ssh.exec_command_sudo(
- 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
- format(c=self.container, cpus=cpuset_cpus))
+ f"lxc-cgroup --name {self.c.name} cpuset.cpus {cpuset_cpus}"
+ )
if int(ret) != 0:
- raise RuntimeError('Failed to set cpuset.cpus to container '
- '{c.name}.'.format(c=self.container))
+ raise RuntimeError(
+ f"Failed to set cpuset.cpus to container {self.container.name}."
+ )
def execute(self, command):
"""Start a process inside a running container.
:type command: str
:raises RuntimeError: If running the command failed.
"""
- env = '--keep-env {0}'.format(
- ' '.join('--set-var %s' % env for env in self.container.env))\
- if self.container.env else ''
+ env = u"--keep-env " + u" ".join(
+ f"--set-var {env!s}" for env in self.container.env) \
+ if self.container.env else u""
- cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
- "exit $?'".format(env=env, c=self.container, command=command)
+ cmd = f"lxc-attach {env} --name {self.container.name} " \
+ f"-- /bin/sh -c '{command}; exit $?'"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
- raise RuntimeError('Failed to run command inside container '
- '{c.name}.'.format(c=self.container))
+ raise RuntimeError(
+ f"Failed to run command inside container {self.container.name}."
+ )
def stop(self):
"""Stop a container.
:raises RuntimeError: If stopping the container failed.
"""
- cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
+ cmd = f"lxc-stop --name {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to stop container {c.name}.'
- .format(c=self.container))
- self._lxc_wait('STOPPED|FROZEN')
+ raise RuntimeError(
+ f"Failed to stop container {self.container.name}."
+ )
+ self._lxc_wait(u"STOPPED|FROZEN")
def destroy(self):
"""Destroy a container.
:raises RuntimeError: If destroying container failed.
"""
- cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
+ cmd = f"lxc-destroy --force --name {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to destroy container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to destroy container {self.container.name}."
+ )
def info(self):
"""Query and shows information about a container.
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'lxc-info --name {c.name}'.format(c=self.container)
+ cmd = f"lxc-info --name {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get info about container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to get info about container {self.container.name}."
+ )
def system_info(self):
"""Check the current kernel for LXC support.
:raises RuntimeError: If checking LXC support failed.
"""
- cmd = 'lxc-checkconfig'
+ cmd = u"lxc-checkconfig"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to check LXC support.')
+ raise RuntimeError(u"Failed to check LXC support.")
def is_container_running(self):
"""Check if container is running on node.
:rtype: bool
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'lxc-info --no-humanize --state --name {c.name}'\
- .format(c=self.container)
+ cmd = f"lxc-info --no-humanize --state --name {self.container.name}"
ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get info about container {c.name}.'
- .format(c=self.container))
- return True if 'RUNNING' in stdout else False
+ raise RuntimeError(
+ f"Failed to get info about container {self.container.name}."
+ )
+ return True if u"RUNNING" in stdout else False
def is_container_present(self):
"""Check if container is existing on node.
:rtype: bool
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
+ cmd = f"lxc-info --no-humanize --name {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
- return False if int(ret) else True
+ return not int(ret)
:type state: str
:raises RuntimeError: If waiting for state of a container failed.
"""
- cmd = 'lxc-wait --name {c.name} --state "{s}"'\
- .format(c=self.container, s=state)
+ cmd = f"lxc-wait --name {self.container.name} --state '{state}'"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to wait for state "{s}" of container '
- '{c.name}.'.format(s=state, c=self.container))
+ raise RuntimeError(
+ f"Failed to wait for state '{state}' "
+ f"of container {self.container.name}."
+ )
class Docker(ContainerEngine):
if not self.container.image:
img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
- if Topology.get_node_arch(self.container.node) == 'aarch64' \
+ if Topology.get_node_arch(self.container.node) == u"aarch64" \
else Constants.DOCKER_SUT_IMAGE_UBUNTU
- setattr(self.container, 'image', img)
+ setattr(self.container, u"image", img)
- cmd = 'docker pull {image}'.format(image=self.container.image)
+ cmd = f"docker pull {self.container.image}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
if int(ret) != 0:
- raise RuntimeError('Failed to create container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to create container {self.container.name}."
+ )
if self.container.cpuset_cpus:
- self._configure_cgroup('docker')
+ self._configure_cgroup(u"docker")
def create(self):
"""Create/deploy container.
:raises RuntimeError: If creating a container failed.
"""
- cpuset_cpus = '--cpuset-cpus={0}'.format(
- ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
- if self.container.cpuset_cpus else ''
+ cpuset_cpus = u"--cpuset-cpus=" + u",".join(
+ f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
+ if self.container.cpuset_cpus else u""
- cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
- if self.container.cpuset_mems is not None else ''
+ cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
+ if self.container.cpuset_mems is not None else u""
# Temporary workaround - disabling due to bug in memif
- cpuset_mems = ''
+ cpuset_mems = u""
- env = '{0}'.format(
- ' '.join('--env %s' % env for env in self.container.env))\
- if self.container.env else ''
+ env = u" ".join(f"--env {env!s}" for env in self.container.env) \
+ if self.container.env else u""
- command = '{0}'.format(self.container.command)\
- if self.container.command else ''
+ command = str(self.container.command) \
+ if self.container.command else u""
- publish = '{0}'.format(
- ' '.join('--publish %s' % var for var in self.container.publish))\
- if self.container.publish else ''
+ publish = u" ".join(
+ f"--publish {var!s}" for var in self.container.publish) \
+ if self.container.publish else u""
- volume = '{0}'.format(
- ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
- if self.container.mnt else ''
+ volume = u" ".join(
+ f"--volume {mnt!s}" for mnt in self.container.mnt) \
+ if self.container.mnt else u""
- cmd = 'docker run '\
- '--privileged --detach --interactive --tty --rm '\
- '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
- '{env} {volume} --name {container.name} {container.image} '\
- '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
- container=self.container, command=command,
- env=env, publish=publish, volume=volume)
+ cmd = f"docker run --privileged --detach --interactive --tty --rm " \
+ f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
+ f"{env} {volume}--name {self.container.name} " \
+ f"{self.container.image} {command}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to create container {c.name}'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to create container {self.container.name}"
+ )
self.info()
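(Illustrative sketch, not part of the patch: the shape of the assembled docker run command, with example option values; names here are hypothetical.)
# docker run --privileged --detach --interactive --tty --rm \
#   --cgroup-parent docker --cpuset-cpus=2,3 --publish 5000:5000 \
#   --env LD_PRELOAD=x.so --volume /tmp:/tmp --name DUT1_vnf1 \
#   ubuntu:18.04 /bin/bash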
:type command: str
:raises RuntimeError: If running the command in a container failed.
"""
- cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
- "exit $?'".format(c=self.container, command=command)
+ cmd = f"docker exec --interactive {self.container.name} " \
+ f"/bin/sh -c '{command}; exit $?'"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
- raise RuntimeError('Failed to execute command in container '
- '{c.name}.'.format(c=self.container))
+ raise RuntimeError(
+ f"Failed to execute command in container {self.container.name}."
+ )
def stop(self):
"""Stop running container.
:raises RuntimeError: If stopping a container failed.
"""
- cmd = 'docker stop {c.name}'.format(c=self.container)
+ cmd = f"docker stop {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to stop container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to stop container {self.container.name}."
+ )
def destroy(self):
"""Remove a container.
:raises RuntimeError: If removing a container failed.
"""
- cmd = 'docker rm --force {c.name}'.format(c=self.container)
+ cmd = f"docker rm --force {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to destroy container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to destroy container {self.container.name}."
+ )
def info(self):
"""Return low-level information on Docker objects.
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'docker inspect {c.name}'.format(c=self.container)
+ cmd = f"docker inspect {self.container.name}"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get info about container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to get info about container {self.container.name}."
+ )
def system_info(self):
"""Display the docker system-wide information.
:raises RuntimeError: If displaying system information failed.
"""
- cmd = 'docker system info'
+ cmd = u"docker system info"
ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get system info.')
+ raise RuntimeError(u"Failed to get system info.")
def is_container_present(self):
"""Check if container is present on node.
:rtype: bool
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'docker ps --all --quiet --filter name={c.name}'\
- .format(c=self.container)
+ cmd = f"docker ps --all --quiet --filter name={self.container.name}"
ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get info about container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to get info about container {self.container.name}."
+ )
- return True if stdout else False
+ return bool(stdout)
def is_container_running(self):
:rtype: bool
:raises RuntimeError: If getting info about a container failed.
"""
- cmd = 'docker ps --quiet --filter name={c.name}'\
- .format(c=self.container)
+ cmd = f"docker ps --quiet --filter name={self.container.name}"
ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
if int(ret) != 0:
- raise RuntimeError('Failed to get info about container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError(
+ f"Failed to get info about container {self.container.name}."
+ )
- return True if stdout else False
+ return bool(stdout)
self.__dict__[attr]
except KeyError:
# Creating new attribute
- if attr == 'node':
- self.__dict__['ssh'] = SSH()
- self.__dict__['ssh'].connect(value)
+ if attr == u"node":
+ self.__dict__[u"ssh"] = SSH()
+ self.__dict__[u"ssh"].connect(value)
self.__dict__[attr] = value
else:
# Updating attribute base of type
:type default_cop: int
:raises ValueError: If parameter 'ip_version' has incorrect value.
"""
- if ip_version not in ('ip4', 'ip6'):
- raise ValueError('IP version is not in correct format')
+ if ip_version not in (u"ip4", u"ip6"):
+ raise ValueError(u"IP version is not in correct format")
- cmd = 'cop_whitelist_enable_disable'
- err_msg = 'Failed to add COP whitelist on interface {ifc} on host' \
- ' {host}'.format(ifc=interface, host=node['host'])
+ cmd = u"cop_whitelist_enable_disable"
+ err_msg = f"Failed to add COP whitelist on interface {interface} " \
+ f"on host {node[u'host']}"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
fib_id=int(fib_id),
- ip4=True if ip_version == 'ip4' else False,
- ip6=True if ip_version == 'ip6' else False,
+ ip4=True if ip_version == u"ip4" else False,
+ ip6=True if ip_version == u"ip6" else False,
default_cop=default_cop
)
:raises ValueError: If parameter 'state' has incorrect value.
"""
state = state.lower()
- if state in ('enable', 'disable'):
- enable = True if state == 'enable' else False
+ if state in (u"enable", u"disable"):
+ enable = True if state == u"enable" else False
else:
- raise ValueError("Possible state values are 'enable' or 'disable'")
+ raise ValueError(u"Possible state values are 'enable' or 'disable'")
- cmd = 'cop_interface_enable_disable'
- err_msg = 'Failed to enable/disable COP on interface {ifc} on host' \
- ' {host}'.format(ifc=interface, host=node['host'])
+ cmd = u"cop_interface_enable_disable"
+ err_msg = f"Failed to enable/disable COP on interface {interface} " \
+ f"on host {node[u'host']}"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
enable_disable=enable
"""Core dump library."""
-from time import time
-
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.LimitUtil import LimitUtil
from resources.libraries.python.SysctlUtil import SysctlUtil
-from resources.libraries.python.ssh import exec_cmd_no_error, scp_node
+from resources.libraries.python.ssh import exec_cmd_no_error
from resources.libraries.python.topology import NodeType
-__all__ = ["CoreDumpUtil"]
+__all__ = [u"CoreDumpUtil"]
class CoreDumpUtil(object):
# Use one instance of class for all tests. If the functionality should
# be enabled per suite or per test case, change the scope to "TEST SUITE" or
# "TEST CASE" respectively.
- ROBOT_LIBRARY_SCOPE = 'GLOBAL'
+ ROBOT_LIBRARY_SCOPE = u"GLOBAL"
def __init__(self):
"""Initialize CoreDumpUtil class."""
# environment, and either have a core dump pipe handler that knows
# to treat privileged core dumps with care, or specific directory
# defined for catching core dumps. If a core dump happens without a
- # pipe handler or fully qualifid path, a message will be emitted to
+ # pipe handler or fully qualified path, a message will be emitted to
# syslog warning about the lack of a correct setting.
- SysctlUtil.set_sysctl_value(node, 'fs.suid_dumpable', 2)
+ SysctlUtil.set_sysctl_value(node, u"fs.suid_dumpable", 2)
# Specify a core dumpfile pattern name (for the output filename).
# %p pid
# %t UNIX time of dump
# %h hostname
# %e executable filename (may be shortened)
- SysctlUtil.set_sysctl_value(node, 'kernel.core_pattern',
- Constants.KERNEL_CORE_PATTERN)
+ SysctlUtil.set_sysctl_value(
+ node, u"kernel.core_pattern", Constants.KERNEL_CORE_PATTERN
+ )
self._corekeeper_configured = True
"""
if isinstance(pid, list):
for item in pid:
- LimitUtil.set_pid_limit(node, item, 'core', 'unlimited')
+ LimitUtil.set_pid_limit(node, item, u"core", u"unlimited")
LimitUtil.get_pid_limit(node, item)
else:
- LimitUtil.set_pid_limit(node, pid, 'core', 'unlimited')
+ LimitUtil.set_pid_limit(node, pid, u"core", u"unlimited")
LimitUtil.get_pid_limit(node, pid)
def enable_coredump_limit_vpp_on_all_duts(self, nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT and self.is_core_limit_enabled():
+ if node[u"type"] == NodeType.DUT and self.is_core_limit_enabled():
vpp_pid = DUTSetup.get_vpp_pid(node)
self.enable_coredump_limit(node, vpp_pid)
:type disable_on_success: bool
"""
for node in nodes.values():
- command = ('for f in {dir}/*.core; do '
- 'sudo gdb /usr/bin/vpp ${{f}} '
- '--eval-command="set pagination off" '
- '--eval-command="thread apply all bt" '
- '--eval-command="quit"; '
- 'sudo rm -f ${{f}}; done'
- .format(dir=Constants.CORE_DUMP_DIR))
+ command = f"for f in {Constants.CORE_DUMP_DIR}/*.core; do " \
+ f"sudo gdb /usr/bin/vpp ${{f}} " \
+ f"--eval-command='set pagination off' " \
+ f"--eval-command='thread apply all bt' " \
+ f"--eval-command='quit'; " \
+ f"sudo rm -f ${{f}}; done"
try:
exec_cmd_no_error(node, command, timeout=3600)
if disable_on_success:
self.set_core_limit_disabled()
except RuntimeError:
- # If compress was not sucessfull ignore error and skip further
+ # If compress was not successful ignore error and skip further
# processing.
continue
from resources.libraries.python.ssh import exec_cmd_no_error
from resources.libraries.python.topology import Topology
-__all__ = ["CpuUtils"]
+__all__ = [u"CpuUtils"]
class CpuUtils(object):
:param nodes: DICT__nodes from Topology.DICT__nodes.
:type nodes: dict
:raises RuntimeError: If an ssh command retrieving cpu information
- fails.
+ fails.
"""
for node in nodes.values():
- stdout, _ = exec_cmd_no_error(node, 'uname -m')
- node['arch'] = stdout.strip()
- stdout, _ = exec_cmd_no_error(node, 'lscpu -p')
- node['cpuinfo'] = list()
- for line in stdout.split("\n"):
- if line and line[0] != "#":
- node['cpuinfo'].append([CpuUtils.__str2int(x) for x in
- line.split(",")])
+ stdout, _ = exec_cmd_no_error(node, u"uname -m")
+ node[u"arch"] = stdout.strip()
+ stdout, _ = exec_cmd_no_error(node, u"lscpu -p")
+ node[u"cpuinfo"] = list()
+ for line in stdout.split(u"\n"):
+ if line and line[0] != u"#":
+ node[u"cpuinfo"].append(
+ [CpuUtils.__str2int(x) for x in line.split(u",")]
+ )
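(Illustrative sketch, not part of the patch: parsing one "lscpu -p" data row. The _str2int helper is a stand-in for the class's private converter, assuming blank fields map to 0.)
def _str2int(string):
    # Stand-in for CpuUtils.__str2int: blank lscpu fields become 0.
    try:
        return int(string)
    except ValueError:
        return 0

line = u"8,8,1,1,,8,8,8,0"  # CPU,Core,Socket,Node,,L1d,L1i,L2,L3
row = [_str2int(x) for x in line.split(u",")]
# row == [8, 8, 1, 1, 0, 8, 8, 8, 0]; column 3 is the NUMA node,
# which is why cpu_node_count() returns cpuinfo[-1][3] + 1.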
@staticmethod
def cpu_node_count(node):
:rtype: int
:raises RuntimeError: If node cpuinfo is not available.
"""
- cpu_info = node.get("cpuinfo")
+ cpu_info = node.get(u"cpuinfo")
if cpu_info is not None:
- return node["cpuinfo"][-1][3] + 1
+ return node[u"cpuinfo"][-1][3] + 1
else:
- raise RuntimeError("Node cpuinfo not available.")
+ raise RuntimeError(u"Node cpuinfo not available.")
@staticmethod
def cpu_list_per_node(node, cpu_node, smt_used=False):
or if SMT is not enabled.
"""
cpu_node = int(cpu_node)
- cpu_info = node.get("cpuinfo")
+ cpu_info = node.get(u"cpuinfo")
if cpu_info is None:
- raise RuntimeError("Node cpuinfo not available.")
+ raise RuntimeError(u"Node cpuinfo not available.")
smt_enabled = CpuUtils.is_smt_enabled(cpu_info)
if not smt_enabled and smt_used:
- raise RuntimeError("SMT is not enabled.")
+ raise RuntimeError(u"SMT is not enabled.")
cpu_list = []
for cpu in cpu_info:
return cpu_list
@staticmethod
- def cpu_slice_of_list_per_node(node, cpu_node, skip_cnt=0, cpu_cnt=0,
- smt_used=False):
+ def cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, smt_used=False):
"""Return string of node related list of CPU numbers.
:param node: Node dictionary with cpuinfo.
cpu_list_len = len(cpu_list)
if cpu_cnt + skip_cnt > cpu_list_len:
- raise RuntimeError("cpu_cnt + skip_cnt > length(cpu list).")
+ raise RuntimeError(u"cpu_cnt + skip_cnt > length(cpu list).")
if cpu_cnt == 0:
cpu_cnt = cpu_list_len - skip_cnt
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
cpu_list = [cpu for cpu in cpu_list_0[skip_cnt:skip_cnt + cpu_cnt]]
cpu_list_ex = [cpu for cpu in
- cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]]
+ cpu_list_1[skip_cnt:skip_cnt + cpu_cnt]
+ ]
cpu_list.extend(cpu_list_ex)
else:
cpu_list = [cpu for cpu in cpu_list[skip_cnt:skip_cnt + cpu_cnt]]
return cpu_list
@staticmethod
- def cpu_list_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=",",
- smt_used=False):
+ def cpu_list_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u",", smt_used=False):
"""Return string of node related list of CPU numbers.
:param node: Node dictionary with cpuinfo.
:returns: Cpu numbers related to numa from argument.
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
return sep.join(str(cpu) for cpu in cpu_list)
@staticmethod
- def cpu_range_per_node_str(node, cpu_node, skip_cnt=0, cpu_cnt=0, sep="-",
- smt_used=False):
+ def cpu_range_per_node_str(
+ node, cpu_node, skip_cnt=0, cpu_cnt=0, sep=u"-", smt_used=False):
"""Return string of node related range of CPU numbers, e.g. 0-4.
:param node: Node dictionary with cpuinfo.
:returns: String of node related range of CPU numbers.
:rtype: str
"""
- cpu_list = CpuUtils.cpu_slice_of_list_per_node(node, cpu_node,
- skip_cnt=skip_cnt,
- cpu_cnt=cpu_cnt,
- smt_used=smt_used)
+ cpu_list = CpuUtils.cpu_slice_of_list_per_node(
+ node, cpu_node, skip_cnt=skip_cnt, cpu_cnt=cpu_cnt,
+ smt_used=smt_used
+ )
if smt_used:
cpu_list_len = len(cpu_list)
- cpu_list_0 = cpu_list[:cpu_list_len / CpuUtils.NR_OF_THREADS]
- cpu_list_1 = cpu_list[cpu_list_len / CpuUtils.NR_OF_THREADS:]
+ cpu_list_0 = cpu_list[:cpu_list_len // CpuUtils.NR_OF_THREADS]
+ cpu_list_1 = cpu_list[cpu_list_len // CpuUtils.NR_OF_THREADS:]
- cpu_range = "{}{}{},{}{}{}".format(cpu_list_0[0], sep,
- cpu_list_0[-1],
- cpu_list_1[0], sep,
- cpu_list_1[-1])
+ cpu_range = f"{cpu_list_0[0]}{sep}{cpu_list_0[-1]}," \
+ f"{cpu_list_1[0]}{sep}{cpu_list_1[-1]}"
else:
- cpu_range = "{}{}{}".format(cpu_list[0], sep, cpu_list[-1])
+ cpu_range = f"{cpu_list[0]}{sep}{cpu_list[-1]}"
return cpu_range
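(Illustrative sketch, not part of the patch: the SMT branch above, assuming NR_OF_THREADS == 2 and a hypothetical sibling-paired CPU list.)
cpu_list = [1, 2, 3, 4, 29, 30, 31, 32]  # physical cores, then siblings
half = len(cpu_list) // 2                # note integer division
cpu_list_0, cpu_list_1 = cpu_list[:half], cpu_list[half:]
cpu_range = f"{cpu_list_0[0]}-{cpu_list_0[-1]},{cpu_list_1[0]}-{cpu_list_1[-1]}"
assert cpu_range == u"1-4,29-32"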
@staticmethod
- def cpu_slice_of_list_for_nf(node, cpu_node, nf_chains=1, nf_nodes=1,
- nf_chain=1, nf_node=1, nf_dtc=1, nf_mtcr=2,
- nf_dtcr=1, skip_cnt=0):
+ def cpu_slice_of_list_for_nf(
+ node, cpu_node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ nf_dtc=1, nf_mtcr=2, nf_dtcr=1, skip_cnt=0):
"""Return list of DUT node related list of CPU numbers. The main
computing unit is physical core count.
:param nf_node: Node number indexed from 1.
:param nf_dtc: Amount of physical cores for NF dataplane.
:param nf_mtcr: NF main thread per core ratio.
- :param nf_dtcr: NF dataplane thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
:param skip_cnt: Skip first "skip_cnt" CPUs.
:type node: dict
:type cpu_node: int
placement is not possible due to wrong parameters.
"""
if not 1 <= nf_chain <= nf_chains:
- raise RuntimeError("ChainID is out of range!")
+ raise RuntimeError(u"ChainID is out of range!")
if not 1 <= nf_node <= nf_nodes:
- raise RuntimeError("NodeID is out of range!")
+ raise RuntimeError(u"NodeID is out of range!")
- smt_used = CpuUtils.is_smt_enabled(node['cpuinfo'])
+ smt_used = CpuUtils.is_smt_enabled(node[u"cpuinfo"])
cpu_list = CpuUtils.cpu_list_per_node(node, cpu_node, smt_used)
# CPU thread sibling offset.
- sib = len(cpu_list) / CpuUtils.NR_OF_THREADS
+ sib = len(cpu_list) // CpuUtils.NR_OF_THREADS
dtc_is_integer = isinstance(nf_dtc, int)
if not smt_used and not dtc_is_integer:
- raise RuntimeError("Cannot allocate if SMT is not enabled!")
+ raise RuntimeError(u"Cannot allocate if SMT is not enabled!")
# TODO: Workaround as we are using physical core as main unit, we must
# adjust number of physical dataplane cores in case of float for further
dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr
if (skip_cnt + mt_req + dt_req) > (sib if smt_used else len(cpu_list)):
- raise RuntimeError("Not enough CPU cores available for placement!")
+ raise RuntimeError(u"Not enough CPU cores available for placement!")
offset = (nf_node - 1) + (nf_chain - 1) * nf_nodes
mt_skip = skip_cnt + (offset % mt_req)
return result
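(Illustrative sketch, not part of the patch: the thread-requirement arithmetic uses ceiling division. The dt_req formula is from the hunk above; mt_req is assumed to mirror it with nf_mtcr.)
nf_chains, nf_nodes, nf_mtcr, nf_dtcr = 2, 2, 2, 3
mt_req = ((nf_chains * nf_nodes) + nf_mtcr - 1) // nf_mtcr  # ceil(4/2) == 2
dt_req = ((nf_chains * nf_nodes) + nf_dtcr - 1) // nf_dtcr  # ceil(4/3) == 2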
@staticmethod
- def get_affinity_nf(nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1,
- nf_node=1, vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
+ def get_affinity_nf(
+ nodes, node, nf_chains=1, nf_nodes=1, nf_chain=1, nf_node=1,
+ vs_dtc=1, nf_dtc=1, nf_mtcr=2, nf_dtcr=1):
"""Get affinity of NF (network function). Result will be used to compute
the amount of CPUs and also affinity.
:param nf_nodes: Number of NF nodes in chain.
:param nf_chain: Chain number indexed from 1.
:param nf_node: Node number indexed from 1.
- :param vs_dtc: Amount of physical cores for vswitch dataplane.
- :param nf_dtc: Amount of physical cores for NF dataplane.
+ :param vs_dtc: Amount of physical cores for vswitch data plane.
+ :param nf_dtc: Amount of physical cores for NF data plane.
:param nf_mtcr: NF main thread per core ratio.
- :param nf_dtcr: NF dataplane thread per core ratio.
+ :param nf_dtcr: NF data plane thread per core ratio.
:type nodes: dict
:type node: dict
:type nf_chains: int
"""
skip_cnt = Constants.CPU_CNT_SYSTEM + Constants.CPU_CNT_MAIN + vs_dtc
- interface_list = []
- interface_list.append(
- BuiltIn().get_variable_value('${{{node}_if1}}'.format(node=node)))
- interface_list.append(
- BuiltIn().get_variable_value('${{{node}_if2}}'.format(node=node)))
+ interface_list = list()
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if1}}"))
+ interface_list.append(BuiltIn().get_variable_value(f"${{{node}_if2}}"))
cpu_node = Topology.get_interfaces_numa_node(
nodes[node], *interface_list)
return CpuUtils.cpu_slice_of_list_for_nf(
node=nodes[node], cpu_node=cpu_node, nf_chains=nf_chains,
nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
- nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt)
+ nf_mtcr=nf_mtcr, nf_dtcr=nf_dtcr, nf_dtc=nf_dtc, skip_cnt=skip_cnt
+ )
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from robot.api import logger
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
:type dut_if2: str
:raises RuntimeError: If it fails to bind the interfaces to igb_uio.
"""
- if dut_node['type'] == NodeType.DUT:
+ if dut_node["type"] == NodeType.DUT:
pci_address1 = Topology.get_interface_pci_addr(dut_node, dut_if1)
pci_address2 = Topology.get_interface_pci_addr(dut_node, dut_if2)
ssh.connect(dut_node)
arch = Topology.get_node_arch(dut_node)
- cmd = '{fwdir}/tests/dpdk/dpdk_scripts/init_dpdk.sh '\
- '{pci1} {pci2} {arch}'.format(fwdir=Constants.REMOTE_FW_DIR,
- pci1=pci_address1,
- pci2=pci_address2,
- arch=arch)
+ cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \
+ f"/init_dpdk.sh {pci_address1} {pci_address2} {arch}"
ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600)
if ret_code != 0:
- raise RuntimeError('Failed to bind the interfaces to igb_uio '
- 'at node {name}'.\
- format(name=dut_node['host']))
+ raise RuntimeError(
+ f"Failed to bind the interfaces to igb_uio at node "
+ f"{dut_node['host']}"
+ )
@staticmethod
def cleanup_dpdk_environment(dut_node, dut_if1, dut_if2):
:type dut_if2: str
:raises RuntimeError: If it fails to cleanup the dpdk.
"""
- if dut_node['type'] == NodeType.DUT:
+ if dut_node["type"] == NodeType.DUT:
pci_address1 = Topology.get_interface_pci_addr(dut_node, dut_if1)
if1_driver = Topology.get_interface_driver(dut_node, dut_if1)
pci_address2 = Topology.get_interface_pci_addr(dut_node, dut_if2)
ssh = SSH()
ssh.connect(dut_node)
- cmd = '{fwdir}/tests/dpdk/dpdk_scripts/cleanup_dpdk.sh ' \
- '{drv1} {pci1} {drv2} {pci2}'.\
- format(fwdir=Constants.REMOTE_FW_DIR, drv1=if1_driver,
- pci1=pci_address1, drv2=if2_driver, pci2=pci_address2)
+ cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \
+ f"/cleanup_dpdk.sh {if1_driver} {pci_address1} {if2_driver} " \
+ f"{pci_address2}"
ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600)
if ret_code != 0:
- raise RuntimeError('Failed to cleanup the dpdk at node {name}'.
- format(name=dut_node['host']))
+ raise RuntimeError(
+ f"Failed to cleanup the dpdk at node {dut_node['host']}"
+ )
@staticmethod
def install_dpdk_test(node):
"""
arch = Topology.get_node_arch(node)
- command = ('{fwdir}/tests/dpdk/dpdk_scripts/install_dpdk.sh {arch}'.
- format(fwdir=Constants.REMOTE_FW_DIR, arch=arch))
- message = 'Install the DPDK failed!'
+ command = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \
+ f"/install_dpdk.sh {arch}"
+ message = u"Install the DPDK failed!"
exec_cmd_no_error(node, command, timeout=600, message=message)
- command = ('cat {fwdir}/dpdk*/VERSION'.
- format(fwdir=Constants.REMOTE_FW_DIR))
- message = 'Get DPDK version failed!'
+ command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION"
+ message = u"Get DPDK version failed!"
stdout, _ = exec_cmd_no_error(node, command, message=message)
- logger.info('DPDK Version: {version}'.format(version=stdout))
+ logger.info(f"DPDK Version: {stdout}")
@staticmethod
def install_dpdk_test_on_all_duts(nodes):
:type nodes: dict
:returns: nothing
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DPDKTools.install_dpdk_test(node)
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
DUT nodes.
"""
-from resources.libraries.python.ssh import SSH
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType, Topology
"""Setup the DPDK for l2fwd performance test."""
@staticmethod
- def start_the_l2fwd_test(dut_node, cpu_cores, nb_cores, queue_nums,
- jumbo_frames):
+ def start_the_l2fwd_test(
+ dut_node, cpu_cores, nb_cores, queue_nums, jumbo_frames):
"""
Execute the l2fwd on the dut_node.
:type jumbo_frames: bool
:raises RuntimeError: If the script "run_l2fwd.sh" fails.
"""
- if dut_node['type'] == NodeType.DUT:
+ if dut_node["type"] == NodeType.DUT:
ssh = SSH()
ssh.connect(dut_node)
arch = Topology.get_node_arch(dut_node)
- jumbo = 'yes' if jumbo_frames else 'no'
- cmd = '{fwdir}/tests/dpdk/dpdk_scripts/run_l2fwd.sh {cpu_cores} ' \
- '{nb_cores} {queues} {jumbo} {arch}'.\
- format(fwdir=Constants.REMOTE_FW_DIR, cpu_cores=cpu_cores,
- nb_cores=nb_cores, queues=queue_nums,
- jumbo=jumbo, arch=arch)
+ jumbo = u"yes" if jumbo_frames else u"no"
+ cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \
+ f"/run_l2fwd.sh {cpu_cores} {nb_cores} {queue_nums} {jumbo} " \
+ f"{arch}"
ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600)
if ret_code != 0:
- raise RuntimeError('Failed to execute l2fwd test at node '
- '{name}'.format(name=dut_node['host']))
+ raise RuntimeError(
+ f"Failed to execute l2fwd test at node {dut_node['host']}"
+ )
This module exists to provide the l3fwd test for DPDK on topology nodes.
"""
-from resources.libraries.python.ssh import SSH
from resources.libraries.python.Constants import Constants
+from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import NodeType, Topology
"""Test the DPDK l3fwd performance."""
@staticmethod
- def start_the_l3fwd_test(nodes_info, dut_node, dut_if1, dut_if2,
- nb_cores, lcores_list, queue_nums, jumbo_frames):
+ def start_the_l3fwd_test(
+ nodes_info, dut_node, dut_if1, dut_if2, nb_cores, lcores_list,
+ queue_nums, jumbo_frames):
"""
Execute the l3fwd on the dut_node.
:type queue_nums: str
:type jumbo_frames: bool
"""
- if dut_node['type'] == NodeType.DUT:
- adj_mac0, adj_mac1 = L3fwdTest.get_adj_mac(nodes_info, dut_node,
- dut_if1, dut_if2)
+ if dut_node["type"] == NodeType.DUT:
+ adj_mac0, adj_mac1 = L3fwdTest.get_adj_mac(
+ nodes_info, dut_node, dut_if1, dut_if2)
- list_cores = [int(item) for item in lcores_list.split(',')]
+ list_cores = [int(item) for item in lcores_list.split(u",")]
for port in range(0, 2):
for queue in range(0, int(queue_nums)):
index = 0 if nb_cores == 1 else index
- port_config += '({port}, {queue}, {core}),'.\
- format(port=port, queue=queue, core=list_cores[index])
+ port_config += f"({port}, {queue}, {list_cores[index]}),"
index += 1
ssh = SSH()
ssh.connect(dut_node)
- cmd = '{fwdir}/tests/dpdk/dpdk_scripts/run_l3fwd.sh ' \
- '"{lcores}" "{ports}" {mac1} {mac2} {jumbo}'.\
- format(fwdir=Constants.REMOTE_FW_DIR, lcores=lcores_list,
- ports=port_config.rstrip(','), mac1=adj_mac0,
- mac2=adj_mac1, jumbo='yes' if jumbo_frames else 'no')
+ cmd = f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts" \
+ f"/run_l3fwd.sh '{lcores_list}' '{port_config.rstrip(',')}' " \
+ f"{adj_mac0} {adj_mac1} {'yes' if jumbo_frames else 'no'}"
ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=600)
if ret_code != 0:
- raise Exception('Failed to execute l3fwd test at node {name}'
- .format(name=dut_node['host']))
+ raise RuntimeError(
+ f"Failed to execute l3fwd test at node {dut_node['host']}"
+ )
@staticmethod
def get_adj_mac(nodes_info, dut_node, dut_if1, dut_if2):
# detect which is the port 0
if min(if_pci0, if_pci1) != if_pci0:
if_key0, if_key1 = if_key1, if_key0
- L3fwdTest.patch_l3fwd(dut_node, 'patch_l3fwd_flip_routes')
+ L3fwdTest.patch_l3fwd(dut_node, u"patch_l3fwd_flip_routes")
- adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface( \
- nodes_info, dut_node, if_key0)
- adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface( \
- nodes_info, dut_node, if_key1)
+ adj_node0, adj_if_key0 = Topology.get_adjacent_node_and_interface(
+ nodes_info, dut_node, if_key0)
+ adj_node1, adj_if_key1 = Topology.get_adjacent_node_and_interface(
+ nodes_info, dut_node, if_key1)
adj_mac0 = Topology.get_interface_mac(adj_node0, adj_if_key0)
adj_mac1 = Topology.get_interface_mac(adj_node1, adj_if_key1)
ssh.connect(node)
ret_code, _, _ = ssh.exec_command(
- '{fwdir}/tests/dpdk/dpdk_scripts/patch_l3fwd.sh {arch} '
- '{fwdir}/tests/dpdk/dpdk_scripts/{patch}'.
- format(fwdir=Constants.REMOTE_FW_DIR, arch=arch, patch=patch),
+ f"{Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts/patch_l3fwd.sh "
+ f"{arch} {Constants.REMOTE_FW_DIR}/tests/dpdk/dpdk_scripts/{patch}",
timeout=600)
if ret_code != 0:
- raise RuntimeError('Patch of l3fwd failed.')
-
+ raise RuntimeError(u"Patch of l3fwd failed.")
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
:type node: dict
:type service: str
"""
- if DUTSetup.running_in_container(node):
- command = ('echo $(< /tmp/*supervisor*.log)')
- else:
- command = ('journalctl --no-pager --unit={name} '
- '--since="$(echo `systemctl show -p '
- 'ActiveEnterTimestamp {name}` | '
- 'awk \'{{print $2 $3}}\')"'.
- format(name=service))
- message = 'Node {host} failed to get logs from unit {name}'.\
- format(host=node['host'], name=service)
+ command = u"echo $(< /tmp/*supervisor*.log)"\
+ if DUTSetup.running_in_container(node) \
+ else f"journalctl --no-pager --unit={service} " \
+ f"--since='$(echo `systemctl show -p ActiveEnterTimestamp " \
+ f"{service}` | awk \'{{print $2 $3}}\')'"
+ message = f"Node {node[u'host']} failed to get logs from unit {service}"
- exec_cmd_no_error(node, command, timeout=30, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=30, sudo=True, message=message
+ )
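(Illustrative sketch, not part of the patch: with service u"vpp", the non-container branch expands to one shell command. Note the double quotes around the command substitution so the awk single quotes stay intact.)
# journalctl --no-pager --unit=vpp \
#   --since="$(echo `systemctl show -p ActiveEnterTimestamp vpp` \
#   | awk '{print $2 $3}')"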
@staticmethod
def get_service_logs_on_all_duts(nodes, service):
:type service: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DUTSetup.get_service_logs(node, service)
@staticmethod
:type node: dict
:type service: str
"""
- if DUTSetup.running_in_container(node):
- command = 'supervisorctl restart {name}'.format(name=service)
- else:
- command = 'service {name} restart'.format(name=service)
- message = 'Node {host} failed to restart service {name}'.\
- format(host=node['host'], name=service)
+ command = f"supervisorctl restart {service}" \
+ if DUTSetup.running_in_container(node) \
+ else f"service {service} restart"
+ message = f"Node {node[u'host']} failed to restart service {service}"
exec_cmd_no_error(
- node, command, timeout=180, sudo=True, message=message)
+ node, command, timeout=180, sudo=True, message=message
+ )
DUTSetup.get_service_logs(node, service)
def restart_service_on_all_duts(nodes, service):
"""Restart the named service on all DUTs.
- :param node: Nodes in the topology.
+ :param nodes: Nodes in the topology.
:param service: Service unit name.
- :type node: dict
+ :type nodes: dict
:type service: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DUTSetup.restart_service(node, service)
@staticmethod
:type service: str
"""
# TODO: change command to start once all parent function updated.
- if DUTSetup.running_in_container(node):
- command = 'supervisorctl restart {name}'.format(name=service)
- else:
- command = 'service {name} restart'.format(name=service)
- message = 'Node {host} failed to start service {name}'.\
- format(host=node['host'], name=service)
+ command = f"supervisorctl restart {service}" \
+ if DUTSetup.running_in_container(node) \
+ else f"service {service} restart"
+ message = f"Node {node[u'host']} failed to start service {service}"
exec_cmd_no_error(
- node, command, timeout=180, sudo=True, message=message)
+ node, command, timeout=180, sudo=True, message=message
+ )
DUTSetup.get_service_logs(node, service)
def start_service_on_all_duts(nodes, service):
"""Start up the named service on all DUTs.
- :param node: Nodes in the topology.
+ :param nodes: Nodes in the topology.
:param service: Service unit name.
- :type node: dict
+ :type nodes: dict
:type service: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DUTSetup.start_service(node, service)
@staticmethod
:type node: dict
:type service: str
"""
- if DUTSetup.running_in_container(node):
- command = 'supervisorctl stop {name}'.format(name=service)
- else:
- command = 'service {name} stop'.format(name=service)
- message = 'Node {host} failed to stop service {name}'.\
- format(host=node['host'], name=service)
+ command = f"supervisorctl stop {service}" \
+ if DUTSetup.running_in_container(node) \
+ else f"service {service} stop"
+ message = f"Node {node[u'host']} failed to stop service {service}"
exec_cmd_no_error(
- node, command, timeout=180, sudo=True, message=message)
+ node, command, timeout=180, sudo=True, message=message
+ )
DUTSetup.get_service_logs(node, service)
def stop_service_on_all_duts(nodes, service):
"""Stop the named service on all DUTs.
- :param node: Nodes in the topology.
+ :param nodes: Nodes in the topology.
:param service: Service unit name.
- :type node: dict
+ :type nodes: dict
:type service: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DUTSetup.stop_service(node, service)
@staticmethod
ssh.connect(node)
for i in range(3):
- logger.trace('Try {}: Get VPP PID'.format(i))
- ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
+ logger.trace(f"Try {i}: Get VPP PID")
+ ret_code, stdout, stderr = ssh.exec_command(u"pidof vpp")
if int(ret_code):
- raise RuntimeError('Not possible to get PID of VPP process '
- 'on node: {0}\n {1}'.
- format(node['host'], stdout + stderr))
+ raise RuntimeError(
+ f"Not possible to get PID of VPP process on node: "
+ f"{node[u'host']}\n {stdout + stderr}"
+ )
pid_list = stdout.split()
if len(pid_list) == 1:
return int(stdout)
elif not pid_list:
- logger.debug("No VPP PID found on node {0}".
- format(node['host']))
+ logger.debug(f"No VPP PID found on node {node[u'host']}")
continue
else:
- logger.debug("More then one VPP PID found on node {0}".
- format(node['host']))
+ logger.debug(
+ f"More then one VPP PID found on node {node[u'host']}"
+ )
return [int(pid) for pid in pid_list]
return None
"""
pids = dict()
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- pids[node['host']] = DUTSetup.get_vpp_pid(node)
+ if node[u"type"] == NodeType.DUT:
+ pids[node[u"host"]] = DUTSetup.get_vpp_pid(node)
return pids
@staticmethod
# QAT is not initialized and we want to initialize with numvfs
DUTSetup.crypto_device_init(node, crypto_type, numvfs)
else:
- raise RuntimeError('QAT device failed to create VFs on {host}'.
- format(host=node['host']))
+ raise RuntimeError(
+ f"QAT device failed to create VFs on {node[u'host']}"
+ )
@staticmethod
def crypto_device_init(node, crypto_type, numvfs):
:returns: nothing
:raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
"""
- if crypto_type == "HW_DH895xcc":
- kernel_mod = "qat_dh895xcc"
- kernel_drv = "dh895xcc"
- elif crypto_type == "HW_C3xxx":
- kernel_mod = "qat_c3xxx"
- kernel_drv = "c3xxx"
+ if crypto_type == u"HW_DH895xcc":
+ kernel_mod = u"qat_dh895xcc"
+ kernel_drv = u"dh895xcc"
+ elif crypto_type == u"HW_C3xxx":
+ kernel_mod = u"qat_c3xxx"
+ kernel_drv = u"c3xxx"
else:
- raise RuntimeError('Unsupported crypto device type on {host}'.
- format(host=node['host']))
+ raise RuntimeError(
+ f"Unsupported crypto device type on {node[u'host']}"
+ )
pci_addr = Topology.get_cryptodev(node)
DUTSetup.stop_service(node, Constants.VPP_UNIT)
current_driver = DUTSetup.get_pci_dev_driver(
- node, pci_addr.replace(':', r'\:'))
+ node, pci_addr.replace(u":", r"\:")
+ )
if current_driver is not None:
DUTSetup.pci_driver_unbind(node, pci_addr)
- :rtype: int
+ :rtype: str
:raises RuntimeError: If failed to get Virtual Function PCI address.
"""
- command = "sh -c "\
- "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
- format(pci=pf_pci_addr, vf_id=vf_id)
- message = 'Failed to get virtual function PCI address.'
+ command = f"sh -c 'basename $(readlink " \
+ f"/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id})'"
+ message = u"Failed to get virtual function PCI address."
- stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
- message=message)
+ stdout, _ = exec_cmd_no_error(
+ node, command, timeout=30, sudo=True, message=message
+ )
return stdout.strip()
:rtype: int
:raises RuntimeError: If PCI device is not SR-IOV capable.
"""
- command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
- format(pci=pf_pci_addr.replace(':', r'\:'))
- message = 'PCI device {pci} is not a SR-IOV device.'.\
- format(pci=pf_pci_addr)
+ pci = pf_pci_addr.replace(u":", r"\:")
+ command = f"cat /sys/bus/pci/devices/{pci}/sriov_numvfs"
+ message = f"PCI device {pf_pci_addr} is not a SR-IOV device."
for _ in range(3):
- stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
- message=message)
+ stdout, _ = exec_cmd_no_error(
+ node, command, timeout=30, sudo=True, message=message
+ )
try:
sriov_numvfs = int(stdout)
except ValueError:
- logger.trace('Reading sriov_numvfs info failed on {host}'.
- format(host=node['host']))
+ logger.trace(
+ f"Reading sriov_numvfs info failed on {node[u'host']}"
+ )
else:
return sriov_numvfs
:type numvfs: int
:raises RuntimeError: Failed to create VFs on PCI.
"""
- command = "sh -c "\
- "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
- format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
- message = 'Failed to create {num} VFs on {pci} device on {host}'.\
- format(num=numvfs, pci=pf_pci_addr, host=node['host'])
+ pci = pf_pci_addr.replace(u":", r"\:")
+ command = f"sh -c 'echo {numvfs} | " \
+ f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs'"
+ message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
+ f"on {node[u'host']}"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
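(Illustrative sketch, not part of the patch: the colon escaping kept by the patch when sysfs paths are passed through sh -c, shown with a hypothetical PCI address.)
pf_pci_addr = u"0000:00:04.0"            # hypothetical PCI address
pci = pf_pci_addr.replace(u":", r"\:")   # -> 0000\:00\:04.0
# sh -c 'echo 8 | tee /sys/bus/pci/devices/0000\:00\:04.0/sriov_numvfs'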
@staticmethod
def pci_driver_unbind(node, pci_addr):
:type pci_addr: str
:raises RuntimeError: If PCI device unbind failed.
"""
- command = "sh -c "\
- "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
- format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
- message = 'Failed to unbind PCI device {pci} on {host}'.\
- format(pci=pci_addr, host=node['host'])
+ pci = pci_addr.replace(u":", r"\:")
+ command = f"sh -c 'echo {pci_addr} | " \
+ f"tee /sys/bus/pci/devices/{pci}/driver/unbind'"
+ message = f"Failed to unbind PCI device {pci_addr} on {node[u'host']}"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
@staticmethod
def pci_driver_bind(node, pci_addr, driver):
:type driver: str
:raises RuntimeError: If PCI device bind failed.
"""
- message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
- format(pci=pci_addr, driver=driver, host=node['host'])
+ message = f"Failed to bind PCI device {pci_addr} to {driver} " \
+ f"on host {node[u'host']}"
+ pci = pci_addr.replace(u":", r"\:")
+ command = f"sh -c 'echo {driver} | " \
+ f"tee /sys/bus/pci/devices/{pci}/driver_override'"
- command = "sh -c "\
- "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
- format(driver=driver, pci=pci_addr.replace(':', r'\:'))
-
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
- command = "sh -c "\
- "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
- format(pci=pci_addr, driver=driver)
+ command = f"sh -c 'echo {pci_addr} | " \
+ f"tee /sys/bus/pci/drivers/{driver}/bind'"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
- command = "sh -c "\
- "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
- format(pci=pci_addr.replace(':', r'\:'))
+ command = f"sh -c 'echo | " \
+ f"tee /sys/bus/pci/devices/{pci}/driver_override'"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
@staticmethod
def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
:raises RuntimeError: If Virtual Function unbind failed.
"""
vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
- vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
- format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
-
- command = "sh -c "\
- "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
- format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
+ pf_pci = pf_pci_addr.replace(u":", r"\:")
+ vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"
- message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
- format(vf_pci_addr=vf_pci_addr, host=node['host'])
+ command = f"sh -c 'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'"
+ message = f"Failed to unbind VF {vf_pci_addr} on {node[u'host']}"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
@staticmethod
def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
:raises RuntimeError: If PCI device bind failed.
"""
vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
- vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
- format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
-
- message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
- format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
+ pf_pci = pf_pci_addr.replace(u":", r"\:")
+ vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"
- command = "sh -c "\
- "'echo {driver} | tee {vf_path}/driver_override'".\
- format(driver=driver, vf_path=vf_path)
+ message = f"Failed to bind VF {vf_pci_addr} to {driver} " \
+ f"on {node[u'host']}"
+ command = f"sh -c 'echo {driver} | tee {vf_path}/driver_override'"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
- command = "sh -c "\
- "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
- format(vf_pci_addr=vf_pci_addr, driver=driver)
+ command = f"sh -c 'echo {vf_pci_addr} | " \
+ f"tee /sys/bus/pci/drivers/{driver}/bind'"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
- command = "sh -c "\
- "'echo | tee {vf_path}/driver_override'".\
- format(vf_path=vf_path)
+ command = f"sh -c 'echo | tee {vf_path}/driver_override'"
- exec_cmd_no_error(node, command, timeout=120, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=120, sudo=True, message=message
+ )
@staticmethod
def get_pci_dev_driver(node, pci_addr):
ssh.connect(node)
for i in range(3):
- logger.trace('Try number {0}: Get PCI device driver'.format(i))
+ logger.trace(f"Try number {i}: Get PCI device driver")
- cmd = 'lspci -vmmks {0}'.format(pci_addr)
+ cmd = f"lspci -vmmks {pci_addr}"
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code):
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
for line in stdout.splitlines():
if not line:
name = None
value = None
try:
- name, value = line.split("\t", 1)
+ name, value = line.split(u"\t", 1)
except ValueError:
- if name == "Driver:":
+ if name == u"Driver:":
return None
- if name == 'Driver:':
+ if name == u"Driver:":
return value
if i < 2:
- logger.trace('Driver for PCI device {} not found, executing '
- 'pci rescan and retrying'.format(pci_addr))
- cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
+ logger.trace(
+ f"Driver for PCI device {pci_addr} not found, "
+ f"executing pci rescan and retrying"
+ )
+ cmd = u"sh -c 'echo 1 > /sys/bus/pci/rescan'"
ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
return None
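(Illustrative sketch, not part of the patch: "lspci -vmmks" emits tab-separated key/value records; only the "Driver:" line matters to the loop above. Example record, values hypothetical.)
# Slot:    00:04.0
# Class:   Ethernet controller
# Vendor:  Red Hat, Inc.
# Driver:  virtio-pci      <- the value returned by get_pci_dev_driver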
:type force_load: bool
:raises RuntimeError: If module is not loaded or failed to load.
"""
- command = 'grep -w {module} /proc/modules'.format(module=module)
- message = 'Kernel module {module} is not loaded on host {host}'.\
- format(module=module, host=node['host'])
+ command = f"grep -w {module} /proc/modules"
+ message = f"Kernel module {module} is not loaded " \
+ f"on host {node[u'host']}"
try:
- exec_cmd_no_error(node, command, timeout=30, sudo=False,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=30, sudo=False, message=message
+ )
except RuntimeError:
if force_load:
# Module is not loaded and we want to load it
"""Verify if kernel module is loaded on all DUTs. If parameter force
load is set to True, then try to load the modules.
- :param node: DUT nodes.
+ :param nodes: DUT nodes.
:param module: Module to verify.
:param force_load: If True then try to load module.
- :type node: dict
+ :type nodes: dict
:type module: str
:type force_load: bool
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
DUTSetup.verify_kernel_module(node, module, force_load)
@staticmethod
"""Verify if uio driver kernel module is loaded on all DUTs. If module
is not present it will try to load it.
- :param node: DUT nodes.
- :type node: dict
+ :param nodes: DUT nodes.
+ :type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
uio_driver = Topology.get_uio_driver(node)
DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
:returns: nothing
:raises RuntimeError: If loading failed.
"""
- command = 'modprobe {module}'.format(module=module)
- message = 'Failed to load {module} on host {host}'.\
- format(module=module, host=node['host'])
+ command = f"modprobe {module}"
+ message = f"Failed to load {module} on host {node[u'host']}"
exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
:raises RuntimeError: If failed to remove or install VPP.
"""
for node in nodes.values():
- message = 'Failed to install VPP on host {host}!'.\
- format(host=node['host'])
- if node['type'] == NodeType.DUT:
- command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
+ message = f"Failed to install VPP on host {node[u'host']}!"
+ if node[u"type"] == NodeType.DUT:
+ command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
exec_cmd_no_error(node, command, sudo=True)
- command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
+ command = u". /etc/lsb-release; echo '${DISTRIB_ID}'"
stdout, _ = exec_cmd_no_error(node, command)
- if stdout.strip() == 'Ubuntu':
- exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
- timeout=120, sudo=True)
- exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
- format(dir=vpp_pkg_dir), timeout=120,
- sudo=True, message=message)
- exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
+ if stdout.strip() == u"Ubuntu":
+ exec_cmd_no_error(
+ node, u"apt-get purge -y '*vpp*' || true",
+ timeout=120, sudo=True
+ )
+ exec_cmd_no_error(
+ node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
+ timeout=120, sudo=True, message=message
+ )
+ exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
if DUTSetup.running_in_container(node):
DUTSetup.restart_service(node, Constants.VPP_UNIT)
else:
- exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
- timeout=120, sudo=True)
- exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
- format(dir=vpp_pkg_dir), timeout=120,
- sudo=True, message=message)
- exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
+ exec_cmd_no_error(
+ node, u"yum -y remove '*vpp*' || true",
+ timeout=120, sudo=True
+ )
+ exec_cmd_no_error(
+ node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
+ timeout=120, sudo=True, message=message
+ )
+ exec_cmd_no_error(node, u"rpm -qai *vpp*", sudo=True)
DUTSetup.restart_service(node, Constants.VPP_UNIT)
@staticmethod
:param node: Topology node.
:type node: dict
:returns: True if running in docker container, false if not or failed
- to detect.
+ to detect.
:rtype: bool
"""
- command = "fgrep docker /proc/1/cgroup"
- message = 'Failed to get cgroup settings.'
+ command = u"fgrep docker /proc/1/cgroup"
+ message = u"Failed to get cgroup settings."
try:
- exec_cmd_no_error(node, command, timeout=30, sudo=False,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=30, sudo=False, message=message
+ )
except RuntimeError:
return False
return True
:rtype: str
:raises RuntimeError: If getting output failed.
"""
- command = "docker inspect --format='"\
- "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
- message = 'Failed to get directory of {uuid} on host {host}'.\
- format(uuid=uuid, host=node['host'])
+ command = f"docker inspect " \
+ f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+ message = f"Failed to get directory of {uuid} on host {node['uhost']}"
stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
return stdout.strip()
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
- "grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
+ u"grep Hugepagesize /proc/meminfo | awk '{ print $2 }'"
+ )
if ret_code == 0:
try:
huge_size = int(stdout)
except ValueError:
- logger.trace('Reading huge page size information failed')
+ logger.trace(u"Reading huge page size information failed")
else:
break
else:
- raise RuntimeError('Getting huge page size information failed.')
+ raise RuntimeError(u"Getting huge page size information failed.")
return huge_size
@staticmethod
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
- 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
- format(huge_size))
+ f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
+ f"free_hugepages"
+ )
if ret_code == 0:
try:
huge_free = int(stdout)
except ValueError:
- logger.trace('Reading free huge pages information failed')
+ logger.trace(u"Reading free huge pages information failed")
else:
break
else:
- raise RuntimeError('Getting free huge pages information failed.')
+ raise RuntimeError(u"Getting free huge pages information failed.")
return huge_free
@staticmethod
:param huge_size: Size of hugepages.
:type node: dict
:type huge_size: int
-
:returns: Total number of huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
- 'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
- format(huge_size))
+ f"cat /sys/kernel/mm/hugepages/hugepages-{huge_size}kB/"
+ f"nr_hugepages"
+ )
if ret_code == 0:
try:
huge_total = int(stdout)
except ValueError:
- logger.trace('Reading total huge pages information failed')
+ logger.trace(u"Reading total huge pages information failed")
else:
break
else:
- raise RuntimeError('Getting total huge pages information failed.')
+ raise RuntimeError(u"Getting total huge pages information failed.")
return huge_total
@staticmethod
:type huge_mnt: str
:type mem_size: str
:type allocate: bool
-
:raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
- or increasing map count failed.
+ or increasing map count failed.
"""
# TODO: split function into smaller parts.
ssh = SSH()
huge_free = DUTSetup.get_huge_page_free(node, huge_size)
huge_total = DUTSetup.get_huge_page_total(node, huge_size)
- # Check if memory reqested is available on host
+ # Check if memory requested is available on host.
+ mem_size = int(mem_size)
if (mem_size * 1024) > (huge_free * huge_size):
# If we want to allocate hugepage dynamically
if allocate:
max_map_count = huge_to_allocate*4
# Increase maximum number of memory map areas a process may have
ret_code, _, _ = ssh.exec_command_sudo(
- 'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
- format(max_map_count))
+ f"echo '{max_map_count}' | "
+ f"sudo tee /proc/sys/vm/max_map_count"
+ )
if int(ret_code) != 0:
- raise RuntimeError('Increase map count failed on {host}'.
- format(host=node['host']))
+ raise RuntimeError(
+ f"Increase map count failed on {node[u'host']}"
+ )
# Increase hugepage count
ret_code, _, _ = ssh.exec_command_sudo(
- 'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
- format(huge_to_allocate))
+ f"echo '{huge_to_allocate}' | "
+ f"sudo tee /proc/sys/vm/nr_hugepages"
+ )
if int(ret_code) != 0:
- raise RuntimeError('Mount huge pages failed on {host}'.
- format(host=node['host']))
- # If we do not want to allocate dynamicaly end with error
+ raise RuntimeError(
+ f"Allocating huge pages failed on {node[u'host']}"
+ )
+ # If we do not want to allocate dynamically end with error
else:
- raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
- format(huge_free, huge_free * huge_size))
+ raise RuntimeError(
+ f"Not enough free huge pages: {huge_free}, "
+ f"{huge_free * huge_size} MB"
+ )
# Check if huge pages mount point exists
has_huge_mnt = False
- ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
+ ret_code, stdout, _ = ssh.exec_command(u"cat /proc/mounts")
if int(ret_code) == 0:
for line in stdout.splitlines():
# Try to find something like:
# none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
mount = line.split()
- if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
+ if mount[2] == u"hugetlbfs" and mount[1] == huge_mnt:
has_huge_mnt = True
break
# If the huge page mount point does not exist, create one
if not has_huge_mnt:
- ret_code, _, _ = ssh.exec_command_sudo(
- 'mkdir -p {mnt}'.format(mnt=huge_mnt))
+ ret_code, _, _ = ssh.exec_command_sudo(f"mkdir -p {huge_mnt}")
if int(ret_code) != 0:
- raise RuntimeError('Create mount dir failed on {host}'.
- format(host=node['host']))
+ raise RuntimeError(
+ f"Create mount dir failed on {node[u'host']}"
+ )
ret_code, _, _ = ssh.exec_command_sudo(
- 'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
- format(mnt=huge_mnt))
+ f"mount -t hugetlbfs -o pagesize=2048k none {huge_mnt}"
+ )
if int(ret_code) != 0:
- raise RuntimeError('Mount huge pages failed on {host}'.
- format(host=node['host']))
+ raise RuntimeError(
+ f"Mount huge pages failed on {node[u'host']}"
+ )
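# A worked example of the sizing check above (values are illustrative,
# not taken from any real topology): mem_size is in MB and huge_size
# in kB, so both sides are compared in kB.
mem_size_mb = 4096                          # 4 GB requested
huge_size_kb = 2048                         # 2 MB hugepages
huge_free_pages = 1024                      # pages currently free
needed_kb = mem_size_mb * 1024              # 4194304 kB needed
free_kb = huge_free_pages * huge_size_kb    # 2097152 kB free -> too little
pages_required = needed_kb // huge_size_kb  # 2048 pages would satisfy it
assert needed_kb > free_kb and pages_required == 2048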
:returns: DHCP relay data.
:rtype: list
"""
- cmd = 'dhcp_proxy_dump'
- args = dict(is_ip6=1 if ip_version == 'ipv6' else 0)
- err_msg = 'Failed to get DHCP proxy dump on host {host}'.format(
- host=node['host'])
+ cmd = u"dhcp_proxy_dump"
+ args = dict(is_ip6=1 if ip_version == u"ipv6" else 0)
+ err_msg = f"Failed to get DHCP proxy dump on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
:returns: EAL parameters.
:rtype: OptionString
"""
- options = OptionString(prefix='-')
- options.add('v')
+ options = OptionString(prefix=u"-")
+ options.add(u"v")
# Set the hexadecimal bitmask of the cores to run on.
- options.add_with_value_from_dict('l', 'eal_corelist', kwargs)
+ options.add_with_value_from_dict(u"l", u"eal_corelist", kwargs)
# Set master core.
- options.add_with_value('-master-lcore', '0')
+ options.add_with_value(u"-master-lcore", u"0")
# Load an external driver. Multiple -d options are allowed.
options.add_with_value_if_from_dict(
- 'd', '/usr/lib/librte_pmd_virtio.so', 'eal_driver', kwargs, True)
+ u"d", u"/usr/lib/librte_pmd_virtio.so", u"eal_driver", kwargs, True
+ )
options.add_if_from_dict(
- '-in-memory', 'eal_in_memory', kwargs, False)
+ u"-in-memory", u"eal_in_memory", kwargs, False
+ )
return options
@staticmethod
:returns: PMD parameters.
:rtype: OptionString
"""
- options = OptionString(prefix='--')
+ options = OptionString(prefix=u"--")
# Set the forwarding mode: io, mac, mac_retry, mac_swap, flowgen,
# rxonly, txonly, csum, icmpecho, ieee1588
options.add_equals_from_dict(
- 'forward-mode', 'pmd_fwd_mode', kwargs, 'io')
+ u"forward-mode", u"pmd_fwd_mode", kwargs, u"io"
+ )
# Set the number of packets per burst to N.
- options.add_equals('burst', 64)
+ options.add_equals(u"burst", 64)
# Set the number of descriptors in the TX rings to N.
- options.add_equals_from_dict('txd', 'pmd_txd', kwargs, 1024)
+ options.add_equals_from_dict(u"txd", u"pmd_txd", kwargs, 1024)
# Set the number of descriptors in the RX rings to N.
- options.add_equals_from_dict('rxd', 'pmd_rxd', kwargs, 1024)
+ options.add_equals_from_dict(u"rxd", u"pmd_rxd", kwargs, 1024)
# Set the number of queues in the TX to N.
- options.add_equals_from_dict('txq', 'pmd_txq', kwargs, 1)
+ options.add_equals_from_dict(u"txq", u"pmd_txq", kwargs, 1)
# Set the number of queues in the RX to N.
- options.add_equals_from_dict('rxq', 'pmd_rxq', kwargs, 1)
+ options.add_equals_from_dict(u"rxq", u"pmd_rxq", kwargs, 1)
# Set the hexadecimal bitmask of offloads.
- options.add_equals_from_dict('tx-offloads', 'pmd_tx_offloads', kwargs)
+ options.add_equals_from_dict(u"tx-offloads", u"pmd_tx_offloads", kwargs)
# Set the number of mbufs to be allocated in the mbuf pools.
- options.add_equals_from_dict('total-num-mbufs', 'pmd_num_mbufs', kwargs)
+ options.add_equals_from_dict(
+ u"total-num-mbufs", u"pmd_num_mbufs", kwargs
+ )
# Disable hardware VLAN.
options.add_if_from_dict(
- 'disable-hw-vlan', 'pmd_disable_hw_vlan', kwargs, True)
+ u"disable-hw-vlan", u"pmd_disable_hw_vlan", kwargs, True
+ )
# Set the MAC address XX:XX:XX:XX:XX:XX of the peer port N
- options.add_equals_from_dict('eth-peer', 'pmd_eth_peer_0', kwargs)
- options.add_equals_from_dict('eth-peer', 'pmd_eth_peer_1', kwargs)
+ options.add_equals_from_dict(u"eth-peer", u"pmd_eth_peer_0", kwargs)
+ options.add_equals_from_dict(u"eth-peer", u"pmd_eth_peer_1", kwargs)
# Set the max packet length.
- options.add_equals_from_dict('max-pkt-len', 'pmd_max_pkt_len', kwargs)
+ options.add_equals_from_dict(u"max-pkt-len", u"pmd_max_pkt_len", kwargs)
# Set the number of forwarding cores based on coremask.
- options.add_equals_from_dict('nb-cores', 'pmd_nb_cores', kwargs)
+ options.add_equals_from_dict(u"nb-cores", u"pmd_nb_cores", kwargs)
return options
@staticmethod
def get_testpmd_cmdline(**kwargs):
"""Get DPDK testpmd command line arguments.
- :param args: Key-value testpmd parameters.
- :type args: dict
+ :param kwargs: Key-value testpmd parameters.
+ :type kwargs: dict
:returns: Command line string.
:rtype: OptionString
"""
options = OptionString()
- options.add('testpmd')
+ options.add(u"testpmd")
options.extend(DpdkUtil.get_eal_options(**kwargs))
- options.add('--')
+ options.add(u"--")
options.extend(DpdkUtil.get_pmd_options(**kwargs))
return options
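# Usage sketch for the builders above (parameter values are illustrative;
# the exact rendering depends on the OptionString implementation):
#
# cmdline = DpdkUtil.get_testpmd_cmdline(
#     eal_corelist=u"0-3", eal_in_memory=True,
#     pmd_fwd_mode=u"io", pmd_nb_cores=u"3",
# )
# # Expected shape of the result, roughly:
# # testpmd -l 0-3 --master-lcore 0 --in-memory \
# #     -- --forward-mode=io --burst=64 --txd=1024 --rxd=1024 --nb-cores=3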
"""Start DPDK testpmd app on VM node.
:param node: VM Node to start testpmd on.
- :param args: Key-value testpmd parameters.
+ :param kwargs: Key-value testpmd parameters.
:type node: dict
:type kwargs: dict
"""
cmd_options = OptionString()
- cmd_options.add("/start-testpmd.sh")
+ cmd_options.add(u"/start-testpmd.sh")
cmd_options.extend(DpdkUtil.get_eal_options(**kwargs))
- cmd_options.add('--')
+ cmd_options.add(u"--")
cmd_options.extend(DpdkUtil.get_pmd_options(**kwargs))
exec_cmd_no_error(node, cmd_options, sudo=True, disconnect=True)
:type node: dict
:returns: nothing
"""
- cmd = "/stop-testpmd.sh" # Completed string, simpler than OptionString.
+ cmd = u"/stop-testpmd.sh" # Completed string, simple one.
exec_cmd_no_error(node, cmd, sudo=True, disconnect=True)
@unique
class SearchDirection(Enum):
"""Direction of linear search."""
-
TOP_DOWN = 1
BOTTOM_UP = 2
@unique
class SearchResults(Enum):
"""Result of the drop rate search."""
-
SUCCESS = 1
FAILURE = 2
SUSPICIOUS = 3
@unique
class RateType(Enum):
"""Type of rate units."""
-
PERCENTAGE = 1
PACKETS_PER_SECOND = 2
BITS_PER_SECOND = 3
@unique
class LossAcceptanceType(Enum):
"""Type of the loss acceptance criteria."""
-
FRAMES = 1
PERCENTAGE = 2
@unique
class SearchResultType(Enum):
"""Type of search result evaluation."""
-
BEST_OF_N = 1
WORST_OF_N = 2
-class DropRateSearch(object):
+class DropRateSearch(object, metaclass=ABCMeta):
"""Abstract class with search algorithm implementation."""
- __metaclass__ = ABCMeta
-
def __init__(self):
# duration of traffic run (binary, linear)
self._duration = 60
# permitted values: LossAcceptanceType
self._loss_acceptance_type = LossAcceptanceType.FRAMES
# size of frames to send
- self._frame_size = "64"
- # binary convergence criterium type is self._rate_type
+ self._frame_size = u"64"
+ # binary convergence criterion type is self._rate_type
self._binary_convergence_threshold = 5000
# numbers of traffic runs during one rate step
self._max_attempts = 1
pass
@abstractmethod
- def measure_loss(self, rate, frame_size, loss_acceptance,
- loss_acceptance_type, traffic_profile, skip_warmup=False):
+ def measure_loss(
+ self, rate, frame_size, loss_acceptance, loss_acceptance_type,
+ traffic_profile, skip_warmup=False):
"""Send traffic from TG and measure count of dropped frames.
:param rate: Offered traffic load.
:param loss_acceptance_type: Type of permitted loss.
:param traffic_profile: Module name to use for traffic generation.
:param skip_warmup: Start TRex without warmup traffic if true.
- :type rate: int
+ :type rate: float
:type frame_size: str
:type loss_acceptance: float
:type loss_acceptance_type: LossAcceptanceType
:raises ValueError: If min rate is lower than 0 or higher than max rate.
"""
if float(min_rate) <= 0:
- raise ValueError("min_rate must be higher than 0")
+ raise ValueError(u"min_rate must be higher than 0")
elif float(min_rate) > float(max_rate):
- raise ValueError("min_rate must be lower than max_rate")
+ raise ValueError(u"min_rate must be lower than max_rate")
else:
self._rate_max = float(max_rate)
self._rate_min = float(min_rate)
def set_loss_acceptance(self, loss_acceptance):
- """Set loss acceptance treshold for PDR search.
+ """Set loss acceptance threshold for PDR search.
- :param loss_acceptance: Loss acceptance treshold for PDR search.
+ :param loss_acceptance: Loss acceptance threshold for PDR search.
:type loss_acceptance: str
:returns: nothing
:raises ValueError: If loss acceptance is lower than zero.
"""
if float(loss_acceptance) < 0:
- raise ValueError("Loss acceptance must be higher or equal 0")
+ raise ValueError(u"Loss acceptance must be higher or equal 0")
else:
self._loss_acceptance = float(loss_acceptance)
def get_loss_acceptance(self):
- """Return configured loss acceptance treshold.
+ """Return configured loss acceptance threshold.
- :returns: Loss acceptance treshold.
+ :returns: Loss acceptance threshold.
:rtype: float
"""
return self._loss_acceptance
def set_loss_acceptance_type_percentage(self):
- """Set loss acceptance treshold type to percentage.
+ """Set loss acceptance threshold type to percentage.
:returns: nothing
"""
self._loss_acceptance_type = LossAcceptanceType.PERCENTAGE
def set_loss_acceptance_type_frames(self):
- """Set loss acceptance treshold type to frames.
+ """Set loss acceptance threshold type to frames.
:returns: nothing
"""
self._loss_acceptance_type = LossAcceptanceType.FRAMES
def loss_acceptance_type_is_percentage(self):
- """Return true if loss acceptance treshold type is percentage,
+ """Return true if loss acceptance threshold type is percentage,
false otherwise.
- :returns: True if loss acceptance treshold type is percentage.
+ :returns: True if loss acceptance threshold type is percentage.
:rtype: boolean
"""
return self._loss_acceptance_type == LossAcceptanceType.PERCENTAGE
:raises Exception: If rate type is unknown.
"""
if rate_type not in RateType:
- raise Exception("rate_type unknown: {}".format(rate_type))
+ raise Exception(f"rate_type unknown: {rate_type}")
else:
self._rate_type = rate_type
def set_binary_convergence_threshold(self, convergence):
"""Set convergence for binary search.
- :param convergence: Treshold value number.
+ :param convergence: Threshold value number.
:type convergence: float
:returns: nothing
"""
def get_binary_convergence_threshold(self):
"""Get convergence for binary search.
- :returns: Treshold value number.
+ :returns: Threshold value number.
:rtype: float
"""
return self._binary_convergence_threshold
:raises ValueError: If rate type is unknown.
"""
if self._rate_type == RateType.PERCENTAGE:
- return "%"
+ return u"%"
elif self._rate_type == RateType.BITS_PER_SECOND:
- return "bps"
+ return u"bps"
elif self._rate_type == RateType.PACKETS_PER_SECOND:
- return "pps"
+ return u"pps"
else:
- raise ValueError("RateType unknown")
+ raise ValueError(u"RateType unknown")
def set_max_attempts(self, max_attempts):
"""Set maximum number of traffic runs during one rate step.
if int(max_attempts) > 0:
self._max_attempts = int(max_attempts)
else:
- raise ValueError("Max attempt must by greater than zero")
+ raise ValueError(u"Max attempt must by greater than zero")
def get_max_attempts(self):
"""Return maximum number of traffic runs during one rate step.
:raises ValueError: If search type is unknown.
"""
if search_type not in SearchResultType:
- raise ValueError("search_type unknown: {}".format(search_type))
+ raise ValueError(f"search_type unknown: {search_type}")
else:
self._search_result_type = search_type
elif self._search_result_type == SearchResultType.WORST_OF_N:
return self._get_worst_of_n(res_list)
else:
- raise ValueError("Unknown search result type")
+ raise ValueError(u"Unknown search result type")
def linear_search(self, start_rate, traffic_profile):
"""Linear search of rate with loss below acceptance criteria.
:returns: nothing
:raises ValueError: If start rate is not in range.
"""
-
if not self._rate_min <= float(start_rate) <= self._rate_max:
- raise ValueError("Start rate is not in min,max range")
+ raise ValueError(u"Start rate is not in min,max range")
rate = float(start_rate)
# the last but one step
for dummy in range(self._max_attempts):
res.append(self.measure_loss(
rate, self._frame_size, self._loss_acceptance,
- self._loss_acceptance_type, traffic_profile))
+ self._loss_acceptance_type, traffic_profile
+ ))
res = self._get_res_based_on_search_type(res)
self._search_result_rate = rate
return
else:
- raise RuntimeError("Unknown search result")
+ raise RuntimeError(u"Unknown search result")
else:
- raise Exception("Unknown search direction")
+ raise Exception(u"Unknown search direction")
def verify_search_result(self):
"""Fail if search was not successful.
if self._search_result in [
SearchResults.SUCCESS, SearchResults.SUSPICIOUS]:
return self._search_result_rate, self.get_latency()
- raise Exception('Search FAILED')
+ raise Exception(u"Search FAILED")
- def binary_search(self, b_min, b_max, traffic_profile, skip_max_rate=False,
- skip_warmup=False):
+ def binary_search(
+ self, b_min, b_max, traffic_profile, skip_max_rate=False,
+ skip_warmup=False):
"""Binary search of rate with loss below acceptance criteria.
:param b_min: Min range rate.
:returns: nothing
:raises ValueError: If input values are not valid.
"""
-
if not self._rate_min <= float(b_min) <= self._rate_max:
- raise ValueError("Min rate is not in min,max range")
+ raise ValueError(u"Min rate is not in min,max range")
if not self._rate_min <= float(b_max) <= self._rate_max:
- raise ValueError("Max rate is not in min,max range")
+ raise ValueError(u"Max rate is not in min,max range")
if float(b_max) < float(b_min):
- raise ValueError("Min rate is greater than max rate")
+ raise ValueError(u"Min rate is greater than max rate")
# rate is half of interval + start of interval if not using max rate
rate = ((float(b_max) - float(b_min)) / 2) + float(b_min) \
res.append(self.measure_loss(
rate, self._frame_size, self._loss_acceptance,
self._loss_acceptance_type, traffic_profile,
- skip_warmup=skip_warmup))
+ skip_warmup=skip_warmup
+ ))
res = self._get_res_based_on_search_type(res)
:returns: nothing
:raises RuntimeError: If linear search failed.
"""
-
self.linear_search(start_rate, traffic_profile)
if self._search_result in [SearchResults.SUCCESS,
self._search_result = SearchResults.SUSPICIOUS
self._search_result_rate = temp_rate
else:
- raise RuntimeError("Linear search FAILED")
+ raise RuntimeError(u"Linear search FAILED")
@staticmethod
def floats_are_close_equal(num_a, num_b, rel_tol=1e-9, abs_tol=0.0):
:param num_a: First number to compare.
:param num_b: Second number to compare.
- :param rel_tol=1e-9: The relative tolerance.
- :param abs_tol=0.0: The minimum absolute tolerance level.
+ :param rel_tol: The relative tolerance. (Optional, default value: 1e-9)
+ :param abs_tol: The minimum absolute tolerance level. (Optional,
+ default value: 0.0)
:type num_a: float
:type num_b: float
:type rel_tol: float
:type abs_tol: float
:returns: Returns True if num_a is close in value to num_b or equal.
- False otherwise.
+ False otherwise.
:rtype: boolean
:raises ValueError: If input values are not valid.
"""
-
if num_a == num_b:
return True
if rel_tol < 0.0 or abs_tol < 0.0:
- raise ValueError('Error tolerances must be non-negative')
+ raise ValueError(u"Error tolerances must be non-negative")
- return abs(num_b - num_a) <= max(rel_tol * max(abs(num_a), abs(num_b)),
- abs_tol)
+ return abs(num_b - num_a) <= max(
+ rel_tol * max(abs(num_a), abs(num_b)), abs_tol
+ )
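# The comparison above is the PEP 485 formula; math.isclose implements
# the same check in the standard library. A quick equivalence sketch
# (finite inputs and non-negative tolerances assumed):
import math

num_a, num_b, rel_tol, abs_tol = 1.0, 1.0 + 5e-10, 1e-9, 0.0
manual = abs(num_b - num_a) <= max(
    rel_tol * max(abs(num_a), abs(num_b)), abs_tol
)
assert manual == math.isclose(num_a, num_b, rel_tol=rel_tol, abs_tol=abs_tol)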
import logging
_LEVELS = {
- "TRACE": logging.DEBUG // 2,
- "DEBUG": logging.DEBUG,
- "INFO": logging.INFO,
- "HTML": logging.INFO,
- "WARN": logging.WARN,
- "ERROR": logging.ERROR,
- "CRITICAL": logging.CRITICAL,
- "NONE": logging.CRITICAL,
+ u"TRACE": logging.DEBUG // 2,
+ u"DEBUG": logging.DEBUG,
+ u"INFO": logging.INFO,
+ u"HTML": logging.INFO,
+ u"WARN": logging.WARN,
+ u"ERROR": logging.ERROR,
+ u"CRITICAL": logging.CRITICAL,
+ u"NONE": logging.CRITICAL,
}
+
class FilteredLogger(object):
"""Instances of this class have the similar API to robot.api.logger.
self.logger_module = logger_module
self.min_level_num = _LEVELS[min_level.upper()]
- def write(self, message, farg=None, level="INFO"):
+ def write(self, message, farg=None, level=u"INFO"):
"""Forwards the message to logger if min_level is reached.
Formatting using the '%' operator is used when the farg argument is supplied.
def trace(self, message, farg=None):
"""Forward the message using the ``TRACE`` level."""
- self.write(message, farg=farg, level="TRACE")
+ self.write(message, farg=farg, level=u"TRACE")
def debug(self, message, farg=None):
"""Forward the message using the ``DEBUG`` level."""
- self.write(message, farg=farg, level="DEBUG")
+ self.write(message, farg=farg, level=u"DEBUG")
def info(self, message, farg=None):
"""Forward the message using the ``INFO`` level."""
- self.write(message, farg=farg, level="INFO")
+ self.write(message, farg=farg, level=u"INFO")
def warn(self, message, farg=None):
"""Forward the message using the ``WARN`` level."""
- self.write(message, farg=farg, level="WARN")
+ self.write(message, farg=farg, level=u"WARN")
def error(self, message, farg=None):
"""Forward the message using the ``ERROR`` level."""
- self.write(message, farg=farg, level="ERROR")
+ self.write(message, farg=farg, level=u"ERROR")
"""GBP utilities library."""
from enum import IntEnum
+
from ipaddress import ip_address
from resources.libraries.python.IPUtil import IPUtil
:type ip4_uu_sw_if_index: int
:type ip6_uu_sw_if_index: int
"""
- cmd = 'gbp_route_domain_add'
- err_msg = 'Failed to add GBP route domain on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_route_domain_add"
+ err_msg = f"Failed to add GBP route domain on {node[u'host']}!"
args_in = dict(
rd=dict(
:type uu_fwd_sw_if_index: int
:type bm_flood_sw_if_index: int
"""
- cmd = 'gbp_bridge_domain_add'
- err_msg = 'Failed to add GBP bridge domain on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_bridge_domain_add"
+ err_msg = f"Failed to add GBP bridge domain on {node[u'host']}!"
args_in = dict(
bd=dict(
flags=getattr(
- GBPBridgeDomainFlags, 'GBP_BD_API_FLAG_NONE').value,
+ GBPBridgeDomainFlags, u"GBP_BD_API_FLAG_NONE"
+ ).value,
bvi_sw_if_index=bvi_sw_if_index,
uu_fwd_sw_if_index=uu_fwd_sw_if_index,
bm_flood_sw_if_index=bm_flood_sw_if_index,
:type uplink_sw_if_index: int
:type remote_ep_timeout: int
"""
- cmd = 'gbp_endpoint_group_add'
- err_msg = 'Failed to add GBP endpoint group on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_endpoint_group_add"
+ err_msg = f"Failed to add GBP endpoint group on {node[u'host']}!"
args_in = dict(
epg=dict(
:type mac_addr: str
:type sclass: int
"""
- cmd = 'gbp_endpoint_add'
- err_msg = 'Failed to add GBP endpoint on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_endpoint_add"
+ err_msg = f"Failed to add GBP endpoint on {node[u'host']}!"
ips = list()
- ips.append(IPUtil.create_ip_address_object(
- ip_address(unicode(ip_addr))))
- tun_src = IPUtil.create_ip_address_object(
- ip_address(unicode('0.0.0.0')))
- tun_dst = IPUtil.create_ip_address_object(
- ip_address(unicode('0.0.0.0')))
+ ips.append(IPUtil.create_ip_address_object(ip_address(ip_addr)))
+ tun_src = IPUtil.create_ip_address_object(ip_address(u"0.0.0.0"))
+ tun_dst = IPUtil.create_ip_address_object(ip_address(u"0.0.0.0"))
args_in = dict(
endpoint=dict(
mac=L2Util.mac_to_bin(mac_addr),
sclass=sclass,
flags=getattr(
- GBPEndpointFlags, 'GBP_API_ENDPOINT_FLAG_EXTERNAL').value,
+ GBPEndpointFlags, u"GBP_API_ENDPOINT_FLAG_EXTERNAL"
+ ).value,
tun=dict(
src=tun_src,
dst=tun_dst
:type bd_id: int
:type rd_id: int
"""
- cmd = 'gbp_ext_itf_add_del'
- err_msg = 'Failed to add external GBP interface on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_ext_itf_add_del"
+ err_msg = u"Failed to add external GBP interface on {node[u'host']}!"
args_in = dict(
is_add=1,
sw_if_index=sw_if_index,
bd_id=bd_id,
rd_id=rd_id,
- flags=getattr(GBPExtItfFlags, 'GBP_API_EXT_ITF_F_NONE').value
+ flags=getattr(GBPExtItfFlags, u"GBP_API_EXT_ITF_F_NONE").value
)
)
:type rd_id: int
:type sw_if_index: int
"""
- cmd = 'gbp_subnet_add_del'
- err_msg = 'Failed to add GBP subnet on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_subnet_add_del"
+ err_msg = f"Failed to add GBP subnet on {node[u'host']}!"
args_in = dict(
is_add=1,
subnet=dict(
- type=getattr(GBPSubnetType, 'GBP_API_SUBNET_L3_OUT').value,
+ type=getattr(GBPSubnetType, u"GBP_API_SUBNET_L3_OUT").value,
sw_if_index=sw_if_index,
sclass=sclass,
prefix=dict(
address=IPUtil.create_ip_address_object(
- ip_address(unicode(address))),
+ ip_address(address)
+ ),
len=int(subnet_length)
),
rd_id=rd_id
:type acl_index: int
:type hash_mode: str
"""
- cmd = 'gbp_contract_add_del'
- err_msg = 'Failed to add GBP contract on {node}!'\
- .format(node=node['host'])
+ cmd = u"gbp_contract_add_del"
+ err_msg = f"Failed to add GBP contract on {node[u'host']}!"
- hash_mode = 'GBP_API_HASH_MODE_SRC_IP' if hash_mode is None \
+ hash_mode = u"GBP_API_HASH_MODE_SRC_IP" if hash_mode is None \
else hash_mode
rule_permit = dict(
- action=getattr(GBPRuleAction, 'GBP_API_RULE_PERMIT').value,
+ action=getattr(GBPRuleAction, u"GBP_API_RULE_PERMIT").value,
nh_set=dict(
hash_mode=getattr(GBPHashMode, hash_mode).value,
n_nhs=8,
+++ /dev/null
-# Copyright (c) 2018 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Implementation of HTTP requests GET, PUT, POST and DELETE used in
-communication with Honeycomb.
-
-The HTTP requests are implemented in the class HTTPRequest which uses
-requests.request.
-"""
-
-from ipaddress import IPv6Address, AddressValueError
-from enum import IntEnum, unique
-
-from robot.api.deco import keyword
-from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
-
-from requests import request, RequestException, Timeout, TooManyRedirects, \
- HTTPError, ConnectionError
-from requests.auth import HTTPBasicAuth
-
-
-@unique
-class HTTPCodes(IntEnum):
- """HTTP status codes"""
- OK = 200 # HTTP standard code name. # pylint: disable=invalid-name
- ACCEPTED = 201
- UNAUTHORIZED = 401
- FORBIDDEN = 403
- NOT_FOUND = 404
- CONFLICT = 409
- INTERNAL_SERVER_ERROR = 500
- SERVICE_UNAVAILABLE = 503
-
-
-class HTTPRequestError(Exception):
- """Exception raised by HTTPRequest objects.
-
- When raising this exception, put this information to the message in this
- order:
-
- - short description of the encountered problem,
- - relevant messages if there are any collected, e.g., from caught
- exception,
- - relevant data if there are any collected.
-
- The logging is performed on two levels: 1. error - short description of the
- problem; 2. debug - detailed information.
- """
-
- def __init__(self, msg, details='', enable_logging=True):
- """Sets the exception message and enables / disables logging.
-
- It is not wanted to log errors when using these keywords together
- with keywords like "Wait until keyword succeeds". So you can disable
- logging by setting enable_logging to False.
-
- :param msg: Message to be displayed and logged.
- :param enable_logging: When True, logging is enabled, otherwise
- logging is disabled.
- :type msg: str
- :type enable_logging: bool
- """
- super(HTTPRequestError, self).__init__()
- self._msg = "{0}: {1}".format(self.__class__.__name__, msg)
- self._details = details
- if enable_logging:
- logger.info(self._msg)
- logger.debug(self._details)
-
- def __repr__(self):
- return repr(self._msg)
-
- def __str__(self):
- return str(self._msg)
-
-
-class HTTPRequest(object):
- """A class implementing HTTP requests GET, PUT, POST and DELETE used in
- communication with Honeycomb.
-
- The communication with Honeycomb and processing of all exceptions is done in
- the method _http_request which uses requests.request to send requests and
- receive responses. The received status code and content of response are
- logged on the debug level.
- All possible exceptions raised by requests.request are also processed there.
-
- The other methods (get, put, post and delete) use _http_request to send
- corresponding request.
-
- These methods must not be used as keywords in tests. Use keywords
- implemented in the module HoneycombAPIKeywords instead.
- """
-
- def __init__(self):
- pass
-
- @staticmethod
- def create_full_url(ip_addr, port, path):
- """Creates full url including host, port, and path to data.
-
- :param ip_addr: Server IP.
- :param port: Communication port.
- :param path: Path to data.
- :type ip_addr: str
- :type port: str or int
- :type path: str
- :returns: Full url.
- :rtype: str
- """
-
- try:
- IPv6Address(unicode(ip_addr))
- # IPv6 address must be in brackets
- ip_addr = "[{0}]".format(ip_addr)
- except (AttributeError, AddressValueError):
- pass
-
- return "http://{ip}:{port}{path}".format(ip=ip_addr, port=port,
- path=path)
-
- @staticmethod
- def _http_request(method, node, path, enable_logging=True, **kwargs):
- """Sends specified HTTP request and returns status code and response
- content.
-
- :param method: The method to be performed on the resource identified by
- the given request URI.
- :param node: Honeycomb node.
- :param path: URL path, e.g. /index.html.
- :param enable_logging: Used to suppress errors when checking Honeycomb
- state during suite setup and teardown.
- :param kwargs: Named parameters accepted by request.request:
- params -- (optional) Dictionary or bytes to be sent in the query
- string for the Request.
- data -- (optional) Dictionary, bytes, or file-like object to
- send in the body of the Request.
- json -- (optional) json data to send in the body of the Request.
- headers -- (optional) Dictionary of HTTP Headers to send with
- the Request.
- cookies -- (optional) Dict or CookieJar object to send with the
- Request.
- files -- (optional) Dictionary of 'name': file-like-objects
- (or {'name': ('filename', fileobj)}) for multipart encoding upload.
- timeout (float or tuple) -- (optional) How long to wait for the
- server to send data before giving up, as a float, or a (connect
- timeout, read timeout) tuple.
- allow_redirects (bool) -- (optional) Boolean. Set to True if POST/
- PUT/DELETE redirect following is allowed.
- proxies -- (optional) Dictionary mapping protocol to the URL of
- the proxy.
- verify -- (optional) whether the SSL cert will be verified.
- A CA_BUNDLE path can also be provided. Defaults to True.
- stream -- (optional) if False, the response content will be
- immediately downloaded.
- cert -- (optional) if String, path to ssl client cert file (.pem).
- If Tuple, ('cert', 'key') pair.
- :type method: str
- :type node: dict
- :type path: str
- :type enable_logging: bool
- :type kwargs: dict
- :returns: Status code and content of response.
- :rtype: tuple
- :raises HTTPRequestError: If
- 1. it is not possible to connect,
- 2. invalid HTTP response comes from server,
- 3. request exceeded the configured number of maximum re-directions,
- 4. request timed out,
- 5. there is any other unexpected HTTP request exception.
- """
- timeout = kwargs["timeout"]
-
- use_odl = BuiltIn().get_variable_value("${use_odl_client}")
-
- if use_odl:
- port = 8181
- # Using default ODL Restconf port
- # TODO: add node["honeycomb"]["odl_port"] to topology, use it here
- odl_url_part = "/network-topology:network-topology/topology/" \
- "topology-netconf/node/vpp/yang-ext:mount"
- else:
- port = node["honeycomb"]["port"]
- odl_url_part = ""
-
- try:
- path = path.format(odl_url_part=odl_url_part)
- except KeyError:
- pass
-
- url = HTTPRequest.create_full_url(node['host'],
- port,
- path)
- try:
- auth = HTTPBasicAuth(node['honeycomb']['user'],
- node['honeycomb']['passwd'])
- rsp = request(method, url, auth=auth, verify=False, **kwargs)
-
- logger.debug("Status code: {0}".format(rsp.status_code))
- logger.debug("Response: {0}".format(rsp.content))
-
- return rsp.status_code, rsp.content
-
- except ConnectionError as err:
- # Switching the logging on / off is needed only for
- # "requests.ConnectionError"
- raise HTTPRequestError("Not possible to connect to {0}:{1}.".
- format(node['host'],
- node['honeycomb']['port']),
- repr(err), enable_logging=enable_logging)
- except HTTPError as err:
- raise HTTPRequestError("Invalid HTTP response from {0}.".
- format(node['host']), repr(err))
- except TooManyRedirects as err:
- raise HTTPRequestError("Request exceeded the configured number "
- "of maximum re-directions.", repr(err))
- except Timeout as err:
- raise HTTPRequestError("Request timed out. Timeout is set to {0}.".
- format(timeout), repr(err))
- except RequestException as err:
- raise HTTPRequestError("Unexpected HTTP request exception.",
- repr(err))
-
- @staticmethod
- @keyword(name="HTTP Get")
- def get(node, path, headers=None, timeout=15, enable_logging=True):
- """Sends a GET request and returns the response and status code.
-
- :param node: Honeycomb node.
- :param path: URL path, e.g. /index.html.
- :param headers: Dictionary of HTTP Headers to send with the Request.
- :param timeout: How long to wait for the server to send data before
- giving up, as a float, or a (connect timeout, read timeout) tuple.
- :param enable_logging: Used to suppress errors when checking Honeycomb
- state during suite setup and teardown. When True,
- logging is enabled, otherwise logging is disabled.
- :type node: dict
- :type path: str
- :type headers: dict
- :type timeout: float or tuple
- :type enable_logging: bool
- :returns: Status code and content of response.
- :rtype: tuple
- """
-
- return HTTPRequest._http_request('GET', node, path,
- enable_logging=enable_logging,
- headers=headers, timeout=timeout)
-
- @staticmethod
- @keyword(name="HTTP Put")
- def put(node, path, headers=None, payload=None, json=None, timeout=15):
- """Sends a PUT request and returns the response and status code.
-
- :param node: Honeycomb node.
- :param path: URL path, e.g. /index.html.
- :param headers: Dictionary of HTTP Headers to send with the Request.
- :param payload: Dictionary, bytes, or file-like object to send in
- the body of the Request.
- :param json: JSON formatted string to send in the body of the Request.
- :param timeout: How long to wait for the server to send data before
- giving up, as a float, or a (connect timeout, read timeout) tuple.
- :type node: dict
- :type path: str
- :type headers: dict
- :type payload: dict, bytes, or file-like object
- :type json: str
- :type timeout: float or tuple
- :returns: Status code and content of response.
- :rtype: tuple
- """
- return HTTPRequest._http_request('PUT', node, path, headers=headers,
- data=payload, json=json,
- timeout=timeout)
-
- @staticmethod
- @keyword(name="HTTP Post")
- def post(node, path, headers=None, payload=None, json=None, timeout=15,
- enable_logging=True):
- """Sends a POST request and returns the response and status code.
-
- :param node: Honeycomb node.
- :param path: URL path, e.g. /index.html.
- :param headers: Dictionary of HTTP Headers to send with the Request.
- :param payload: Dictionary, bytes, or file-like object to send in
- the body of the Request.
- :param json: JSON formatted string to send in the body of the Request.
- :param timeout: How long to wait for the server to send data before
- giving up, as a float, or a (connect timeout, read timeout) tuple.
- :param enable_logging: Used to suppress errors when checking ODL
- state during suite setup and teardown. When True,
- logging is enabled, otherwise logging is disabled.
- :type node: dict
- :type path: str
- :type headers: dict
- :type payload: dict, bytes, or file-like object
- :type json: str
- :type timeout: float or tuple
- :type enable_logging: bool
- :returns: Status code and content of response.
- :rtype: tuple
- """
- return HTTPRequest._http_request('POST', node, path,
- enable_logging=enable_logging,
- headers=headers, data=payload,
- json=json, timeout=timeout)
-
- @staticmethod
- @keyword(name="HTTP Delete")
- def delete(node, path, timeout=15):
- """Sends a DELETE request and returns the response and status code.
-
- :param node: Honeycomb node.
- :param path: URL path, e.g. /index.html.
- :param timeout: How long to wait for the server to send data before
- giving up, as a float, or a (connect timeout, read timeout) tuple.
- :type node: dict
- :type path: str
- :type timeout: float or tuple
- :returns: Status code and content of response.
- :rtype: tuple
- """
- return HTTPRequest._http_request('DELETE', node, path, timeout=timeout)
import re
from enum import IntEnum
+
from ipaddress import ip_address
from resources.libraries.python.Constants import Constants
class FibPathFlags(IntEnum):
"""FIB path flags."""
FIB_PATH_FLAG_NONE = 0
- FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1 #pylint: disable=invalid-name
+ FIB_PATH_FLAG_RESOLVE_VIA_ATTACHED = 1 # pylint: disable=invalid-name
FIB_PATH_FLAG_RESOLVE_VIA_HOST = 2
:returns: Integer representation of IP address.
:rtype: int
"""
- return int(ip_address(unicode(ip_str)))
+ return int(ip_address(ip_str))
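# In Python 3, ip_address() accepts str directly, so the unicode()
# wrapper is dropped. Round-trip sketch:
from ipaddress import ip_address

assert int(ip_address(u"192.0.2.1")) == 3221225985
assert str(ip_address(3221225985)) == u"192.0.2.1"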
@staticmethod
def int_to_ip(ip_int):
if not sw_if_index:
return list()
- is_ipv6 = 1 if ip_version == 'ipv6' else 0
+ is_ipv6 = 1 if ip_version == u"ipv6" else 0
- cmd = 'ip_address_dump'
- args = dict(sw_if_index=sw_if_index,
- is_ipv6=is_ipv6)
- err_msg = 'Failed to get L2FIB dump on host {host}'.format(
- host=node['host'])
+ cmd = u"ip_address_dump"
+ args = dict(
+ sw_if_index=sw_if_index,
+ is_ipv6=is_ipv6
+ )
+ err_msg = f"Failed to get L2FIB dump on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
:param node: VPP node.
:type node: dict
"""
-
- PapiSocketExecutor.run_cli_cmd(node, 'show ip fib')
- PapiSocketExecutor.run_cli_cmd(node, 'show ip fib summary')
- PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib')
- PapiSocketExecutor.run_cli_cmd(node, 'show ip6 fib summary')
+ PapiSocketExecutor.run_cli_cmd(node, u"show ip fib")
+ PapiSocketExecutor.run_cli_cmd(node, u"show ip fib summary")
+ PapiSocketExecutor.run_cli_cmd(node, u"show ip6 fib")
+ PapiSocketExecutor.run_cli_cmd(node, u"show ip6 fib summary")
@staticmethod
def vpp_get_ip_tables_prefix(node, address):
:type node: dict
:type address: str
"""
- addr = ip_address(unicode(address))
+ addr = ip_address(address)
+ ip_ver = u"ip6" if addr.version == 6 else u"ip"
PapiSocketExecutor.run_cli_cmd(
- node, 'show {ip_ver} fib {addr}/{addr_len}'.format(
- ip_ver='ip6' if addr.version == 6 else 'ip',
- addr=addr,
- addr_len=addr.max_prefixlen))
+ node, f"show {ip_ver} fib {addr}/{addr.max_prefixlen}"
+ )
@staticmethod
def get_interface_vrf_table(node, interface, ip_version='ipv4'):
"""
sw_if_index = InterfaceUtil.get_interface_index(node, interface)
- cmd = 'sw_interface_get_table'
+ cmd = u"sw_interface_get_table"
args = dict(
sw_if_index=sw_if_index,
- is_ipv6=True if ip_version == 'ipv6' else False
+ is_ipv6=True if ip_version == u"ipv6" else False
)
- err_msg = 'Failed to get VRF id assigned to interface {ifc}'.format(
- ifc=interface)
+ err_msg = f"Failed to get VRF id assigned to interface {interface}"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
- return reply['vrf_id']
+ return reply[u"vrf_id"]
@staticmethod
def vpp_ip_source_check_setup(node, if_name):
:type node: dict
:type if_name: str
"""
- cmd = 'ip_source_check_interface_add_del'
+ cmd = u"ip_source_check_interface_add_del"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, if_name),
is_add=1,
loose=0
)
- err_msg = 'Failed to enable source check on interface {ifc}'.format(
- ifc=if_name)
+ err_msg = f"Failed to enable source check on interface {if_name}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type interface: str
:type addr: str
"""
- cmd = 'ip_probe_neighbor'
+ cmd = u"ip_probe_neighbor"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- dst=str(addr))
- err_msg = 'VPP ip probe {dev} {ip} failed on {h}'.format(
- dev=interface, ip=addr, h=node['host'])
+ dst=str(addr)
+ )
+ err_msg = f"VPP ip probe {interface} {addr} failed on {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type ip1: str
:type ip2: str
"""
- addr1 = ip_address(unicode(ip1))
- addr2 = ip_address(unicode(ip2))
+ addr1 = ip_address(ip1)
+ addr2 = ip_address(ip2)
if addr1 != addr2:
- raise AssertionError('IP addresses are not equal: {0} != {1}'.
- format(ip1, ip2))
+ raise AssertionError(f"IP addresses are not equal: {ip1} != {ip2}")
@staticmethod
- def setup_network_namespace(node, namespace_name, interface_name,
- ip_addr, prefix):
+ def setup_network_namespace(
+ node, namespace_name, interface_name, ip_addr, prefix):
"""Setup namespace on given node and attach interface and IP to
this namespace. Applicable also on TG node.
:type ip_addr: str
:type prefix: int
"""
- cmd = ('ip netns add {0}'.format(namespace_name))
+ cmd = f"ip netns add {namespace_name}"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = ('ip link set dev {0} up netns {1}'.format(interface_name,
- namespace_name))
+ cmd = f"ip link set dev {interface_name} up netns {namespace_name}"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = ('ip netns exec {0} ip addr add {1}/{2} dev {3}'.format(
- namespace_name, ip_addr, prefix, interface_name))
+ cmd = f"ip netns exec {namespace_name} ip addr add {ip_addr}/{prefix}" \
+ f" dev {interface_name}"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def linux_enable_forwarding(node, ip_ver='ipv4'):
+ def linux_enable_forwarding(node, ip_ver=u"ipv4"):
"""Enable forwarding on a Linux node, e.g. VM.
:param node: VPP node.
:type node: dict
:type ip_ver: str
"""
- cmd = 'sysctl -w net.{0}.ip_forward=1'.format(ip_ver)
+ cmd = f"sysctl -w net.{ip_ver}.ip_forward=1"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
:rtype: str
:raises RuntimeError: If cannot get the information about interfaces.
"""
- regex_intf_info = r"pci@" \
- r"([0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f])\s*" \
- r"([a-zA-Z0-9]*)\s*network"
+ regex_intf_info = \
+ r"pci@([0-9a-f]{4}:[0-9a-f]{2}:[0-9a-f]{2}.[0-9a-f])" \
+ r"\s*([a-zA-Z0-9]*)\s*network"
- cmd = "lshw -class network -businfo"
+ cmd = u"lshw -class network -businfo"
ret_code, stdout, stderr = exec_cmd(node, cmd, timeout=30, sudo=True)
if ret_code != 0:
- raise RuntimeError('Could not get information about interfaces:\n'
- '{err}'.format(err=stderr))
+ raise RuntimeError(
+ f"Could not get information about interfaces:\n{stderr}"
+ )
for line in stdout.splitlines()[2:]:
try:
:type interface: str
:raises RuntimeError: If the interface could not be set up.
"""
- cmd = "ip link set {0} up".format(interface)
+ cmd = f"ip link set {interface} up"
exec_cmd_no_error(node, cmd, timeout=30, sudo=True)
@staticmethod
- def set_linux_interface_ip(node, interface, ip_addr, prefix,
- namespace=None):
+ def set_linux_interface_ip(
+ node, interface, ip_addr, prefix, namespace=None):
"""Set IP address to interface in linux.
:param node: VPP/TG node.
:raises RuntimeError: IP could not be set.
"""
if namespace is not None:
- cmd = 'ip netns exec {ns} ip addr add {ip}/{p} dev {dev}'.format(
- ns=namespace, ip=ip_addr, p=prefix, dev=interface)
+ cmd = f"ip netns exec {namespace} ip addr add {ip_addr}/{prefix}" \
+ f" dev {interface}"
else:
- cmd = 'ip addr add {ip}/{p} dev {dev}'.format(
- ip=ip_addr, p=prefix, dev=interface)
+ cmd = f"ip addr add {ip_addr}/{prefix} dev {interface}"
exec_cmd_no_error(node, cmd, timeout=5, sudo=True)
:type namespace: str
"""
if namespace is not None:
- cmd = 'ip netns exec {} ip route add {}/{} via {}'.format(
- namespace, ip_addr, prefix, gateway)
+ cmd = f"ip netns exec {namespace} ip route add {ip_addr}/{prefix}" \
+ f" via {gateway}"
else:
- cmd = 'ip route add {}/{} via {}'.format(ip_addr, prefix, gateway)
+ cmd = f"ip route add {ip_addr}/{prefix} via {gateway}"
+
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_interface_set_ip_address(node, interface, address,
- prefix_length=None):
+ def vpp_interface_set_ip_address(
+ node, interface, address, prefix_length=None):
"""Set IP address to VPP interface.
:param node: VPP node.
:type address: str
:type prefix_length: int
"""
- ip_addr = ip_address(unicode(address))
+ ip_addr = ip_address(address)
- cmd = 'sw_interface_add_del_address'
+ cmd = u"sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_add=True,
prefix=IPUtil.create_prefix_object(
ip_addr,
prefix_length if prefix_length else 128
- if ip_addr.version == 6 else 32)
+ if ip_addr.version == 6 else 32
+ )
)
- err_msg = 'Failed to add IP address on interface {ifc}'.format(
- ifc=interface)
+ err_msg = f"Failed to add IP address on interface {interface}"
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type ip_addr: str
:type mac_address: str
"""
- dst_ip = ip_address(unicode(ip_addr))
+ dst_ip = ip_address(ip_addr)
neighbor = dict(
sw_if_index=Topology.get_interface_sw_index(node, iface_key),
flags=0,
mac_address=str(mac_address),
- ip_address=str(dst_ip))
- cmd = 'ip_neighbor_add_del'
+ ip_address=str(dst_ip)
+ )
+ cmd = u"ip_neighbor_add_del"
args = dict(
is_add=1,
neighbor=neighbor)
- err_msg = 'Failed to add IP neighbor on interface {ifc}'.format(
- ifc=iface_key)
+ err_msg = f"Failed to add IP neighbor on interface {iface_key}"
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
"""
return dict(
af=getattr(
- AddressFamily, 'ADDRESS_IP6' if ip_addr.version == 6
- else 'ADDRESS_IP4').value,
- un=IPUtil.union_addr(ip_addr))
+ AddressFamily, u"ADDRESS_IP6" if ip_addr.version == 6
+ else u"ADDRESS_IP4"
+ ).value,
+ un=IPUtil.union_addr(ip_addr)
+ )
@staticmethod
def create_prefix_object(ip_addr, addr_len):
:returns: route parameter basic structure
:rtype: dict
"""
- interface = kwargs.get('interface', '')
- gateway = kwargs.get('gateway', '')
+ interface = kwargs.get(u"interface", u"")
+ gateway = kwargs.get(u"gateway", u"")
- net_addr = ip_address(unicode(network))
+ net_addr = ip_address(network)
prefix = IPUtil.create_prefix_object(net_addr, prefix_len)
paths = list()
n_hop = dict(
- address=IPUtil.union_addr(ip_address(unicode(gateway))) if gateway
- else 0,
+ address=IPUtil.union_addr(ip_address(gateway)) if gateway else 0,
via_label=MPLS_LABEL_INVALID,
obj_id=Constants.BITWISE_NON_ZERO
)
path = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface)
if interface else Constants.BITWISE_NON_ZERO,
- table_id=int(kwargs.get('lookup_vrf', 0)),
+ table_id=int(kwargs.get(u"lookup_vrf", 0)),
rpf_id=Constants.BITWISE_NON_ZERO,
- weight=int(kwargs.get('weight', 1)),
+ weight=int(kwargs.get(u"weight", 1)),
preference=1,
type=getattr(
- FibPathType, 'FIB_PATH_TYPE_LOCAL'
- if kwargs.get('local', False)
- else 'FIB_PATH_TYPE_NORMAL').value,
- flags=getattr(FibPathFlags, 'FIB_PATH_FLAG_NONE').value,
+ FibPathType, u"FIB_PATH_TYPE_LOCAL"
+ if kwargs.get(u"local", False)
+ else u"FIB_PATH_TYPE_NORMAL"
+ ).value,
+ flags=getattr(FibPathFlags, u"FIB_PATH_FLAG_NONE").value,
proto=getattr(
- FibPathNhProto, 'FIB_PATH_NH_PROTO_IP6'
+ FibPathNhProto, u"FIB_PATH_NH_PROTO_IP6"
if net_addr.version == 6
- else 'FIB_PATH_NH_PROTO_IP4').value,
+ else u"FIB_PATH_NH_PROTO_IP4"
+ ).value,
nh=n_hop,
n_labels=0,
label_stack=list(0 for _ in range(16))
paths.append(path)
route = dict(
- table_id=int(kwargs.get('vrf', 0)),
+ table_id=int(kwargs.get(u"vrf", 0)),
prefix=prefix,
n_paths=len(paths),
paths=paths
:type prefix_len: int
:type kwargs: dict
"""
- count = kwargs.get("count", 1)
+ count = kwargs.get(u"count", 1)
if count > 100:
- gateway = kwargs.get("gateway", '')
- interface = kwargs.get("interface", '')
- vrf = kwargs.get("vrf", None)
- multipath = kwargs.get("multipath", False)
+ gateway = kwargs.get(u"gateway", '')
+ interface = kwargs.get(u"interface", '')
+ vrf = kwargs.get(u"vrf", None)
+ multipath = kwargs.get(u"multipath", False)
with VatTerminal(node, json_param=False) as vat:
vat.vat_terminal_exec_cmd_from_template(
- 'vpp_route_add.vat',
+ u"vpp_route_add.vat",
network=network,
prefix_length=prefix_len,
- via='via {}'.format(gateway) if gateway else '',
- sw_if_index='sw_if_index {}'.format(
- InterfaceUtil.get_interface_index(node, interface))
- if interface else '',
- vrf='vrf {}'.format(vrf) if vrf else '',
- count='count {}'.format(count) if count else '',
- multipath='multipath' if multipath else '')
+ via=f"via {gateway}" if gateway else u"",
+ sw_if_index=f"sw_if_index "
+ f"{InterfaceUtil.get_interface_index(node, interface)}"
+ if interface else u"",
+ vrf=f"vrf {vrf}" if vrf else u"",
+ count=f"count {count}" if count else u"",
+ multipath=u"multipath" if multipath else u""
+ )
return
- net_addr = ip_address(unicode(network))
- cmd = 'ip_route_add_del'
+ net_addr = ip_address(network)
+ cmd = u"ip_route_add_del"
args = dict(
is_add=1,
- is_multipath=int(kwargs.get('multipath', False)),
+ is_multipath=int(kwargs.get(u"multipath", False)),
route=None
)
+ err_msg = f"Failed to add route(s) on host {node[u'host']}"
- err_msg = 'Failed to add route(s) on host {host}'.format(
- host=node['host'])
with PapiSocketExecutor(node) as papi_exec:
- for i in xrange(kwargs.get('count', 1)):
- args['route'] = IPUtil.compose_vpp_route_structure(
- node, net_addr + i, prefix_len, **kwargs)
- history = False if 1 < i < kwargs.get('count', 1) else True
+ for i in range(kwargs.get(u"count", 1)):
+ args[u"route"] = IPUtil.compose_vpp_route_structure(
+ node, net_addr + i, prefix_len, **kwargs
+ )
+ history = False if 1 < i < kwargs.get(u"count", 1) else True
papi_exec.add(cmd, history=history, **args)
papi_exec.get_replies(err_msg)
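# Sketch of the history-suppression idiom in these batched PAPI loops
# (names follow the code above; the exact boundary is illustrative):
# middle iterations pass history=False so the PAPI command history
# stays small on large batches.
#
# for i in range(count):
#     history = not 1 < i < count - 1    # first two and the last kept
#     papi_exec.add(cmd, history=history, **args)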
:type node: dict
:type interface: str
"""
- cmd = 'sw_interface_add_del_address'
+ cmd = u"sw_interface_add_del_address"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_add=False,
del_all=True
)
- err_msg = 'Failed to flush IP address on interface {ifc}'.format(
- ifc=interface)
+ err_msg = f"Failed to flush IP address on interface {interface}"
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type table_id: int
:type ipv6: bool
"""
- cmd = 'ip_table_add_del'
+ cmd = u"ip_table_add_del"
table = dict(
table_id=int(table_id),
- is_ip6=int(ipv6))
+ is_ip6=int(ipv6)
+ )
args = dict(
table=table,
- is_add=1)
- err_msg = 'Failed to add FIB table on host {host}'.format(
- host=node['host'])
+ is_add=1
+ )
+ err_msg = f"Failed to add FIB table on host {node[u'host']}"
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
import os
+from enum import Enum, IntEnum
+from io import open
from random import choice
-from string import letters
+from string import ascii_letters
-from enum import Enum, IntEnum
from ipaddress import ip_network, ip_address
from resources.libraries.python.IPUtil import IPUtil
:returns: The generated payload.
:rtype: str
"""
- return ''.join(choice(letters) for _ in range(length))
+ return u"".join(choice(ascii_letters) for _ in range(length))
class PolicyAction(Enum):
"""Policy actions."""
- BYPASS = ('bypass', 0)
- DISCARD = ('discard', 1)
- PROTECT = ('protect', 3)
+ BYPASS = (u"bypass", 0)
+ DISCARD = (u"discard", 1)
+ PROTECT = (u"protect", 3)
def __init__(self, policy_name, policy_int_repr):
self.policy_name = policy_name
class CryptoAlg(Enum):
"""Encryption algorithms."""
- AES_CBC_128 = ('aes-cbc-128', 1, 'AES-CBC', 16)
- AES_CBC_256 = ('aes-cbc-256', 3, 'AES-CBC', 32)
- AES_GCM_128 = ('aes-gcm-128', 7, 'AES-GCM', 16)
- AES_GCM_256 = ('aes-gcm-256', 9, 'AES-GCM', 32)
+ AES_CBC_128 = (u"aes-cbc-128", 1, u"AES-CBC", 16)
+ AES_CBC_256 = (u"aes-cbc-256", 3, u"AES-CBC", 32)
+ AES_GCM_128 = (u"aes-gcm-128", 7, u"AES-GCM", 16)
+ AES_GCM_256 = (u"aes-gcm-256", 9, u"AES-GCM", 32)
def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
self.alg_name = alg_name
class IntegAlg(Enum):
"""Integrity algorithm."""
- SHA_256_128 = ('sha-256-128', 4, 'SHA2-256-128', 32)
- SHA_512_256 = ('sha-512-256', 6, 'SHA2-512-256', 64)
+ SHA_256_128 = (u"sha-256-128", 4, u"SHA2-256-128", 32)
+ SHA_512_256 = (u"sha-512-256", 6, u"SHA2-512-256", 64)
def __init__(self, alg_name, alg_int_repr, scapy_name, key_len):
self.alg_name = alg_name
:raises RuntimeError: If failed to select IPsec backend or if no API
reply received.
"""
- cmd = 'ipsec_select_backend'
- err_msg = 'Failed to select IPsec backend on host {host}'.format(
- host=node['host'])
+ cmd = u"ipsec_select_backend"
+ err_msg = f"Failed to select IPsec backend on host {node[u'host']}"
args = dict(
protocol=protocol,
index=index
:param node: VPP node to dump IPsec backend on.
:type node: dict
"""
- err_msg = 'Failed to dump IPsec backends on host {host}'.format(
- host=node['host'])
+ err_msg = F"Failed to dump IPsec backends on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- papi_exec.add('ipsec_backend_dump').get_details(err_msg)
+ papi_exec.add(u"ipsec_backend_dump").get_details(err_msg)
@staticmethod
def vpp_ipsec_add_sad_entry(
node, sad_id, spi, crypto_alg, crypto_key, integ_alg=None,
- integ_key='', tunnel_src=None, tunnel_dst=None):
+ integ_key=u"", tunnel_src=None, tunnel_dst=None):
"""Create Security Association Database entry on the VPP node.
:param node: VPP node to add SAD entry on.
flags = int(IPsecSadFlags.IPSEC_API_SAD_FLAG_NONE)
if tunnel_src and tunnel_dst:
flags = flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL)
- src_addr = ip_address(unicode(tunnel_src))
- dst_addr = ip_address(unicode(tunnel_dst))
+ src_addr = ip_address(tunnel_src)
+ dst_addr = ip_address(tunnel_dst)
if src_addr.version == 6:
flags = \
flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6)
else:
- src_addr = ''
- dst_addr = ''
+ src_addr = u""
+ dst_addr = u""
- cmd = 'ipsec_sad_entry_add_del'
- err_msg = 'Failed to add Security Association Database entry on ' \
- 'host {host}'.format(host=node['host'])
+ cmd = u"ipsec_sad_entry_add_del"
+ err_msg = f"Failed to add Security Association Database entry " \
+ f"on host {node[u'host']}"
sad_entry = dict(
sad_id=int(sad_id),
spi=int(spi),
@staticmethod
def vpp_ipsec_add_sad_entries(
node, n_entries, sad_id, spi, crypto_alg, crypto_key,
- integ_alg=None, integ_key='', tunnel_src=None, tunnel_dst=None):
+ integ_alg=None, integ_key=u"", tunnel_src=None, tunnel_dst=None):
"""Create multiple Security Association Database entries on VPP node.
:param node: VPP node to add SAD entry on.
:type tunnel_dst: str
"""
if tunnel_src and tunnel_dst:
- src_addr = ip_address(unicode(tunnel_src))
- dst_addr = ip_address(unicode(tunnel_dst))
+ src_addr = ip_address(tunnel_src)
+ dst_addr = ip_address(tunnel_dst)
else:
- src_addr = ''
- dst_addr = ''
+ src_addr = u""
+ dst_addr = u""
addr_incr = 1 << (128 - 96) if src_addr.version == 6 \
else 1 << (32 - 24)
if int(n_entries) > 10:
- tmp_filename = '/tmp/ipsec_sad_{0}_add_del_entry.script'.\
- format(sad_id)
+ tmp_filename = f"/tmp/ipsec_sad_{sad_id}_add_del_entry.script"
with open(tmp_filename, 'w') as tmp_file:
- for i in xrange(n_entries):
- integ = (
- 'integ-alg {integ_alg} integ-key {integ_key}'.format(
- integ_alg=integ_alg.alg_name,
- integ_key=integ_key.encode('hex'))
- if integ_alg else '')
- tunnel = (
- 'tunnel-src {laddr} tunnel-dst {raddr}'.format(
- laddr=src_addr + i * addr_incr,
- raddr=dst_addr + i * addr_incr)
- if tunnel_src and tunnel_dst else '')
- conf = (
- 'exec ipsec sa add {sad_id} esp spi {spi} '
- 'crypto-alg {crypto_alg} crypto-key {crypto_key} '
- '{integ} {tunnel}\n'.format(
- sad_id=sad_id + i,
- spi=spi + i,
- crypto_alg=crypto_alg.alg_name,
- crypto_key=crypto_key.encode('hex'),
- integ=integ,
- tunnel=tunnel))
+ for i in range(n_entries):
+ integ = f"integ-alg {integ_alg.alg_name} " \
+ f"integ-key {integ_key}" \
+ if integ_alg else u""
+ tunnel = f"tunnel-src {src_addr + i * addr_incr} " \
+ f"tunnel-dst {dst_addr + i * addr_incr}" \
+ if tunnel_src and tunnel_dst else u""
+ conf = f"exec ipsec sa add {sad_id + i} esp spi {spi + i} "\
+ f"crypto-alg {crypto_alg.alg_name} " \
+ f"crypto-key {crypto_key.encode().hex()} " \
+ f"{integ} {tunnel}\n"
tmp_file.write(conf)
vat = VatExecutor()
- vat.execute_script(tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True)
+ vat.execute_script(
+ tmp_filename, node, timeout=300, json_out=False,
+ copy_on_execute=True
+ )
os.remove(tmp_filename)
return
flags = flags | int(IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL)
if src_addr.version == 6:
flags = flags | int(
- IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6)
+ IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
+ )
- cmd = 'ipsec_sad_entry_add_del'
- err_msg = 'Failed to add Security Association Database entry on ' \
- 'host {host}'.format(host=node['host'])
+ cmd = u"ipsec_sad_entry_add_del"
+ err_msg = f"Failed to add Security Association Database entry " \
+ f"on host {node[u'host']}"
sad_entry = dict(
sad_id=int(sad_id),
entry=sad_entry
)
with PapiSocketExecutor(node) as papi_exec:
- for i in xrange(n_entries):
- args['entry']['sad_id'] = int(sad_id) + i
- args['entry']['spi'] = int(spi) + i
- args['entry']['tunnel_src'] = str(src_addr + i * addr_incr) \
+ for i in range(n_entries):
+ args[u"entry"][u"sad_id"] = int(sad_id) + i
+ args[u"entry"][u"spi"] = int(spi) + i
+ args[u"entry"][u"tunnel_src"] = str(src_addr + i * addr_incr) \
if tunnel_src and tunnel_dst else src_addr
- args['entry']['tunnel_dst'] = str(dst_addr + i * addr_incr) \
+ args[u"entry"][u"tunnel_dst"] = str(dst_addr + i * addr_incr) \
if tunnel_src and tunnel_dst else dst_addr
history = False if 1 < i < n_entries - 1 else True
papi_exec.add(cmd, history=history, **args)
papi_exec.get_replies(err_msg)
:type interface: str
:type raddr_range: int
"""
- laddr = ip_address(unicode(tunnel_src))
- raddr = ip_address(unicode(tunnel_dst))
- taddr = ip_address(unicode(traffic_addr))
+ laddr = ip_address(tunnel_src)
+ raddr = ip_address(tunnel_dst)
+ taddr = ip_address(traffic_addr)
addr_incr = 1 << (128 - raddr_range) if laddr.version == 6 \
else 1 << (32 - raddr_range)
if int(n_tunnels) > 10:
- tmp_filename = '/tmp/ipsec_set_ip.script'
+ tmp_filename = u"/tmp/ipsec_set_ip.script"
with open(tmp_filename, 'w') as tmp_file:
- for i in xrange(n_tunnels):
- conf = (
- 'exec set interface ip address {interface} '
- '{laddr}/{laddr_l}\n'
- 'exec ip route add {taddr}/{taddr_l} via {raddr} '
- '{interface}\n'.format(
- interface=Topology.get_interface_name(
- node, interface),
- laddr=laddr + i * addr_incr,
- laddr_l=raddr_range,
- raddr=raddr + i * addr_incr,
- taddr=taddr + i,
- taddr_l=128 if taddr.version == 6 else 32))
+ if_name = Topology.get_interface_name(node, interface)
+ for i in range(n_tunnels):
+ conf = f"exec set interface ip address {if_name} " \
+ f"{laddr + i * addr_incr}/{raddr_range}\n" \
+ f"exec ip route add {taddr + i}/" \
+ f"{128 if taddr.version == 6 else 32} " \
+ f"via {raddr + i * addr_incr} {if_name}\n"
tmp_file.write(conf)
vat = VatExecutor()
- vat.execute_script(tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True)
+ vat.execute_script(
+ tmp_filename, node, timeout=300, json_out=False,
+ copy_on_execute=True
+ )
os.remove(tmp_filename)
return
- cmd1 = 'sw_interface_add_del_address'
+ cmd1 = u"sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_add=True,
del_all=False,
prefix=None
)
- cmd2 = 'ip_route_add_del'
+ cmd2 = u"ip_route_add_del"
args2 = dict(
is_add=1,
is_multipath=0,
route=None
)
- err_msg = 'Failed to configure IP addresses and IP routes on ' \
- 'interface {ifc} on host {host}'.\
- format(ifc=interface, host=node['host'])
+ err_msg = f"Failed to configure IP addresses and IP routes " \
+ f"on interface {interface} on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
- for i in xrange(n_tunnels):
- args1['prefix'] = IPUtil.create_prefix_object(
- laddr + i * addr_incr, raddr_range)
- args2['route'] = IPUtil.compose_vpp_route_structure(
- node,
- taddr + i,
+ for i in range(n_tunnels):
+ args1[u"prefix"] = IPUtil.create_prefix_object(
+ laddr + i * addr_incr, raddr_range
+ )
+ args2[u"route"] = IPUtil.compose_vpp_route_structure(
+ node, taddr + i,
prefix_len=128 if taddr.version == 6 else 32,
- interface=interface,
- gateway=raddr + i * addr_incr
+ interface=interface, gateway=raddr + i * addr_incr
)
- history = False if 1 < i < n_tunnels - 1 else True
+ history = False if 1 < i < n_tunnels - 2 else True
papi_exec.add(cmd1, history=history, **args1).\
add(cmd2, history=history, **args2)
papi_exec.get_replies(err_msg)
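# Sketch: addr_incr is the address count of one /raddr_range subnet, so
# laddr + i * addr_incr advances the ipaddress object by one subnet per
# tunnel. Assuming IPv4 with a hypothetical /24 range:
from ipaddress import ip_address
addr_incr = 1 << (32 - 24)          # 256 addresses per /24
assert ip_address(u"10.0.0.0") + 2 * addr_incr == ip_address(u"10.0.2.0")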
:type node: dict
:type spd_id: int
"""
- cmd = 'ipsec_spd_add_del'
- err_msg = 'Failed to add Security Policy Database on host {host}'.\
- format(host=node['host'])
+ cmd = u"ipsec_spd_add_del"
+ err_msg = f"Failed to add Security Policy Database " \
+ f"on host {node[u'host']}"
args = dict(
is_add=1,
spd_id=int(spd_id)
:type spd_id: int
:type interface: str or int
"""
- cmd = 'ipsec_interface_add_del_spd'
- err_msg = 'Failed to add interface {ifc} to Security Policy Database ' \
- '{spd} on host {host}'.\
- format(ifc=interface, spd=spd_id, host=node['host'])
+ cmd = u"ipsec_interface_add_del_spd"
+ err_msg = f"Failed to add interface {interface} to Security Policy " \
+ f"Database {spd_id} on host {node[u'host']}"
args = dict(
is_add=1,
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
:type rport_range: string
:type is_ipv6: bool
"""
-
if laddr_range is None:
- laddr_range = '::/0' if is_ipv6 else '0.0.0.0/0'
+ laddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
if raddr_range is None:
- raddr_range = '::/0' if is_ipv6 else '0.0.0.0/0'
+ raddr_range = u"::/0" if is_ipv6 else u"0.0.0.0/0"
- cmd = 'ipsec_spd_entry_add_del'
- err_msg = 'Failed to add entry to Security Policy Database ' \
- '{spd} on host {host}'.format(spd=spd_id, host=node['host'])
+ cmd = u"ipsec_spd_entry_add_del"
+ err_msg = f"Failed to add entry to Security Policy Database {spd_id} " \
+ f"on host {node[u'host']}"
spd_entry = dict(
spd_id=int(spd_id),
policy=action.policy_int_repr,
protocol=int(proto) if proto else 0,
remote_address_start=IPUtil.create_ip_address_object(
- ip_network(unicode(raddr_range), strict=False).network_address),
+ ip_network(raddr_range, strict=False).network_address
+ ),
remote_address_stop=IPUtil.create_ip_address_object(
- ip_network(
- unicode(raddr_range), strict=False).broadcast_address),
+ ip_network(raddr_range, strict=False).broadcast_address
+ ),
local_address_start=IPUtil.create_ip_address_object(
- ip_network(
- unicode(laddr_range), strict=False).network_address),
+ ip_network(laddr_range, strict=False).network_address
+ ),
local_address_stop=IPUtil.create_ip_address_object(
- ip_network(
- unicode(laddr_range), strict=False).broadcast_address),
- remote_port_start=int(rport_range.split('-')[0]) if rport_range
+ ip_network(laddr_range, strict=False).broadcast_address
+ ),
+ remote_port_start=int(rport_range.split(u"-")[0]) if rport_range
else 0,
- remote_port_stop=int(rport_range.split('-')[1]) if rport_range
+ remote_port_stop=int(rport_range.split(u"-")[1]) if rport_range
else 65535,
- local_port_start=int(lport_range.split('-')[0]) if lport_range
+ local_port_start=int(lport_range.split(u"-")[0]) if lport_range
else 0,
- local_port_stop=int(lport_range.split('-')[1]) if rport_range
+ local_port_stop=int(lport_range.split(u"-")[1]) if lport_range
else 65535
)
args = dict(
:type raddr_ip: string
"""
if int(n_entries) > 10:
- tmp_filename = '/tmp/ipsec_spd_{0}_add_del_entry.script'.\
- format(sa_id)
+ tmp_filename = f"/tmp/ipsec_spd_{sa_id}_add_del_entry.script"
with open(tmp_filename, 'w') as tmp_file:
- for i in xrange(n_entries):
- raddr_s = ip_address(unicode(raddr_ip)) + i
- raddr_e = ip_address(unicode(raddr_ip)) + (i + 1) - 1
- tunnel = (
- 'exec ipsec policy add spd {spd_id} '
- 'priority {priority} {direction} action protect '
- 'sa {sa_id} remote-ip-range {raddr_s} - {raddr_e} '
- 'local-ip-range 0.0.0.0 - 255.255.255.255\n'.
- format(
- spd_id=spd_id,
- priority=priority,
- direction='inbound' if inbound else 'outbound',
- sa_id=sa_id+i,
- raddr_s=raddr_s,
- raddr_e=raddr_e))
+ for i in range(n_entries):
+ raddr_s = ip_address(raddr_ip) + i
+ raddr_e = ip_address(raddr_ip) + (i + 1) - 1
+ direction = u"inbound" if inbound else u"outbound"
+ tunnel = f"exec ipsec policy add spd {spd_id} " \
+ f"priority {priority} {direction} " \
+ f"action protect sa {sa_id+i} " \
+ f"remote-ip-range {raddr_s} - {raddr_e} " \
+ f"local-ip-range 0.0.0.0 - 255.255.255.255\n"
tmp_file.write(tunnel)
VatExecutor().execute_script(
tmp_filename, node, timeout=300, json_out=False,
- copy_on_execute=True)
+ copy_on_execute=True
+ )
os.remove(tmp_filename)
return
- raddr_ip = ip_address(unicode(raddr_ip))
- laddr_range = '::/0' if raddr_ip.version == 6 else '0.0.0.0/0'
+ raddr_ip = ip_address(raddr_ip)
+ laddr_range = u"::/0" if raddr_ip.version == 6 else u"0.0.0.0/0"
- cmd = 'ipsec_spd_entry_add_del'
- err_msg = 'Failed to add entry to Security Policy Database ' \
- '{spd} on host {host}'.format(spd=spd_id, host=node['host'])
+ cmd = u"psec_spd_entry_add_del"
+ err_msg = f"ailed to add entry to Security Policy Database '{spd_id} " \
+ f"on host {node[u'host']}"
spd_entry = dict(
spd_id=int(spd_id),
remote_address_start=IPUtil.create_ip_address_object(raddr_ip),
remote_address_stop=IPUtil.create_ip_address_object(raddr_ip),
local_address_start=IPUtil.create_ip_address_object(
- ip_network(unicode(laddr_range), strict=False).network_address),
+ ip_network(laddr_range, strict=False).network_address
+ ),
local_address_stop=IPUtil.create_ip_address_object(
- ip_network(
- unicode(laddr_range), strict=False).broadcast_address),
+ ip_network(laddr_range, strict=False).broadcast_address
+ ),
remote_port_start=0,
remote_port_stop=65535,
local_port_start=0,
)
with PapiSocketExecutor(node) as papi_exec:
- for i in xrange(n_entries):
- args['entry']['remote_address_start']['un'] = \
+ for i in range(n_entries):
+ args[u"entry"][u"remote_address_start"][u"un"] = \
IPUtil.union_addr(raddr_ip + i)
- args['entry']['remote_address_stop']['un'] = \
+ args[u"entry"][u"remote_address_stop"][u"un"] = \
IPUtil.union_addr(raddr_ip + i)
- history = False if 1 < i < n_entries - 1 else True
+ history = False if 1 < i < n_entries - 2 else True
papi_exec.add(cmd, history=history, **args)
papi_exec.get_replies(err_msg)
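# Note: mutating the shared args[u"entry"] dict between add() calls is safe
# only if add() copies its kwargs when queueing, as CSIT's PapiExecutor is
# expected to do; otherwise every queued call would carry the last-assigned
# values. The assumed copy, sketched:
import copy
queued = list()
args = dict(spi=0)
for i in range(3):
    args[u"spi"] = i
    queued.append(copy.deepcopy(args))   # what add() is assumed to do
assert [q[u"spi"] for q in queued] == [0, 1, 2]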
n_tunnels = int(n_tunnels)
spi_1 = 100000
spi_2 = 200000
- if1_ip = ip_address(unicode(if1_ip_addr))
- if2_ip = ip_address(unicode(if2_ip_addr))
- raddr_ip1 = ip_address(unicode(raddr_ip1))
- raddr_ip2 = ip_address(unicode(raddr_ip2))
+ if1_ip = ip_address(if1_ip_addr)
+ if2_ip = ip_address(if2_ip_addr)
+ raddr_ip1 = ip_address(raddr_ip1)
+ raddr_ip2 = ip_address(raddr_ip2)
addr_incr = 1 << (128 - raddr_range) if if1_ip.version == 6 \
else 1 << (32 - raddr_range)
if n_tunnels > 10:
- tmp_fn1 = '/tmp/ipsec_create_tunnel_dut1.config'
- tmp_fn2 = '/tmp/ipsec_create_tunnel_dut2.config'
+ tmp_fn1 = u"/tmp/ipsec_create_tunnel_dut1.config"
+ tmp_fn2 = u"/tmp/ipsec_create_tunnel_dut2.config"
+ if1_n = Topology.get_interface_name(nodes[u"DUT1"], if1_key)
+ if2_n = Topology.get_interface_name(nodes[u"DUT2"], if2_key)
+ mask = 96 if if2_ip.version == 6 else 24
+ mask2 = 128 if if2_ip.version == 6 else 32
vat = VatExecutor()
with open(tmp_fn1, 'w') as tmp_f1, open(tmp_fn2, 'w') as tmp_f2:
+ rmac = Topology.get_interface_mac(nodes[u"DUT2"], if2_key)
tmp_f1.write(
- 'exec create loopback interface\n'
- 'exec set interface state loop0 up\n'
- 'exec set interface ip address {uifc} {iaddr}/{mask}\n'
- 'exec set ip arp {uifc} {raddr}/32 {rmac} static\n'
- .format(
- iaddr=if2_ip - 1,
- raddr=if2_ip,
- uifc=Topology.get_interface_name(
- nodes['DUT1'], if1_key),
- rmac=Topology.get_interface_mac(
- nodes['DUT2'], if2_key),
- mask=96 if if2_ip.version == 6 else 24))
+ f"exec create loopback interface\n"
+ f"exec set interface state loop0 up\n"
+ f"exec set interface ip address "
+ f"{if1_n} {if2_ip - 1}/{mask}\n"
+ f"exec set ip arp {if1_n} {if2_ip}/{mask2} {rmac} static\n"
+ )
tmp_f2.write(
- 'exec set interface ip address {uifc} {iaddr}/{mask}\n'
- .format(
- iaddr=if2_ip,
- uifc=Topology.get_interface_name(
- nodes['DUT2'], if2_key),
- mask=96 if if2_ip.version == 6 else 24))
- for i in xrange(n_tunnels):
- ckey = gen_key(IPsecUtil.get_crypto_alg_key_len(
- crypto_alg)).encode('hex')
+ f"exec set interface ip address {if2_n} {if2_ip}/{mask}\n"
+ )
+ for i in range(n_tunnels):
+ ckey = gen_key(
+ IPsecUtil.get_crypto_alg_key_len(crypto_alg)
+ ).encode().hex()
if integ_alg:
- ikey = gen_key(IPsecUtil.get_integ_alg_key_len(
- integ_alg)).encode('hex')
- integ = (
- 'integ_alg {integ_alg} '
- 'local_integ_key {local_integ_key} '
- 'remote_integ_key {remote_integ_key} '
- .format(
- integ_alg=integ_alg.alg_name,
- local_integ_key=ikey,
- remote_integ_key=ikey))
+ ikey = gen_key(
+ IPsecUtil.get_integ_alg_key_len(integ_alg)
+ ).encode().hex()
+ integ = f"integ_alg {integ_alg.alg_name} " \
+ f"local_integ_key {ikey} remote_integ_key {ikey} "
else:
- integ = ''
+ integ = u""
tmp_f1.write(
- 'exec set interface ip address loop0 {laddr}/32\n'
- 'ipsec_tunnel_if_add_del '
- 'local_spi {local_spi} '
- 'remote_spi {remote_spi} '
- 'crypto_alg {crypto_alg} '
- 'local_crypto_key {local_crypto_key} '
- 'remote_crypto_key {remote_crypto_key} '
- '{integ} '
- 'local_ip {laddr} '
- 'remote_ip {raddr}\n'
- .format(
- local_spi=spi_1 + i,
- remote_spi=spi_2 + i,
- crypto_alg=crypto_alg.alg_name,
- local_crypto_key=ckey,
- remote_crypto_key=ckey,
- integ=integ,
- laddr=if1_ip + i * addr_incr,
- raddr=if2_ip))
+ f"exec set interface ip address loop0 "
+ f"{if1_ip + i * addr_incr}/32\n"
+ f"ipsec_tunnel_if_add_del "
+ f"local_spi {spi_1 + i} remote_spi {spi_2 + i} "
+ f"crypto_alg {crypto_alg.alg_name} "
+ f"local_crypto_key {ckey} remote_crypto_key {ckey} "
+ f"{integ} "
+ f"local_ip {if1_ip + i * addr_incr} "
+ f"remote_ip {if2_ip}\n"
+ )
tmp_f2.write(
- 'ipsec_tunnel_if_add_del '
- 'local_spi {local_spi} '
- 'remote_spi {remote_spi} '
- 'crypto_alg {crypto_alg} '
- 'local_crypto_key {local_crypto_key} '
- 'remote_crypto_key {remote_crypto_key} '
- '{integ} '
- 'local_ip {laddr} '
- 'remote_ip {raddr}\n'
- .format(
- local_spi=spi_2 + i,
- remote_spi=spi_1 + i,
- crypto_alg=crypto_alg.alg_name,
- local_crypto_key=ckey,
- remote_crypto_key=ckey,
- integ=integ,
- laddr=if2_ip,
- raddr=if1_ip + i * addr_incr))
+ f"ipsec_tunnel_if_add_del "
+ f"local_spi {spi_2 + i} remote_spi {spi_1 + i} "
+ f"crypto_alg {crypto_alg.alg_name} "
+ f"local_crypto_key {ckey} remote_crypto_key {ckey} "
+ f"{integ} "
+ f"local_ip {if2_ip} "
+ f"remote_ip {if1_ip + i * addr_incr}\n"
+ )
vat.execute_script(
- tmp_fn1, nodes['DUT1'], timeout=1800, json_out=False,
+ tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
copy_on_execute=True,
- history=False if n_tunnels > 100 else True)
+ history=False if n_tunnels > 100 else True
+ )
vat.execute_script(
- tmp_fn2, nodes['DUT2'], timeout=1800, json_out=False,
+ tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
copy_on_execute=True,
- history=False if n_tunnels > 100 else True)
+ history=False if n_tunnels > 100 else True
+ )
os.remove(tmp_fn1)
os.remove(tmp_fn2)
with open(tmp_fn1, 'w') as tmp_f1, open(tmp_fn2, 'w') as tmp_f2:
+ raddr = ip_network(if1_ip_addr + u"/8", strict=False)
tmp_f2.write(
- 'exec ip route add {raddr} via {uifc} {iaddr}\n'
- .format(
- raddr=ip_network(unicode(if1_ip_addr+'/8'), False),
- iaddr=if2_ip - 1,
- uifc=Topology.get_interface_name(
- nodes['DUT2'], if2_key)))
- for i in xrange(n_tunnels):
+ f"exec ip route add {raddr} via {if2_n} {if2_ip - 1}\n"
+ )
+ for i in range(n_tunnels):
tmp_f1.write(
- 'exec set interface unnumbered ipsec{i} use {uifc}\n'
- 'exec set interface state ipsec{i} up\n'
- 'exec ip route add {taddr}/{mask} via ipsec{i}\n'
- .format(
- taddr=raddr_ip2 + i,
- i=i,
- uifc=Topology.get_interface_name(nodes['DUT1'],
- if1_key),
- mask=128 if if2_ip.version == 6 else 32))
+ f"exec set interface unnumbered ipsec{i} use {if1_n}\n"
+ f"exec set interface state ipsec{i} up\n"
+ f"exec ip route add {raddr_ip2 + i}/{mask2} "
+ f"via ipsec{i}\n"
+ )
tmp_f2.write(
- 'exec set interface unnumbered ipsec{i} use {uifc}\n'
- 'exec set interface state ipsec{i} up\n'
- 'exec ip route add {taddr}/{mask} via ipsec{i}\n'
- .format(
- taddr=raddr_ip1 + i,
- i=i,
- uifc=Topology.get_interface_name(nodes['DUT2'],
- if2_key),
- mask=128 if if2_ip.version == 6 else 32))
+ f"exec set interface unnumbered ipsec{i} use {if2_n}\n"
+ f"exec set interface state ipsec{i} up\n"
+ f"exec ip route add {raddr_ip1 + i}/{mask2} "
+ f"via ipsec{i}\n"
+ )
vat.execute_script(
- tmp_fn1, nodes['DUT1'], timeout=1800, json_out=False,
+ tmp_fn1, nodes[u"DUT1"], timeout=1800, json_out=False,
copy_on_execute=True,
history=False if n_tunnels > 100 else True)
vat.execute_script(
- tmp_fn2, nodes['DUT2'], timeout=1800, json_out=False,
+ tmp_fn2, nodes[u"DUT2"], timeout=1800, json_out=False,
copy_on_execute=True,
history=False if n_tunnels > 100 else True)
os.remove(tmp_fn1)
os.remove(tmp_fn2)
return
- with PapiSocketExecutor(nodes['DUT1']) as papi_exec:
+ with PapiSocketExecutor(nodes[u"DUT1"]) as papi_exec:
# Create loopback interface on DUT1, set it to up state
- cmd1 = 'create_loopback'
- args1 = dict(mac_address=0)
- err_msg = 'Failed to create loopback interface on host {host}'.\
- format(host=nodes['DUT1']['host'])
+ cmd1 = u"create_loopback"
+ args1 = dict(
+ mac_address=0
+ )
+ err_msg = f"Failed to create loopback interface " \
+ f"on host {nodes[u'DUT1'][u'host']}"
loop_sw_if_idx = papi_exec.add(cmd1, **args1).\
get_sw_if_index(err_msg)
- cmd1 = 'sw_interface_set_flags'
+ cmd1 = u"sw_interface_set_flags"
args1 = dict(
sw_if_index=loop_sw_if_idx,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value)
- err_msg = 'Failed to set loopback interface state up on host ' \
- '{host}'.format(host=nodes['DUT1']['host'])
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ )
+ err_msg = f"Failed to set loopback interface state up " \
+ f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd1, **args1).get_reply(err_msg)
# Set IP address on VPP node 1 interface
- cmd1 = 'sw_interface_add_del_address'
+ cmd1 = u"sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes['DUT1'], if1_key),
+ nodes[u"DUT1"], if1_key
+ ),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- if2_ip - 1, 96 if if2_ip.version == 6 else 24)
+ if2_ip - 1, 96 if if2_ip.version == 6 else 24
+ )
)
- err_msg = 'Failed to set IP address on interface {ifc} on host ' \
- '{host}'.format(ifc=if1_key, host=nodes['DUT1']['host'])
+ err_msg = f"Failed to set IP address on interface {if1_key} " \
+ f"on host {nodes[u'DUT1'][u'host']}"
papi_exec.add(cmd1, **args1).get_reply(err_msg)
- cmd4 = 'ip_neighbor_add_del'
+ cmd4 = u"ip_neighbor_add_del"
args4 = dict(
is_add=1,
neighbor=dict(
sw_if_index=Topology.get_interface_sw_index(
- nodes['DUT1'], if1_key),
+ nodes[u"DUT1"], if1_key
+ ),
flags=1,
mac_address=str(
- Topology.get_interface_mac(nodes['DUT2'], if2_key)),
- ip_address=str(ip_address(unicode(if2_ip_addr)))
+ Topology.get_interface_mac(nodes[u"DUT2"], if2_key)
+ ),
+ ip_address=str(ip_address(if2_ip_addr))
)
)
- err_msg = 'Failed to add IP neighbor on interface {ifc}'.format(
- ifc=if1_key)
+ err_msg = f"Failed to add IP neighbor on interface {if1_key}"
papi_exec.add(cmd4, **args4).get_reply(err_msg)
# Configure IPsec tunnel interfaces
args1 = dict(
del_all=False,
prefix=None
)
- cmd2 = 'ipsec_tunnel_if_add_del'
+ cmd2 = u"ipsec_tunnel_if_add_del"
args2 = dict(
is_add=1,
local_ip=None,
remote_integ_key=None,
tx_table_id=0
)
- err_msg = 'Failed to add IPsec tunnel interfaces on host {host}'.\
- format(host=nodes['DUT1']['host'])
+ err_msg = f"Failed to add IPsec tunnel interfaces " \
+ f"on host {nodes[u'DUT1'][u'host']}"
ipsec_tunnels = list()
ckeys = list()
ikeys = list()
- for i in xrange(n_tunnels):
+ for i in range(n_tunnels):
ckeys.append(
- gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg)))
+ gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
+ )
if integ_alg:
ikeys.append(
- gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)))
- args1['prefix'] = IPUtil.create_prefix_object(
- if1_ip + i * addr_incr, 128 if if1_ip.version == 6 else 32)
- args2['local_spi'] = spi_1 + i
- args2['remote_spi'] = spi_2 + i
- args2['local_ip'] = IPUtil.create_ip_address_object(
- if1_ip + i * addr_incr)
- args2['remote_ip'] = IPUtil.create_ip_address_object(if2_ip)
- args2['local_crypto_key_len'] = len(ckeys[i])
- args2['local_crypto_key'] = ckeys[i]
- args2['remote_crypto_key_len'] = len(ckeys[i])
- args2['remote_crypto_key'] = ckeys[i]
+ gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg))
+ )
+ args1[u"prefix"] = IPUtil.create_prefix_object(
+ if1_ip + i * addr_incr, 128 if if1_ip.version == 6 else 32
+ )
+ args2[u"local_spi"] = spi_1 + i
+ args2[u"remote_spi"] = spi_2 + i
+ args2[u"local_ip"] = IPUtil.create_ip_address_object(
+ if1_ip + i * addr_incr
+ )
+ args2[u"remote_ip"] = IPUtil.create_ip_address_object(if2_ip)
+ args2[u"local_crypto_key_len"] = len(ckeys[i])
+ args2[u"local_crypto_key"] = ckeys[i]
+ args2[u"remote_crypto_key_len"] = len(ckeys[i])
+ args2[u"remote_crypto_key"] = ckeys[i]
if integ_alg:
- args2['local_integ_key_len'] = len(ikeys[i])
- args2['local_integ_key'] = ikeys[i]
- args2['remote_integ_key_len'] = len(ikeys[i])
- args2['remote_integ_key'] = ikeys[i]
- history = False if 1 < i < n_tunnels - 1 else True
+ args2[u"local_integ_key_len"] = len(ikeys[i])
+ args2[u"local_integ_key"] = ikeys[i]
+ args2[u"remote_integ_key_len"] = len(ikeys[i])
+ args2[u"remote_integ_key"] = ikeys[i]
+ history = False if 1 < i < n_tunnels - 2 else True
papi_exec.add(cmd1, history=history, **args1).\
add(cmd2, history=history, **args2)
replies = papi_exec.get_replies(err_msg)
for reply in replies:
- if 'sw_if_index' in reply:
- ipsec_tunnels.append(reply["sw_if_index"])
+ if u"sw_if_index" in reply:
+ ipsec_tunnels.append(reply[u"sw_if_index"])
# Configure IP routes
- cmd1 = 'sw_interface_set_unnumbered'
+ cmd1 = u"sw_interface_set_unnumbered"
args1 = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes['DUT1'], if1_key),
+ nodes[u"DUT1"], if1_key
+ ),
unnumbered_sw_if_index=0
)
- cmd2 = 'sw_interface_set_flags'
+ cmd2 = u"sw_interface_set_flags"
args2 = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value)
- cmd3 = 'ip_route_add_del'
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ )
+ cmd3 = u"ip_route_add_del"
args3 = dict(
is_add=1,
is_multipath=0,
route=None
)
- err_msg = 'Failed to add IP routes on host {host}'.format(
- host=nodes['DUT1']['host'])
- for i in xrange(n_tunnels):
- args1['unnumbered_sw_if_index'] = ipsec_tunnels[i]
- args2['sw_if_index'] = ipsec_tunnels[i]
- args3['route'] = IPUtil.compose_vpp_route_structure(
- nodes['DUT1'],
- (raddr_ip2 + i).compressed,
+ err_msg = f"Failed to add IP routes " \
+ f"on host {nodes[u'DUT1'][u'host']}"
+ for i in range(n_tunnels):
+ args1[u"unnumbered_sw_if_index"] = ipsec_tunnels[i]
+ args2[u"sw_if_index"] = ipsec_tunnels[i]
+ args3[u"route"] = IPUtil.compose_vpp_route_structure(
+ nodes[u"DUT1"], (raddr_ip2 + i).compressed,
prefix_len=128 if raddr_ip2.version == 6 else 32,
interface=ipsec_tunnels[i]
)
- history = False if 1 < i < n_tunnels - 1 else True
+ history = False if 1 < i < n_tunnels - 2 else True
papi_exec.add(cmd1, history=history, **args1).\
add(cmd2, history=history, **args2).\
add(cmd3, history=history, **args3)
papi_exec.get_replies(err_msg)
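# Note: papi_exec.add() returns the executor itself, so several commands can
# be queued per iteration and flushed with one get_replies() round trip,
# which is what keeps the n_tunnels loop above to a single exchange:
#
#     papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd3, **args3)
#     papi_exec.get_replies(err_msg)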
- with PapiSocketExecutor(nodes['DUT2']) as papi_exec:
+ with PapiSocketExecutor(nodes[u"DUT2"]) as papi_exec:
# Set IP address on VPP node 2 interface
- cmd1 = 'sw_interface_add_del_address'
+ cmd1 = u"sw_interface_add_del_address"
args1 = dict(
sw_if_index=InterfaceUtil.get_interface_index(
- nodes['DUT2'], if2_key),
+ nodes[u"DUT2"], if2_key
+ ),
is_add=True,
del_all=False,
prefix=IPUtil.create_prefix_object(
- if2_ip, 96 if if2_ip.version == 6 else 24)
+ if2_ip, 96 if if2_ip.version == 6 else 24
+ )
)
- err_msg = 'Failed to set IP address on interface {ifc} on host ' \
- '{host}'.format(ifc=if2_key, host=nodes['DUT2']['host'])
+ err_msg = f"Failed to set IP address on interface {if2_key} " \
+ f"on host {nodes[u'DUT2'][u'host']}"
papi_exec.add(cmd1, **args1).get_reply(err_msg)
# Configure IPsec tunnel interfaces
- cmd2 = 'ipsec_tunnel_if_add_del'
+ cmd2 = u"ipsec_tunnel_if_add_del"
args2 = dict(
is_add=1,
local_ip=IPUtil.create_ip_address_object(if2_ip),
remote_integ_key=None,
tx_table_id=0
)
- err_msg = 'Failed to add IPsec tunnel interfaces on host {host}'. \
- format(host=nodes['DUT2']['host'])
+ err_msg = f"Failed to add IPsec tunnel interfaces " \
+ f"on host {nodes[u'DUT2'][u'host']}"
ipsec_tunnels = list()
- for i in xrange(n_tunnels):
- args2['local_spi'] = spi_2 + i
- args2['remote_spi'] = spi_1 + i
- args2['local_ip'] = IPUtil.create_ip_address_object(if2_ip)
- args2['remote_ip'] = IPUtil.create_ip_address_object(
+ for i in range(n_tunnels):
+ args2[u"local_spi"] = spi_2 + i
+ args2[u"remote_spi"] = spi_1 + i
+ args2[u"local_ip"] = IPUtil.create_ip_address_object(if2_ip)
+ args2[u"remote_ip"] = IPUtil.create_ip_address_object(
if1_ip + i * addr_incr)
- args2['local_crypto_key_len'] = len(ckeys[i])
- args2['local_crypto_key'] = ckeys[i]
- args2['remote_crypto_key_len'] = len(ckeys[i])
- args2['remote_crypto_key'] = ckeys[i]
+ args2[u"local_crypto_key_len"] = len(ckeys[i])
+ args2[u"local_crypto_key"] = ckeys[i]
+ args2[u"remote_crypto_key_len"] = len(ckeys[i])
+ args2[u"remote_crypto_key"] = ckeys[i]
if integ_alg:
- args2['local_integ_key_len'] = len(ikeys[i])
- args2['local_integ_key'] = ikeys[i]
- args2['remote_integ_key_len'] = len(ikeys[i])
- args2['remote_integ_key'] = ikeys[i]
- history = False if 1 < i < n_tunnels - 1 else True
+ args2[u"local_integ_key_len"] = len(ikeys[i])
+ args2[u"local_integ_key"] = ikeys[i]
+ args2[u"remote_integ_key_len"] = len(ikeys[i])
+ args2[u"remote_integ_key"] = ikeys[i]
+ history = False if 1 < i < n_tunnels - 2 else True
papi_exec.add(cmd2, history=history, **args2)
replies = papi_exec.get_replies(err_msg)
for reply in replies:
- if 'sw_if_index' in reply:
- ipsec_tunnels.append(reply["sw_if_index"])
+ if u"sw_if_index" in reply:
+ ipsec_tunnels.append(reply[u"sw_if_index"])
# Configure IP routes
- cmd1 = 'ip_route_add_del'
+ cmd1 = u"ip_route_add_del"
route = IPUtil.compose_vpp_route_structure(
- nodes['DUT2'], if1_ip.compressed,
+ nodes[u"DUT2"], if1_ip.compressed,
prefix_len=32 if if1_ip.version == 6 else 8,
interface=if2_key,
gateway=(if2_ip - 1).compressed
route=route
)
papi_exec.add(cmd1, **args1)
- cmd1 = 'sw_interface_set_unnumbered'
+ cmd1 = u"sw_interface_set_unnumbered"
args1 = dict(
is_add=True,
sw_if_index=InterfaceUtil.get_interface_index(
- nodes['DUT2'], if2_key),
+ nodes[u"DUT2"], if2_key
+ ),
unnumbered_sw_if_index=0
)
- cmd2 = 'sw_interface_set_flags'
+ cmd2 = u"sw_interface_set_flags"
args2 = dict(
sw_if_index=0,
- flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value)
- cmd3 = 'ip_route_add_del'
+ flags=InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
+ )
+ cmd3 = u"ip_route_add_del"
args3 = dict(
is_add=1,
is_multipath=0,
route=None
)
- err_msg = 'Failed to add IP routes on host {host}'.format(
- host=nodes['DUT2']['host'])
- for i in xrange(n_tunnels):
- args1['unnumbered_sw_if_index'] = ipsec_tunnels[i]
- args2['sw_if_index'] = ipsec_tunnels[i]
- args3['route'] = IPUtil.compose_vpp_route_structure(
- nodes['DUT1'],
- (raddr_ip1 + i).compressed,
+ err_msg = f"Failed to add IP routes " \
+ f"on host {nodes[u'DUT2'][u'host']}"
+ for i in range(n_tunnels):
+ args1[u"unnumbered_sw_if_index"] = ipsec_tunnels[i]
+ args2[u"sw_if_index"] = ipsec_tunnels[i]
+ args3[u"route"] = IPUtil.compose_vpp_route_structure(
+ nodes[u"DUT1"], (raddr_ip1 + i).compressed,
prefix_len=128 if raddr_ip1.version == 6 else 32,
interface=ipsec_tunnels[i]
)
- history = False if 1 < i < n_tunnels - 1 else True
+ history = False if 1 < i < n_tunnels - 2 else True
papi_exec.add(cmd1, history=history, **args1). \
add(cmd2, history=history, **args2). \
add(cmd3, history=history, **args3)
crypto_key = gen_key(IPsecUtil.get_crypto_alg_key_len(crypto_alg))
integ_key = gen_key(IPsecUtil.get_integ_alg_key_len(integ_alg)) \
- if integ_alg else ''
+ if integ_alg else u""
IPsecUtil.vpp_ipsec_set_ip_route(
- nodes['DUT1'], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2,
+ nodes[u"DUT1"], n_tunnels, tunnel_ip1, raddr_ip2, tunnel_ip2,
interface1, raddr_range)
IPsecUtil.vpp_ipsec_set_ip_route(
- nodes['DUT2'], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1,
+ nodes[u"DUT2"], n_tunnels, tunnel_ip2, raddr_ip1, tunnel_ip1,
interface2, raddr_range)
- IPsecUtil.vpp_ipsec_add_spd(
- nodes['DUT1'], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(
- nodes['DUT1'], spd_id, interface1)
+ IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT1"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT1"], spd_id, interface1)
IPsecUtil.vpp_ipsec_policy_add(
- nodes['DUT1'], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
- proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8')
+ nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
+ proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ )
IPsecUtil.vpp_ipsec_policy_add(
- nodes['DUT1'], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
- proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8')
+ nodes[u"DUT1"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
+ proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ )
- IPsecUtil.vpp_ipsec_add_spd(
- nodes['DUT2'], spd_id)
- IPsecUtil.vpp_ipsec_spd_add_if(
- nodes['DUT2'], spd_id, interface2)
+ IPsecUtil.vpp_ipsec_add_spd(nodes[u"DUT2"], spd_id)
+ IPsecUtil.vpp_ipsec_spd_add_if(nodes[u"DUT2"], spd_id, interface2)
IPsecUtil.vpp_ipsec_policy_add(
- nodes['DUT2'], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
- proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8')
+ nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=False,
+ proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ )
IPsecUtil.vpp_ipsec_policy_add(
- nodes['DUT2'], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
- proto=50, laddr_range='100.0.0.0/8', raddr_range='100.0.0.0/8')
+ nodes[u"DUT2"], spd_id, p_hi, PolicyAction.BYPASS, inbound=True,
+ proto=50, laddr_range=u"100.0.0.0/8", raddr_range=u"100.0.0.0/8"
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes['DUT1'], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip1, tunnel_ip2)
-
+ nodes[u"DUT1"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
+ integ_alg, integ_key, tunnel_ip1, tunnel_ip2
+ )
IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes['DUT1'], n_tunnels, spd_id, p_lo, False, sa_id_1, raddr_ip2)
+ nodes[u"DUT1"], n_tunnels, spd_id, p_lo, False, sa_id_1, raddr_ip2
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes['DUT2'], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip1, tunnel_ip2)
-
+ nodes[u"DUT2"], n_tunnels, sa_id_1, spi_1, crypto_alg, crypto_key,
+ integ_alg, integ_key, tunnel_ip1, tunnel_ip2
+ )
IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes['DUT2'], n_tunnels, spd_id, p_lo, True, sa_id_1, raddr_ip2)
+ nodes[u"DUT2"], n_tunnels, spd_id, p_lo, True, sa_id_1, raddr_ip2
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes['DUT2'], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip2, tunnel_ip1)
+ nodes[u"DUT2"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
+ integ_alg, integ_key, tunnel_ip2, tunnel_ip1
+ )
IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes['DUT2'], n_tunnels, spd_id, p_lo, False, sa_id_2, raddr_ip1)
+ nodes[u"DUT2"], n_tunnels, spd_id, p_lo, False, sa_id_2, raddr_ip1
+ )
IPsecUtil.vpp_ipsec_add_sad_entries(
- nodes['DUT1'], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
- integ_alg, integ_key, tunnel_ip2, tunnel_ip1)
+ nodes[u"DUT1"], n_tunnels, sa_id_2, spi_2, crypto_alg, crypto_key,
+ integ_alg, integ_key, tunnel_ip2, tunnel_ip1
+ )
IPsecUtil.vpp_ipsec_spd_add_entries(
- nodes['DUT1'], n_tunnels, spd_id, p_lo, True, sa_id_2, raddr_ip1)
+ nodes[u"DUT1"], n_tunnels, spd_id, p_lo, True, sa_id_2, raddr_ip1
+ )
@staticmethod
def vpp_ipsec_show(node):
:param node: Node to run command on.
:type node: dict
"""
- PapiSocketExecutor.run_cli_cmd(node, 'show ipsec')
+ PapiSocketExecutor.run_cli_cmd(node, u"show ipsec")
:type node: dict
:type interface: str
"""
- cmd = 'sw_interface_ip6nd_ra_config'
+ cmd = u"sw_interface_ip6nd_ra_config"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- suppress=1)
- err_msg = 'Failed to suppress ICMPv6 router advertisement message on ' \
- 'interface {ifc}'.format(ifc=interface)
+ suppress=1
+ )
+ err_msg = f"Failed to suppress ICMPv6 router advertisement message " \
+ f"on interface {interface}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type interface: str
:type interval: int
"""
- cmd = 'sw_interface_ip6nd_ra_config'
+ cmd = u"sw_interface_ip6nd_ra_config"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- initial_interval=int(interval))
- err_msg = 'Failed to set router advertisement interval on ' \
- 'interface {ifc}'.format(ifc=interface)
+ initial_interval=int(interval)
+ )
+ err_msg = f"Failed to set router advertisement interval " \
+ f"on interface {interface}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.TG:
+ if node[u"type"] == NodeType.TG:
continue
- for port_k in node['interfaces'].keys():
+ for port_k in node[u"interfaces"].keys():
ip6_addr_list = IPUtil.vpp_get_interface_ip_addresses(
- node, port_k, 'ipv6')
+ node, port_k, u"ipv6"
+ )
if ip6_addr_list:
IPv6Util.vpp_ra_suppress_link_layer(node, port_k)
"""Interface util library."""
from time import sleep
-
from enum import IntEnum
+
from ipaddress import ip_address
from robot.api import logger
class InterfaceUtil(object):
"""General utilities for managing interfaces"""
- __UDEV_IF_RULES_FILE = '/etc/udev/rules.d/10-network.rules'
+ __UDEV_IF_RULES_FILE = u"/etc/udev/rules.d/10-network.rules"
@staticmethod
def pci_to_int(pci_str):
:returns: Integer representation of PCI address.
:rtype: int
"""
- pci = list(pci_str.split(':')[0:2])
- pci.extend(pci_str.split(':')[2].split('.'))
+ pci = list(pci_str.split(u":")[0:2])
+ pci.extend(pci_str.split(u":")[2].split(u"."))
return (int(pci[0], 16) | int(pci[1], 16) << 16 |
int(pci[2], 16) << 24 | int(pci[3], 16) << 29)
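# Worked example: for pci_str = "0000:00:04.0" the splits yield
# ["0000", "00", "04", "0"]; domain lands in bits 0-15, bus in 16-23,
# device in 24-28 and function in 29-31:
#
#     0x0000 | 0x00 << 16 | 0x04 << 24 | 0x0 << 29 == 0x04000000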
@staticmethod
def pci_to_eth(node, pci_str):
- """Convert PCI address to Linux ethernet name.
+ """Convert PCI address on DUT to Linux ethernet name.
+ :param node: DUT node
:param pci_str: PCI address.
+ :type node: dict
:type pci_str: str
:returns: Ethernet name.
:rtype: str
"""
- cmd = ('basename /sys/bus/pci/devices/{pci_str}/net/*'.
- format(pci_str=pci_str))
+ cmd = f"basename /sys/bus/pci/devices/{pci_str}/net/*"
try:
stdout, _ = exec_cmd_no_error(node, cmd)
except RuntimeError:
- raise RuntimeError("Cannot convert {pci_str} to ethernet name!".
- format(pci_str=pci_str))
+ raise RuntimeError(f"Cannot convert {pci_str} to ethernet name!")
return stdout.strip()
sw_if_index = \
Topology.get_interface_sw_index_by_name(node, interface)
except TypeError as err:
- raise TypeError('Wrong interface format {ifc}: {err}'.format(
- ifc=interface, err=err.message))
+ raise TypeError(f"Wrong interface format {interface}") from err
return sw_if_index
@staticmethod
- def set_interface_state(node, interface, state, if_type='key'):
+ def set_interface_state(node, interface, state, if_type=u"key"):
"""Set interface state on a node.
Function can be used for DUTs as well as for TGs.
:raises ValueError: If the state of interface is unexpected.
:raises ValueError: If the node has an unknown node type.
"""
- if if_type == 'key':
- if isinstance(interface, basestring):
+ if if_type == u"key":
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
iface_name = Topology.get_interface_name(node, interface)
else:
sw_if_index = interface
- elif if_type == 'name':
+ elif if_type == u"name":
iface_key = Topology.get_interface_by_name(node, interface)
if iface_key is not None:
sw_if_index = Topology.get_interface_sw_index(node, iface_key)
iface_name = interface
else:
- raise ValueError('Unknown if_type: {type}'.format(type=if_type))
+ raise ValueError(f"Unknown if_type: {if_type}")
- if node['type'] == NodeType.DUT:
- if state == 'up':
+ if node[u"type"] == NodeType.DUT:
+ if state == u"up":
flags = InterfaceStatusFlags.IF_STATUS_API_FLAG_ADMIN_UP.value
- elif state == 'down':
+ elif state == u"down":
flags = 0
else:
- raise ValueError('Unexpected interface state: {state}'.format(
- state=state))
- cmd = 'sw_interface_set_flags'
- err_msg = 'Failed to set interface state on host {host}'.format(
- host=node['host'])
+ raise ValueError(f"Unexpected interface state: {state}")
+ cmd = u"sw_interface_set_flags"
+ err_msg = f"Failed to set interface state on host {node[u'host']}"
args = dict(
sw_if_index=int(sw_if_index),
- flags=flags)
+ flags=flags
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
- elif node['type'] == NodeType.TG or node['type'] == NodeType.VM:
- cmd = 'ip link set {ifc} {state}'.format(
- ifc=iface_name, state=state)
+ elif node[u"type"] == NodeType.TG or node[u"type"] == NodeType.VM:
+ cmd = f"ip link set {iface_name} {state}"
exec_cmd_no_error(node, cmd, sudo=True)
else:
- raise ValueError('Node {} has unknown NodeType: "{}"'
- .format(node['host'], node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
+ )
@staticmethod
def set_interface_ethernet_mtu(node, iface_key, mtu):
:raises ValueError: If the node type is "DUT".
:raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- raise ValueError('Node {}: Setting Ethernet MTU for interface '
- 'on DUT nodes not supported', node['host'])
- elif node['type'] == NodeType.TG:
+ if node[u"type"] == NodeType.DUT:
+ raise ValueError(
+ f"Node {node[u'host']}: Setting Ethernet MTU for interface "
+ f"on DUT nodes not supported"
+ )
+ elif node[u"type"] == NodeType.TG:
iface_name = Topology.get_interface_name(node, iface_key)
- cmd = 'ip link set {} mtu {}'.format(iface_name, mtu)
+ cmd = f"ip link set {iface_name} mtu {mtu}"
exec_cmd_no_error(node, cmd, sudo=True)
else:
- raise ValueError('Node {} has unknown NodeType: "{}"'
- .format(node['host'], node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: {node[u'type']}"
+ )
@staticmethod
def set_default_ethernet_mtu_on_all_interfaces_on_node(node):
:type node: dict
:returns: Nothing.
"""
- for ifc in node['interfaces']:
+ for ifc in node[u"interfaces"]:
InterfaceUtil.set_interface_ethernet_mtu(node, ifc, 1500)
@staticmethod
:type interface: str or int
:type mtu: int
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
- cmd = 'hw_interface_set_mtu'
- err_msg = 'Failed to set interface MTU on host {host}'.format(
- host=node['host'])
- args = dict(sw_if_index=sw_if_index,
- mtu=int(mtu))
+ cmd = u"hw_interface_set_mtu"
+ err_msg = f"Failed to set interface MTU on host {node[u'host']}"
+ args = dict(
+ sw_if_index=sw_if_index,
+ mtu=int(mtu)
+ )
try:
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
except AssertionError as err:
# TODO: Make failure tolerance optional.
- logger.debug("Setting MTU failed. Expected?\n{err}".format(
- err=err))
+ logger.debug(f"Setting MTU failed. Expected?\n{err}")
@staticmethod
def vpp_set_interfaces_mtu_on_node(node, mtu=9200):
:type node: dict
:type mtu: int
"""
- for interface in node['interfaces']:
+ for interface in node[u"interfaces"]:
InterfaceUtil.vpp_set_interface_mtu(node, interface, mtu)
@staticmethod
:type mtu: int
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
InterfaceUtil.vpp_set_interfaces_mtu_on_node(node, mtu)
@staticmethod
:raises RuntimeError: If any interface is not in link-up state after
defined number of retries.
"""
- for _ in xrange(0, retries):
+ for _ in range(retries):
not_ready = list()
out = InterfaceUtil.vpp_get_interface_data(node)
for interface in out:
- if interface.get('flags') == 1:
- not_ready.append(interface.get('interface_name'))
+ if interface.get(u"flags") == 1:
+ not_ready.append(interface.get(u"interface_name"))
if not not_ready:
break
else:
- logger.debug('Interfaces still not in link-up state:\n{ifs} '
- '\nWaiting...'.format(ifs=not_ready))
+ logger.debug(
+ f"Interfaces still not in link-up state:\n{not_ready}"
+ )
sleep(1)
else:
- err = 'Timeout, interfaces not up:\n{ifs}'.format(ifs=not_ready) \
- if 'not_ready' in locals() else 'No check executed!'
+ err = f"Timeout, interfaces not up:\n{not_ready}" \
+ if u"not_ready" in locals() else u"No check executed!"
raise RuntimeError(err)
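# Note: the loop above uses Python's for/else - the else arm runs only when
# the loop exhausts retries without hitting break. The same shape in
# miniature:
for attempt in range(3):
    if attempt == 1:                 # hypothetical success condition
        break
else:
    raise RuntimeError(u"No attempt succeeded!")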
@staticmethod
:returns: Nothing.
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
InterfaceUtil.vpp_node_interfaces_ready_wait(node, retries)
@staticmethod
:returns: Processed interface dump.
:rtype: dict
"""
- if_dump['l2_address'] = str(if_dump['l2_address'])
- if_dump['b_dmac'] = str(if_dump['b_dmac'])
- if_dump['b_smac'] = str(if_dump['b_smac'])
- if_dump['flags'] = if_dump['flags'].value
- if_dump['type'] = if_dump['type'].value
- if_dump['link_duplex'] = if_dump['link_duplex'].value
- if_dump['sub_if_flags'] = if_dump['sub_if_flags'].value \
- if hasattr(if_dump['sub_if_flags'], 'value') \
- else int(if_dump['sub_if_flags'])
+ if_dump[u"l2_address"] = str(if_dump[u"l2_address"])
+ if_dump[u"b_dmac"] = str(if_dump[u"b_dmac"])
+ if_dump[u"b_smac"] = str(if_dump[u"b_smac"])
+ if_dump[u"flags"] = if_dump[u"flags"].value
+ if_dump[u"type"] = if_dump[u"type"].value
+ if_dump[u"link_duplex"] = if_dump[u"link_duplex"].value
+ if_dump[u"sub_if_flags"] = if_dump[u"sub_if_flags"].value \
+ if hasattr(if_dump[u"sub_if_flags"], u"value") \
+ else int(if_dump[u"sub_if_flags"])
return if_dump
if interface is not None:
- if isinstance(interface, basestring):
- param = 'interface_name'
+ if isinstance(interface, str):
+ param = u"interface_name"
elif isinstance(interface, int):
- param = 'sw_if_index'
+ param = u"sw_if_index"
else:
- raise TypeError('Wrong interface format {ifc}'.format(
- ifc=interface))
+ raise TypeError(f"Wrong interface format {interface}")
else:
- param = ''
+ param = u""
- cmd = 'sw_interface_dump'
+ cmd = u"sw_interface_dump"
args = dict(
name_filter_valid=False,
- name_filter=''
+ name_filter=u""
)
- err_msg = 'Failed to get interface dump on host {host}'.format(
- host=node['host'])
+ err_msg = f"Failed to get interface dump on host {node[u'host']}"
+
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- logger.debug('Received data:\n{d!r}'.format(d=details))
+ logger.debug(f"Received data:\n{details!r}")
data = list() if interface is None else dict()
for dump in details:
if interface is None:
data.append(process_if_dump(dump))
- elif str(dump.get(param)).rstrip('\x00') == str(interface):
+ elif str(dump.get(param)).rstrip(u"\x00") == str(interface):
data = process_if_dump(dump)
break
- logger.debug('Interface data:\n{if_data}'.format(if_data=data))
+ logger.debug(f"Interface data:\n{data}")
return data
@staticmethod
:rtype: str
"""
if_data = InterfaceUtil.vpp_get_interface_data(node, sw_if_index)
- if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
+ if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
if_data = InterfaceUtil.vpp_get_interface_data(
- node, if_data['sup_sw_if_index'])
+ node, if_data[u"sup_sw_if_index"]
+ )
- return if_data.get('interface_name')
+ return if_data.get(u"interface_name")
@staticmethod
def vpp_get_interface_sw_index(node, interface_name):
"""
if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)
- return if_data.get('sw_if_index')
+ return if_data.get(u"sw_if_index")
@staticmethod
def vpp_get_interface_mac(node, interface):
:rtype: str
"""
if_data = InterfaceUtil.vpp_get_interface_data(node, interface)
- if if_data['sup_sw_if_index'] != if_data['sw_if_index']:
+ if if_data[u"sup_sw_if_index"] != if_data[u"sw_if_index"]:
if_data = InterfaceUtil.vpp_get_interface_data(
- node, if_data['sup_sw_if_index'])
+ node, if_data[u"sup_sw_if_index"])
- return if_data.get('l2_address')
+ return if_data.get(u"l2_address")
@staticmethod
def tg_set_interface_driver(node, pci_addr, driver):
# Unbind from current driver
if old_driver is not None:
- cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/unbind"'\
- .format(pci_addr, old_driver)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ cmd = f"sh -c 'echo {pci_addr} > " \
+ f"/sys/bus/pci/drivers/{old_driver}/unbind'"
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
# Bind to the new driver
- cmd = 'sh -c "echo {0} > /sys/bus/pci/drivers/{1}/bind"'\
- .format(pci_addr, driver)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ cmd = f"sh -c 'echo {pci_addr} > /sys/bus/pci/drivers/{driver}/bind'"
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
@staticmethod
def tg_get_interface_driver(node, pci_addr):
ssh = SSH()
ssh.connect(node)
- cmd = 'rm -f {0}'.format(InterfaceUtil.__UDEV_IF_RULES_FILE)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ cmd = f"rm -f {InterfaceUtil.__UDEV_IF_RULES_FILE}"
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
-
- for interface in node['interfaces'].values():
- rule = 'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
- '==\\"' + interface['mac_address'] + '\\", NAME=\\"' + \
- interface['name'] + '\\"'
- cmd = 'sh -c "echo \'{0}\' >> {1}"'.format(
- rule, InterfaceUtil.__UDEV_IF_RULES_FILE)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
+
+ for interface in node[u"interfaces"].values():
+ rule = u'SUBSYSTEM==\\"net\\", ACTION==\\"add\\", ATTR{address}' + \
+ u'==\\"' + interface[u"mac_address"] + u'\\", NAME=\\"' + \
+ interface[u"name"] + u'\\"'
+ cmd = f"sh -c 'echo \"{rule}\" >> " \
+ f"{InterfaceUtil.__UDEV_IF_RULES_FILE}'"
+
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError("'{0}' failed on '{1}'"
- .format(cmd, node['host']))
+ raise RuntimeError(f"'{cmd}' failed on '{node[u'host']}'")
- cmd = '/etc/init.d/udev restart'
+ cmd = u"/etc/init.d/udev restart"
ssh.exec_command_sudo(cmd)
@staticmethod
:param node: Node to setup interfaces driver on (must be TG node).
:type node: dict
"""
- for interface in node['interfaces'].values():
- InterfaceUtil.tg_set_interface_driver(node,
- interface['pci_address'],
- interface['driver'])
+ for interface in node[u"interfaces"].values():
+ InterfaceUtil.tg_set_interface_driver(
+ node, interface[u"pci_address"], interface[u"driver"]
+ )
@staticmethod
def update_vpp_interface_data_on_node(node):
interface_list = InterfaceUtil.vpp_get_interface_data(node)
interface_dict = dict()
for ifc in interface_list:
- interface_dict[ifc['l2_address']] = ifc
+ interface_dict[ifc[u"l2_address"]] = ifc
- for if_name, if_data in node['interfaces'].items():
- ifc_dict = interface_dict.get(if_data['mac_address'])
+ for if_name, if_data in node[u"interfaces"].items():
+ ifc_dict = interface_dict.get(if_data[u"mac_address"])
if ifc_dict is not None:
- if_data['name'] = ifc_dict['interface_name']
- if_data['vpp_sw_index'] = ifc_dict['sw_if_index']
- if_data['mtu'] = ifc_dict['mtu'][0]
- logger.trace('Interface {ifc} found by MAC {mac}'.format(
- ifc=if_name, mac=if_data['mac_address']))
+ if_data[u"name"] = ifc_dict[u"interface_name"]
+ if_data[u"vpp_sw_index"] = ifc_dict[u"sw_if_index"]
+ if_data[u"mtu"] = ifc_dict[u"mtu"][0]
+ logger.trace(
+ f"Interface {if_name} found by MAC "
+ f"{if_data[u'mac_address']}"
+ )
else:
- logger.trace('Interface {ifc} not found by MAC {mac}'.format(
- ifc=if_name, mac=if_data['mac_address']))
- if_data['vpp_sw_index'] = None
+ logger.trace(
+ f"Interface {if_name} not found by MAC "
+ f"{if_data[u'mac_address']}"
+ )
+ if_data[u"vpp_sw_index"] = None
@staticmethod
def update_nic_interface_names(node):
:param node: Node dictionary.
:type node: dict
"""
- for ifc in node['interfaces'].values():
- if_pci = ifc['pci_address'].replace('.', ':').split(':')
- bus = '{:x}'.format(int(if_pci[1], 16))
- dev = '{:x}'.format(int(if_pci[2], 16))
- fun = '{:x}'.format(int(if_pci[3], 16))
- loc = '{bus}/{dev}/{fun}'.format(bus=bus, dev=dev, fun=fun)
- if ifc['model'] == 'Intel-XL710':
- ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
- elif ifc['model'] == 'Intel-X710':
- ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
- elif ifc['model'] == 'Intel-X520-DA2':
- ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
- elif ifc['model'] == 'Cisco-VIC-1385':
- ifc['name'] = 'FortyGigabitEthernet{loc}'.format(loc=loc)
- elif ifc['model'] == 'Cisco-VIC-1227':
- ifc['name'] = 'TenGigabitEthernet{loc}'.format(loc=loc)
+ for ifc in node[u"interfaces"].values():
+ if_pci = ifc[u"pci_address"].replace(u".", u":").split(u":")
+ loc = f"{int(if_pci[1], 16):x}/{int(if_pci[2], 16):x}/" \
+ f"{int(if_pci[3], 16):x}"
+ if ifc[u"model"] == u"Intel-XL710":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X710":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Intel-X520-DA2":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1385":
+ ifc[u"name"] = f"FortyGigabitEthernet{loc}"
+ elif ifc[u"model"] == u"Cisco-VIC-1227":
+ ifc[u"name"] = f"TenGigabitEthernet{loc}"
else:
- ifc['name'] = 'UnknownEthernet{loc}'.format(loc=loc)
+ ifc[u"name"] = f"UnknownEthernet{loc}"
@staticmethod
def update_nic_interface_names_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
InterfaceUtil.update_nic_interface_names(node)
@staticmethod
ssh = SSH()
ssh.connect(node)
- cmd = ('for dev in `ls /sys/class/net/`; do echo "\\"`cat '
- '/sys/class/net/$dev/address`\\": \\"$dev\\""; done;')
+ cmd = u'for dev in `ls /sys/class/net/`; do echo "\\"`cat ' \
+ u'/sys/class/net/$dev/address`\\": \\"$dev\\""; done;'
- (ret_code, stdout, _) = ssh.exec_command(cmd)
+ ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Get interface name and MAC failed')
- tmp = "{" + stdout.rstrip().replace('\n', ',') + "}"
+ raise RuntimeError(u"Get interface name and MAC failed")
+ tmp = u"{" + stdout.rstrip().replace(u"\n", u",") + u"}"
+
interfaces = JsonParser().parse_data(tmp)
- for interface in node['interfaces'].values():
- name = interfaces.get(interface['mac_address'])
+ for interface in node[u"interfaces"].values():
+ name = interfaces.get(interface[u"mac_address"])
if name is None:
continue
- interface['name'] = name
+ interface[u"name"] = name
# Set udev rules for interfaces
if not skip_tg_udev:
for if_key in Topology.get_node_interfaces(node):
if_pci = Topology.get_interface_pci_addr(node, if_key)
ssh.connect(node)
- cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(if_pci)
+ cmd = f"cat /sys/bus/pci/devices/{if_pci}/numa_node"
for _ in range(3):
(ret, out, _) = ssh.exec_command(cmd)
if ret == 0:
else:
raise ValueError
except ValueError:
- logger.trace('Reading numa location failed for: {0}'
- .format(if_pci))
+ logger.trace(
+ f"Reading numa location failed for: {if_pci}"
+ )
else:
- Topology.set_interface_numa_node(node, if_key,
- numa_node)
+ Topology.set_interface_numa_node(
+ node, if_key, numa_node
+ )
break
else:
- raise RuntimeError('Update numa node failed for: {0}'
- .format(if_pci))
+ raise RuntimeError(f"Update numa node failed for: {if_pci}")
@staticmethod
def update_all_numa_nodes(nodes, skip_tg=False):
:returns: Nothing.
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
InterfaceUtil.iface_update_numa_node(node)
- elif node['type'] == NodeType.TG and not skip_tg:
+ elif node[u"type"] == NodeType.TG and not skip_tg:
InterfaceUtil.iface_update_numa_node(node)
@staticmethod
- def update_all_interface_data_on_all_nodes(nodes, skip_tg=False,
- skip_tg_udev=False,
- numa_node=False):
+ def update_all_interface_data_on_all_nodes(
+ nodes, skip_tg=False, skip_tg_udev=False, numa_node=False):
"""Update interface names on all nodes in DICT__nodes.
This method updates the topology dictionary by querying interface lists
:type numa_node: bool
"""
for node_data in nodes.values():
- if node_data['type'] == NodeType.DUT:
+ if node_data[u"type"] == NodeType.DUT:
InterfaceUtil.update_vpp_interface_data_on_node(node_data)
- elif node_data['type'] == NodeType.TG and not skip_tg:
+ elif node_data[u"type"] == NodeType.TG and not skip_tg:
InterfaceUtil.update_tg_interface_data_on_node(
node_data, skip_tg_udev)
if numa_node:
- if node_data['type'] == NodeType.DUT:
+ if node_data[u"type"] == NodeType.DUT:
InterfaceUtil.iface_update_numa_node(node_data)
- elif node_data['type'] == NodeType.TG and not skip_tg:
+ elif node_data[u"type"] == NodeType.TG and not skip_tg:
InterfaceUtil.iface_update_numa_node(node_data)
@staticmethod
"""
sw_if_index = InterfaceUtil.get_interface_index(node, interface)
- cmd = 'create_vlan_subif'
+ cmd = u"create_vlan_subif"
args = dict(
sw_if_index=sw_if_index,
vlan_id=int(vlan)
)
- err_msg = 'Failed to create VLAN sub-interface on host {host}'.format(
- host=node['host'])
+ err_msg = f"Failed to create VLAN sub-interface on host {node[u'host']}"
+
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- if_key = Topology.add_new_port(node, 'vlan_subif')
+ if_key = Topology.add_new_port(node, u"vlan_subif")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
Topology.update_interface_name(node, if_key, ifc_name)
- return '{ifc}.{vlan}'.format(ifc=interface, vlan=vlan), sw_if_index
+ return f"{interface}.{vlan}", sw_if_index
@staticmethod
def create_vxlan_interface(node, vni, source_ip, destination_ip):
:raises RuntimeError: if it is unable to create VxLAN interface on the
node.
"""
- src_address = ip_address(unicode(source_ip))
- dst_address = ip_address(unicode(destination_ip))
-
- cmd = 'vxlan_add_del_tunnel'
- args = dict(is_add=1,
- is_ipv6=1 if src_address.version == 6 else 0,
- instance=Constants.BITWISE_NON_ZERO,
- src_address=src_address.packed,
- dst_address=dst_address.packed,
- mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
- encap_vrf_id=0,
- decap_next_index=Constants.BITWISE_NON_ZERO,
- vni=int(vni))
- err_msg = 'Failed to create VXLAN tunnel interface on host {host}'.\
- format(host=node['host'])
+ src_address = ip_address(source_ip)
+ dst_address = ip_address(destination_ip)
+
+ cmd = u"vxlan_add_del_tunnel"
+ args = dict(
+ is_add=1,
+ is_ipv6=1 if src_address.version == 6 else 0,
+ instance=Constants.BITWISE_NON_ZERO,
+ src_address=src_address.packed,
+ dst_address=dst_address.packed,
+ mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
+ encap_vrf_id=0,
+ decap_next_index=Constants.BITWISE_NON_ZERO,
+ vni=int(vni)
+ )
+ err_msg = f"Failed to create VXLAN tunnel interface " \
+ f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- if_key = Topology.add_new_port(node, 'vxlan_tunnel')
+ if_key = Topology.add_new_port(node, u"vxlan_tunnel")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
Topology.update_interface_name(node, if_key, ifc_name)
"""
sw_if_index = InterfaceUtil.get_interface_index(node, interface)
- cmd = 'sw_interface_set_vxlan_bypass'
- args = dict(is_ipv6=0,
- sw_if_index=sw_if_index,
- enable=1)
- err_msg = 'Failed to set VXLAN bypass on interface on host {host}'.\
- format(host=node['host'])
+ cmd = u"sw_interface_set_vxlan_bypass"
+ args = dict(
+ is_ipv6=0,
+ sw_if_index=sw_if_index,
+ enable=1
+ )
+ err_msg = f"Failed to set VXLAN bypass on interface " \
+ f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_replies(err_msg)
:returns: Processed vxlan interface dump.
:rtype: dict
"""
- if vxlan_dump['is_ipv6']:
- vxlan_dump['src_address'] = \
- ip_address(unicode(vxlan_dump['src_address']))
- vxlan_dump['dst_address'] = \
- ip_address(unicode(vxlan_dump['dst_address']))
+ if vxlan_dump[u"is_ipv6"]:
+ vxlan_dump[u"src_address"] = \
+ ip_address(vxlan_dump[u"src_address"])
+ vxlan_dump[u"dst_address"] = \
+ ip_address(vxlan_dump[u"dst_address"])
else:
- vxlan_dump['src_address'] = \
- ip_address(unicode(vxlan_dump['src_address'][0:4]))
- vxlan_dump['dst_address'] = \
- ip_address(unicode(vxlan_dump['dst_address'][0:4]))
+ vxlan_dump[u"src_address"] = \
+ ip_address(vxlan_dump[u"src_address"][0:4])
+ vxlan_dump[u"dst_address"] = \
+ ip_address(vxlan_dump[u"dst_address"][0:4])
return vxlan_dump
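# Sketch: PAPI hands addresses back as fixed 16-byte buffers; for IPv4 only
# the first four bytes are meaningful, hence the [0:4] slice above.
# Python 3's ip_address() accepts packed bytes directly:
from ipaddress import ip_address
assert ip_address(b"\xc0\x00\x02\x01") == ip_address(u"192.0.2.1")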
if interface is not None:
else:
sw_if_index = int(Constants.BITWISE_NON_ZERO)
- cmd = 'vxlan_tunnel_dump'
- args = dict(sw_if_index=sw_if_index)
- err_msg = 'Failed to get VXLAN dump on host {host}'.format(
- host=node['host'])
+ cmd = u"vxlan_tunnel_dump"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get VXLAN dump on host {node[u'host']}"
+
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for dump in details:
if interface is None:
data.append(process_vxlan_dump(dump))
- elif dump['sw_if_index'] == sw_if_index:
+ elif dump[u"sw_if_index"] == sw_if_index:
data = process_vxlan_dump(dump)
break
- logger.debug('VXLAN data:\n{vxlan_data}'.format(vxlan_data=data))
+ logger.debug(f"VXLAN data:\n{data}")
return data
@staticmethod
- def vhost_user_dump(node):
- """Get vhost-user data for the given node.
-
- TODO: Move to VhostUser.py
-
- :param node: VPP node to get interface data from.
- :type node: dict
- :returns: List of dictionaries with all vhost-user interfaces.
- :rtype: list
- """
- def process_vhost_dump(vhost_dump):
- """Process vhost dump.
-
- :param vhost_dump: Vhost interface dump.
- :type vhost_dump: dict
- :returns: Processed vhost interface dump.
- :rtype: dict
- """
- vhost_dump['interface_name'] = \
- vhost_dump['interface_name'].rstrip('\x00')
- vhost_dump['sock_filename'] = \
- vhost_dump['sock_filename'].rstrip('\x00')
- return vhost_dump
-
- cmd = 'sw_interface_vhost_user_dump'
- err_msg = 'Failed to get vhost-user dump on host {host}'.format(
- host=node['host'])
- with PapiSocketExecutor(node) as papi_exec:
- details = papi_exec.add(cmd).get_details(err_msg)
-
- for dump in details:
- # In-place edits.
- process_vhost_dump(dump)
-
- logger.debug('Vhost-user details:\n{vhost_details}'.format(
- vhost_details=details))
- return details
-
- @staticmethod
- def create_subinterface(node, interface, sub_id, outer_vlan_id=None,
- inner_vlan_id=None, type_subif=None):
+ def create_subinterface(
+ node, interface, sub_id, outer_vlan_id=None, inner_vlan_id=None,
+ type_subif=None):
"""Create sub-interface on node. It is possible to set required
sub-interface type and VLAN tag(s).
subif_types = type_subif.split()
flags = 0
- if 'no_tags' in subif_types:
+ if u"no_tags" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_NO_TAGS
- if 'one_tag' in subif_types:
+ if u"one_tag" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_ONE_TAG
- if 'two_tags' in subif_types:
+ if u"two_tags" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_TWO_TAGS
- if 'dot1ad' in subif_types:
+ if u"dot1ad" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DOT1AD
- if 'exact_match' in subif_types:
+ if u"exact_match" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_EXACT_MATCH
- if 'default_sub' in subif_types:
+ if u"default_sub" in subif_types:
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_DEFAULT
- if type_subif == 'default_sub':
+ if type_subif == u"default_sub":
flags = flags | SubInterfaceFlags.SUB_IF_API_FLAG_INNER_VLAN_ID_ANY\
| SubInterfaceFlags.SUB_IF_API_FLAG_OUTER_VLAN_ID_ANY
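        # Illustrative note (not part of the diff): type_subif is a
        # space-separated set, so e.g. type_subif=u"two_tags dot1ad exact_match"
        # ORs SUB_IF_API_FLAG_TWO_TAGS | SUB_IF_API_FLAG_DOT1AD |
        # SUB_IF_API_FLAG_EXACT_MATCH into the flags value passed to the API.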
- cmd = 'create_subif'
+ cmd = u"create_subif"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
sub_id=int(sub_id),
- sub_if_flags=flags.value if hasattr(flags, 'value') else int(flags),
+ sub_if_flags=flags.value if hasattr(flags, u"value")
+ else int(flags),
outer_vlan_id=int(outer_vlan_id) if outer_vlan_id else 0,
inner_vlan_id=int(inner_vlan_id) if inner_vlan_id else 0
)
- err_msg = 'Failed to create sub-interface on host {host}'.format(
- host=node['host'])
+ err_msg = f"Failed to create sub-interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- if_key = Topology.add_new_port(node, 'subinterface')
+ if_key = Topology.add_new_port(node, u"subinterface")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
Topology.update_interface_name(node, if_key, ifc_name)
- return '{ifc}.{s_id}'.format(ifc=interface, s_id=sub_id), sw_if_index
+ return f"{interface}.{sub_id}", sw_if_index
@staticmethod
def create_gre_tunnel_interface(node, source_ip, destination_ip):
:rtype: tuple
:raises RuntimeError: If unable to create GRE tunnel interface.
"""
- cmd = 'gre_tunnel_add_del'
- tunnel = dict(type=0,
- instance=Constants.BITWISE_NON_ZERO,
- src=str(source_ip),
- dst=str(destination_ip),
- outer_fib_id=0,
- session_id=0)
- args = dict(is_add=1,
- tunnel=tunnel)
- err_msg = 'Failed to create GRE tunnel interface on host {host}'.format(
- host=node['host'])
+ cmd = u"gre_tunnel_add_del"
+ tunnel = dict(
+ type=0,
+ instance=Constants.BITWISE_NON_ZERO,
+ src=str(source_ip),
+ dst=str(destination_ip),
+ outer_fib_id=0,
+ session_id=0
+ )
+ args = dict(
+ is_add=1,
+ tunnel=tunnel
+ )
+ err_msg = f"Failed to create GRE tunnel interface " \
+ f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- if_key = Topology.add_new_port(node, 'gre_tunnel')
+ if_key = Topology.add_new_port(node, u"gre_tunnel")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
Topology.update_interface_name(node, if_key, ifc_name)
:raises RuntimeError: If it is not possible to create loopback on the
node.
"""
- cmd = 'create_loopback'
- args = dict(mac_address=L2Util.mac_to_bin(mac) if mac else 0)
- err_msg = 'Failed to create loopback interface on host {host}'.format(
- host=node['host'])
+ cmd = u"create_loopback"
+ args = dict(
+ mac_address=L2Util.mac_to_bin(mac) if mac else 0
+ )
+ err_msg = f"Failed to create loopback interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
- if_key = Topology.add_new_port(node, 'loopback')
+ if_key = Topology.add_new_port(node, u"loopback")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
Topology.update_interface_name(node, if_key, ifc_name)
:raises RuntimeError: If it is not possible to create bond interface on
the node.
"""
- cmd = 'bond_create'
+ cmd = u"bond_create"
args = dict(
id=int(Constants.BITWISE_NON_ZERO),
use_custom_mac=False if mac is None else True,
mac_address=L2Util.mac_to_bin(mac) if mac else None,
- mode=getattr(LinkBondMode, 'BOND_API_MODE_{md}'.format(
- md=mode.replace('-', '_').upper())).value,
+ mode=getattr(
+ LinkBondMode,
+ f"BOND_API_MODE_{mode.replace(u'-', u'_').upper()}"
+ ).value,
lb=0 if load_balance is None else getattr(
- LinkBondLoadBalanceAlgo, 'BOND_API_LB_ALGO_{lb}'.format(
- lb=load_balance.upper())).value,
+ LinkBondLoadBalanceAlgo,
+ f"BOND_API_LB_ALGO_{load_balance.upper()}"
+ ).value,
numa_only=False
)
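        # Illustrative note (not part of the diff): the getattr lookups map
        # keyword-style inputs onto PAPI enum members, so e.g. mode=u"lacp"
        # would resolve to LinkBondMode.BOND_API_MODE_LACP and
        # load_balance=u"l34" to LinkBondLoadBalanceAlgo.BOND_API_LB_ALGO_L34
        # (member names assumed from the generated enums).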
- err_msg = 'Failed to create bond interface on host {host}'.format(
- host=node['host'])
+ err_msg = f"Failed to create bond interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
InterfaceUtil.add_eth_interface(
- node, sw_if_index=sw_if_index, ifc_pfx='eth_bond')
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_bond"
+ )
if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
return if_key
the node.
"""
PapiSocketExecutor.run_cli_cmd(
- node, 'set logging class avf level debug')
-
- cmd = 'avf_create'
- args = dict(pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
- enable_elog=0,
- rxq_num=int(num_rx_queues) if num_rx_queues else 0,
- rxq_size=0,
- txq_size=0)
- err_msg = 'Failed to create AVF interface on host {host}'.format(
- host=node['host'])
+ node, u"set logging class avf level debug"
+ )
+
+ cmd = u"avf_create"
+ args = dict(
+ pci_addr=InterfaceUtil.pci_to_int(vf_pci_addr),
+ enable_elog=0,
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=0,
+ txq_size=0
+ )
+ err_msg = f"Failed to create AVF interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
InterfaceUtil.add_eth_interface(
- node, sw_if_index=sw_if_index, ifc_pfx='eth_avf')
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_avf"
+ )
if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
return if_key
:raises RuntimeError: If it is not possible to create RDMA interface on
the node.
"""
- cmd = 'rdma_create'
- args = dict(name=InterfaceUtil.pci_to_eth(node, pci_addr),
- host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
- rxq_num=int(num_rx_queues) if num_rx_queues else 0,
- rxq_size=0,
- txq_size=0)
- err_msg = 'Failed to create RDMA interface on host {host}'.format(
- host=node['host'])
+ cmd = u"rdma_create"
+ args = dict(
+ name=InterfaceUtil.pci_to_eth(node, pci_addr),
+ host_if=InterfaceUtil.pci_to_eth(node, pci_addr),
+ rxq_num=int(num_rx_queues) if num_rx_queues else 0,
+ rxq_size=0,
+ txq_size=0
+ )
+ err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
InterfaceUtil.add_eth_interface(
- node, sw_if_index=sw_if_index, ifc_pfx='eth_rdma')
+ node, sw_if_index=sw_if_index, ifc_pfx=u"eth_rdma"
+ )
if_key = Topology.get_interface_by_sw_index(node, sw_if_index)
return if_key
:raises RuntimeError: If it is not possible to enslave physical
interface to bond interface on the node.
"""
- cmd = 'bond_enslave'
+ cmd = u"bond_enslave"
args = dict(
sw_if_index=Topology.get_interface_sw_index(node, interface),
bond_sw_if_index=Topology.get_interface_sw_index(node, bond_if),
is_passive=False,
is_long_timeout=False
)
- err_msg = 'Failed to enslave physical interface {ifc} to bond ' \
- 'interface {bond} on host {host}'.format(ifc=interface,
- bond=bond_if,
- host=node['host'])
+ err_msg = f"Failed to enslave physical interface {interface} to bond " \
+ f"interface {bond_if} on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type node: dict
:type verbose: bool
"""
- cmd = 'sw_interface_bond_dump'
- err_msg = 'Failed to get bond interface dump on host {host}'.format(
- host=node['host'])
+ cmd = u"sw_interface_bond_dump"
+ err_msg = f"Failed to get bond interface dump on host {node[u'host']}"
- data = ('Bond data on node {host}:\n'.format(host=node['host']))
+ data = f"Bond data on node {node[u'host']}:\n"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details(err_msg)
for bond in details:
- data += ('{b}\n'.format(b=bond['interface_name']))
- data += (' mode: {m}\n'.format(
- m=bond['mode'].name.replace('BOND_API_MODE_', '').lower()))
- data += (' load balance: {lb}\n'.format(
- lb=bond['lb'].name.replace('BOND_API_LB_ALGO_', '').lower()))
- data += (' number of active slaves: {n}\n'.format(
- n=bond['active_slaves']))
+ data += f"{bond[u'interface_name']}\n"
+ data += u" mode: {m}\n".format(
+ m=bond[u"mode"].name.replace(u"BOND_API_MODE_", u"").lower()
+ )
+ data += u" load balance: {lb}\n".format(
+ lb=bond[u"lb"].name.replace(u"BOND_API_LB_ALGO_", u"").lower()
+ )
+ data += f" number of active slaves: {bond[u'active_slaves']}\n"
if verbose:
slave_data = InterfaceUtil.vpp_bond_slave_dump(
node, Topology.get_interface_by_sw_index(
- node, bond['sw_if_index']))
+ node, bond[u"sw_if_index"]
+ )
+ )
for slave in slave_data:
- if not slave['is_passive']:
- data += (' {s}\n'.format(s=slave['interface_name']))
- data += (' number of slaves: {n}\n'.format(n=bond['slaves']))
+ if not slave[u"is_passive"]:
+ data += f" {slave[u'interface_name']}\n"
+ data += f" number of slaves: {bond[u'slaves']}\n"
if verbose:
for slave in slave_data:
- data += (' {s}\n'.format(s=slave['interface_name']))
- data += (' interface id: {i}\n'.format(i=bond['id']))
- data += (' sw_if_index: {i}\n'.format(i=bond['sw_if_index']))
+ data += f" {slave[u'interface_name']}\n"
+ data += f" interface id: {bond[u'id']}\n"
+ data += f" sw_if_index: {bond[u'sw_if_index']}\n"
logger.info(data)
@staticmethod
:returns: Bond slave interface data.
:rtype: dict
"""
- cmd = 'sw_interface_slave_dump'
- args = dict(sw_if_index=Topology.get_interface_sw_index(
- node, interface))
- err_msg = 'Failed to get slave dump on host {host}'.format(
- host=node['host'])
+ cmd = u"sw_interface_slave_dump"
+ args = dict(
+ sw_if_index=Topology.get_interface_sw_index(node, interface)
+ )
+ err_msg = f"Failed to get slave dump on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
- logger.debug('Slave data:\n{slave_data}'.format(slave_data=details))
+ logger.debug(f"Slave data:\n{details}")
return details
@staticmethod
:type verbose: bool
"""
for node_data in nodes.values():
- if node_data['type'] == NodeType.DUT:
+ if node_data[u"type"] == NodeType.DUT:
InterfaceUtil.vpp_show_bond_data_on_node(node_data, verbose)
@staticmethod
- def vpp_enable_input_acl_interface(node, interface, ip_version,
- table_index):
+ def vpp_enable_input_acl_interface(
+ node, interface, ip_version, table_index):
"""Enable input acl on interface.
:param node: VPP node to setup interface for input acl.
:type ip_version: str
:type table_index: int
"""
- cmd = 'input_acl_set_interface'
+ cmd = u"input_acl_set_interface"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
- ip4_table_index=table_index if ip_version == 'ip4'
+ ip4_table_index=table_index if ip_version == u"ip4"
else Constants.BITWISE_NON_ZERO,
- ip6_table_index=table_index if ip_version == 'ip6'
+ ip6_table_index=table_index if ip_version == u"ip6"
else Constants.BITWISE_NON_ZERO,
- l2_table_index=table_index if ip_version == 'l2'
+ l2_table_index=table_index if ip_version == u"l2"
else Constants.BITWISE_NON_ZERO,
is_add=1)
- err_msg = 'Failed to enable input acl on interface {ifc}'.format(
- ifc=interface)
+ err_msg = f"Failed to enable input acl on interface {interface}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:returns: Classify table name.
:rtype: str
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = InterfaceUtil.get_sw_if_index(node, interface)
else:
sw_if_index = interface
- cmd = 'classify_table_by_interface'
- args = dict(sw_if_index=sw_if_index)
- err_msg = 'Failed to get classify table name by interface {ifc}'.format(
- ifc=interface)
+ cmd = u"classify_table_by_interface"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get classify table name by interface {interface}"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
:rtype: str
"""
interface_data = InterfaceUtil.vpp_get_interface_data(
- node, interface=interface_name)
- return interface_data.get('sw_if_index')
+ node, interface=interface_name
+ )
+ return interface_data.get(u"sw_if_index")
@staticmethod
def vxlan_gpe_dump(node, interface_name=None):
:returns: Processed vxlan_gpe interface dump.
:rtype: dict
"""
- if vxlan_dump['is_ipv6']:
- vxlan_dump['local'] = \
- ip_address(unicode(vxlan_dump['local']))
- vxlan_dump['remote'] = \
- ip_address(unicode(vxlan_dump['remote']))
+ if vxlan_dump[u"is_ipv6"]:
+ vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"])
+ vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"])
else:
- vxlan_dump['local'] = \
- ip_address(unicode(vxlan_dump['local'][0:4]))
- vxlan_dump['remote'] = \
- ip_address(unicode(vxlan_dump['remote'][0:4]))
+ vxlan_dump[u"local"] = ip_address(vxlan_dump[u"local"][0:4])
+ vxlan_dump[u"remote"] = ip_address(vxlan_dump[u"remote"][0:4])
return vxlan_dump
if interface_name is not None:
sw_if_index = InterfaceUtil.get_interface_index(
- node, interface_name)
+ node, interface_name
+ )
else:
sw_if_index = int(Constants.BITWISE_NON_ZERO)
- cmd = 'vxlan_gpe_tunnel_dump'
- args = dict(sw_if_index=sw_if_index)
- err_msg = 'Failed to get VXLAN-GPE dump on host {host}'.format(
- host=node['host'])
+ cmd = u"vxlan_gpe_tunnel_dump"
+ args = dict(
+ sw_if_index=sw_if_index
+ )
+ err_msg = f"Failed to get VXLAN-GPE dump on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for dump in details:
if interface_name is None:
data.append(process_vxlan_gpe_dump(dump))
- elif dump['sw_if_index'] == sw_if_index:
+ elif dump[u"sw_if_index"] == sw_if_index:
data = process_vxlan_gpe_dump(dump)
break
- logger.debug('VXLAN-GPE data:\n{vxlan_gpe_data}'.format(
- vxlan_gpe_data=data))
+ logger.debug(f"VXLAN-GPE data:\n{data}")
return data
@staticmethod
:type table_id: int
:type ipv6: bool
"""
- cmd = 'sw_interface_set_table'
+ cmd = u"sw_interface_set_table"
args = dict(
sw_if_index=InterfaceUtil.get_interface_index(node, interface),
is_ipv6=ipv6,
- vrf_id=int(table_id))
- err_msg = 'Failed to assign interface {ifc} to FIB table'.format(
- ifc=interface)
+ vrf_id=int(table_id)
+ )
+ err_msg = f"Failed to assign interface {interface} to FIB table"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def set_linux_interface_mac(node, interface, mac, namespace=None,
- vf_id=None):
+ def set_linux_interface_mac(
+ node, interface, mac, namespace=None, vf_id=None):
"""Set MAC address for interface in linux.
:param node: Node where to execute command.
:type namespace: str
:type vf_id: int
"""
- mac_str = 'vf {vf_id} mac {mac}'.format(vf_id=vf_id, mac=mac) \
- if vf_id is not None else 'address {mac}'.format(mac=mac)
- ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+ mac_str = f"vf {vf_id} mac {mac}" if vf_id is not None \
+ else f"address {mac}"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
- cmd = ('{ns} ip link set {interface} {mac}'.
- format(ns=ns_str, interface=interface, mac=mac_str))
+ cmd = f"{ns_str} ip link set {interface} {mac_str}"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def set_linux_interface_trust_on(node, interface, namespace=None,
- vf_id=None):
+ def set_linux_interface_trust_on(
+ node, interface, namespace=None, vf_id=None):
"""Set trust on (promisc) for interface in linux.
:param node: Node where to execute command.
:type namespace: str
:type vf_id: int
"""
- trust_str = 'vf {vf_id} trust on'.format(vf_id=vf_id) \
- if vf_id is not None else 'trust on'
- ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+ trust_str = f"vf {vf_id} trust on" if vf_id is not None else u"trust on"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
- cmd = ('{ns} ip link set dev {interface} {trust}'.
- format(ns=ns_str, interface=interface, trust=trust_str))
+ cmd = f"{ns_str} ip link set dev {interface} {trust_str}"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def set_linux_interface_spoof_off(node, interface, namespace=None,
- vf_id=None):
+ def set_linux_interface_spoof_off(
+ node, interface, namespace=None, vf_id=None):
"""Set spoof off for interface in linux.
:param node: Node where to execute command.
:type namespace: str
:type vf_id: int
"""
- spoof_str = 'vf {vf_id} spoof off'.format(vf_id=vf_id) \
- if vf_id is not None else 'spoof off'
- ns_str = 'ip netns exec {ns}'.format(ns=namespace) if namespace else ''
+ spoof_str = f"vf {vf_id} spoof off" if vf_id is not None \
+ else u"spoof off"
+ ns_str = f"ip netns exec {namespace}" if namespace else u""
- cmd = ('{ns} ip link set dev {interface} {spoof}'.
- format(ns=ns_str, interface=interface, spoof=spoof_str))
+ cmd = f"{ns_str} ip link set dev {interface} {spoof_str}"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def init_avf_interface(node, ifc_key, numvfs=1, osi_layer='L2'):
+ def init_avf_interface(node, ifc_key, numvfs=1, osi_layer=u"L2"):
"""Init PCI device by creating VIFs and bind them to vfio-pci for AVF
driver testing on DUT.
pf_mac_addr = Topology.get_interface_mac(node, ifc_key).split(":")
uio_driver = Topology.get_uio_driver(node)
kernel_driver = Topology.get_interface_driver(node, ifc_key)
- if kernel_driver not in ("i40e", "i40evf"):
+ if kernel_driver not in (u"i40e", u"i40evf"):
raise RuntimeError(
- "AVF needs i40e-compatible driver, not {driver} at node {host}"
- " ifc {ifc}".format(
- driver=kernel_driver, host=node["host"], ifc=ifc_key))
+ f"AVF needs i40e-compatible driver, not {kernel_driver} "
+ f"at node {node[u'host']} ifc {ifc_key}"
+ )
current_driver = DUTSetup.get_pci_dev_driver(
- node, pf_pci_addr.replace(':', r'\:'))
+ node, pf_pci_addr.replace(u":", r"\:"))
VPPUtil.stop_vpp_service(node)
if current_driver != kernel_driver:
vf_ifc_keys = []
# Set MAC address and bind each virtual function to uio driver.
for vf_id in range(numvfs):
- vf_mac_addr = ":".join([pf_mac_addr[0], pf_mac_addr[2],
- pf_mac_addr[3], pf_mac_addr[4],
- pf_mac_addr[5], "{:02x}".format(vf_id)])
+ vf_mac_addr = u":".join(
+ [pf_mac_addr[0], pf_mac_addr[2], pf_mac_addr[3], pf_mac_addr[4],
+ pf_mac_addr[5], f"{vf_id:02x}"
+ ]
+ )
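            # Illustrative note (not part of the diff, values hypothetical):
            # with a PF MAC of 3c:fd:fe:9c:94:3a, octet [1] is dropped and the
            # VF id appended, so vf_id=0 yields u"3c:fe:9c:94:3a:00".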
- pf_dev = '`basename /sys/bus/pci/devices/{pci}/net/*`'.\
- format(pci=pf_pci_addr)
+ pf_dev = f"`basename /sys/bus/pci/devices/{pf_pci_addr}/net/*`"
InterfaceUtil.set_linux_interface_trust_on(node, pf_dev,
vf_id=vf_id)
- if osi_layer == 'L2':
- InterfaceUtil.set_linux_interface_spoof_off(node, pf_dev,
- vf_id=vf_id)
- InterfaceUtil.set_linux_interface_mac(node, pf_dev, vf_mac_addr,
- vf_id=vf_id)
+ if osi_layer == u"L2":
+ InterfaceUtil.set_linux_interface_spoof_off(
+ node, pf_dev, vf_id=vf_id
+ )
+ InterfaceUtil.set_linux_interface_mac(
+ node, pf_dev, vf_mac_addr, vf_id=vf_id
+ )
DUTSetup.pci_vf_driver_unbind(node, pf_pci_addr, vf_id)
DUTSetup.pci_vf_driver_bind(node, pf_pci_addr, vf_id, uio_driver)
# Add newly created ports into topology file
- vf_ifc_name = '{pf_if_key}_vif'.format(pf_if_key=ifc_key)
+ vf_ifc_name = f"{ifc_key}_vif"
vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_ifc_key = Topology.add_new_port(node, vf_ifc_name)
- Topology.update_interface_name(node, vf_ifc_key,
- vf_ifc_name+str(vf_id+1))
+ Topology.update_interface_name(
+ node, vf_ifc_key, vf_ifc_name+str(vf_id+1)
+ )
Topology.update_interface_mac_address(node, vf_ifc_key, vf_mac_addr)
Topology.update_interface_pci_address(node, vf_ifc_key, vf_pci_addr)
vf_ifc_keys.append(vf_ifc_key)
:returns: Thread mapping information as a list of dictionaries.
:rtype: list
"""
- cmd = 'sw_interface_rx_placement_dump'
- err_msg = "Failed to run '{cmd}' PAPI command on host {host}!".format(
- cmd=cmd, host=node['host'])
+ cmd = u"sw_interface_rx_placement_dump"
+ err_msg = f"Failed to run '{cmd}' PAPI command on host {node[u'host']}!"
with PapiSocketExecutor(node) as papi_exec:
- for ifc in node['interfaces'].values():
- if ifc['vpp_sw_index'] is not None:
- papi_exec.add(cmd, sw_if_index=ifc['vpp_sw_index'])
+ for ifc in node[u"interfaces"].values():
+ if ifc[u"vpp_sw_index"] is not None:
+ papi_exec.add(cmd, sw_if_index=ifc[u"vpp_sw_index"])
details = papi_exec.get_details(err_msg)
- return sorted(details, key=lambda k: k['sw_if_index'])
+ return sorted(details, key=lambda k: k[u"sw_if_index"])
@staticmethod
- def vpp_sw_interface_set_rx_placement(node, sw_if_index, queue_id,
- worker_id):
+ def vpp_sw_interface_set_rx_placement(
+ node, sw_if_index, queue_id, worker_id):
"""Set interface RX placement to worker on node.
:param node: Node to run command on.
:raises RuntimeError: If failed to run command on host or if no API
reply received.
"""
- cmd = 'sw_interface_set_rx_placement'
- err_msg = "Failed to set interface RX placement to worker on host " \
- "{host}!".format(host=node['host'])
+ cmd = u"sw_interface_set_rx_placement"
+ err_msg = f"Failed to set interface RX placement to worker " \
+ f"on host {node[u'host']}!"
args = dict(
sw_if_index=sw_if_index,
queue_id=queue_id,
if not worker_cnt:
return
for placement in InterfaceUtil.vpp_sw_interface_rx_placement_dump(node):
- for interface in node['interfaces'].values():
- if placement['sw_if_index'] == interface['vpp_sw_index'] \
- and prefix in interface['name']:
+ for interface in node[u"interfaces"].values():
+ if placement[u"sw_if_index"] == interface[u"vpp_sw_index"] \
+ and prefix in interface[u"name"]:
InterfaceUtil.vpp_sw_interface_set_rx_placement(
- node, placement['sw_if_index'], placement['queue_id'],
- worker_id % worker_cnt)
+ node, placement[u"sw_if_index"], placement[u"queue_id"],
+ worker_id % worker_cnt
+ )
worker_id += 1
@staticmethod
:type prefix: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
InterfaceUtil.vpp_round_robin_rx_placement(node, prefix)
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Library to control Kubernetes kubectl."""
+from functools import reduce
+from io import open
from time import sleep
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.topology import NodeType
-from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
+from resources.libraries.python.topology import NodeType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
-__all__ = ["KubernetesUtils"]
+__all__ = [u"KubernetesUtils"]
# Maximum number of retries to check if PODs are running or deleted.
MAX_RETRY = 48
+
class KubernetesUtils(object):
"""Kubernetes utilities class."""
:type image_path: str
:raises RuntimeError: If loading image failed on node.
"""
- command = 'docker load -i {image_path}'.\
- format(image_path=image_path)
- message = 'Failed to load Docker image on {node}.'.\
- format(node=node['host'])
- exec_cmd_no_error(node, command, timeout=240, sudo=True,
- message=message)
-
- command = "docker rmi $(sudo docker images -f 'dangling=true' -q)".\
- format(image_path=image_path)
- message = 'Failed to clean Docker images on {node}.'.\
- format(node=node['host'])
+ command = f"docker load -i {image_path}"
+ message = f"Failed to load Docker image on {node[u'host']}."
+ exec_cmd_no_error(
+ node, command, timeout=240, sudo=True, message=message
+ )
+
+ command = u"docker rmi $(sudo docker images -f 'dangling=true' -q)"
+ message = f"Failed to clean Docker images on {node[u'host']}."
try:
- exec_cmd_no_error(node, command, timeout=240, sudo=True,
- message=message)
+ exec_cmd_no_error(
+ node, command, timeout=240, sudo=True, message=message
+ )
except RuntimeError:
pass
:type image_path: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.load_docker_image_on_node(node, image_path)
@staticmethod
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh deploy_calico'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
- (ret_code, _, _) = ssh.exec_command(cmd, timeout=240)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh deploy_calico"
+ ret_code, _, _ = ssh.exec_command(cmd, timeout=240)
if int(ret_code) != 0:
- raise RuntimeError('Failed to setup Kubernetes on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+                f"Failed to setup Kubernetes on {node[u'host']}."
+ )
- KubernetesUtils.wait_for_kubernetes_pods_on_node(node,
- nspace='kube-system')
+ KubernetesUtils.wait_for_kubernetes_pods_on_node(
+ node, nspace=u"kube-system"
+ )
@staticmethod
def setup_kubernetes_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.setup_kubernetes_on_node(node)
@staticmethod
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh destroy'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
- (ret_code, _, _) = ssh.exec_command(cmd, timeout=120)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh destroy"
+
+ ret_code, _, _ = ssh.exec_command(cmd, timeout=120)
if int(ret_code) != 0:
- raise RuntimeError('Failed to destroy Kubernetes on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to destroy Kubernetes on {node[u'host']}."
+ )
@staticmethod
def destroy_kubernetes_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.destroy_kubernetes_on_node(node)
@staticmethod
ssh = SSH()
ssh.connect(node)
- fqn_file = '{tpl}/{yaml}'.format(tpl=Constants.RESOURCES_TPL_K8S,
- yaml=yaml_file)
+ fqn_file = f"{Constants.RESOURCES_TPL_K8S}/{yaml_file}"
with open(fqn_file, 'r') as src_file:
stream = src_file.read()
- data = reduce(lambda a, kv: a.replace(*kv), kwargs.iteritems(),
- stream)
- cmd = 'cat <<EOF | kubectl apply -f - \n{data}\nEOF'.format(
- data=data)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ data = reduce(
+ lambda a, kv: a.replace(*kv), list(kwargs.items()), stream
+ )
+ cmd = f"cat <<EOF | kubectl apply -f - \n{data}\nEOF"
+
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to apply Kubernetes template {yaml} '
- 'on {node}.'.format(yaml=yaml_file,
- node=node['host']))
+ raise RuntimeError(
+ f"Failed to apply Kubernetes template {yaml_file} "
+ f"on {node[u'host']}."
+ )
@staticmethod
def apply_kubernetes_resource_on_all_duts(nodes, yaml_file, **kwargs):
:type kwargs: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.apply_kubernetes_resource_on_node(node,
- yaml_file,
- **kwargs)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.apply_kubernetes_resource_on_node(
+ node, yaml_file, **kwargs
+ )
@staticmethod
def create_kubernetes_cm_from_file_on_node(node, nspace, name, **kwargs):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
-
- from_file = '{0}'.format(' '.join('--from-file={0}={1} '\
- .format(key, kwargs[key]) for key in kwargs))
+ nspace = f"-n {nspace}" if nspace else u""
+ from_file = u" ".join(
+ f"--from-file={key}={kwargs[key]} " for key in kwargs
+ )
+ cmd = f"kubectl create {nspace} configmap {name} {from_file}"
- cmd = 'kubectl create {nspace} configmap {name} {from_file}'\
- .format(nspace=nspace, name=name, from_file=from_file)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to create Kubernetes ConfigMap '
- 'on {node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to create Kubernetes ConfigMap on {node[u'host']}."
+ )
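        # Illustrative note (not part of the diff, names hypothetical): with
        # kwargs={u"vpp.conf": u"/tmp/vpp.conf"} the assembled command is
        # roughly "kubectl create -n csit configmap vpp-cm
        # --from-file=vpp.conf=/tmp/vpp.conf", one --from-file per key.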
@staticmethod
- def create_kubernetes_cm_from_file_on_all_duts(nodes, nspace, name,
- **kwargs):
+ def create_kubernetes_cm_from_file_on_all_duts(
+ nodes, nspace, name, **kwargs):
"""Create Kubernetes ConfigMap from file on all DUTs.
:param nodes: Topology nodes.
:param kwargs: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.create_kubernetes_cm_from_file_on_node(node,
- nspace,
- name,
- **kwargs)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.create_kubernetes_cm_from_file_on_node(
+ node, nspace, name, **kwargs
+ )
@staticmethod
- def delete_kubernetes_resource_on_node(node, nspace, name=None,
- rtype='po,cm,deploy,rs,rc,svc'):
+ def delete_kubernetes_resource_on_node(
+ node, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"):
"""Delete Kubernetes resource on node.
:param node: DUT node.
ssh = SSH()
ssh.connect(node)
- name = '{name}'.format(name=name) if name else '--all'
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ name = f"{name}" if name else u"--all"
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"kubectl delete {nspace} {rtype} {name}"
- cmd = 'kubectl delete {nspace} {rtype} {name}'\
- .format(nspace=nspace, rtype=rtype, name=name)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
+ ret_code, _, _ = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
- raise RuntimeError('Failed to delete Kubernetes resources '
- 'on {node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to delete Kubernetes resources on {node[u'host']}."
+ )
- cmd = 'kubectl get {nspace} pods --no-headers'\
- .format(nspace=nspace)
+ cmd = f"kubectl get {nspace} pods --no-headers"
for _ in range(MAX_RETRY):
- (ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd)
+ ret_code, stdout, stderr = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
- raise RuntimeError('Failed to retrieve Kubernetes resources on '
- '{node}.'.format(node=node['host']))
- if name == '--all':
+ raise RuntimeError(
+ f"Failed to retrieve Kubernetes resources "
+ f"on {node[u'host']}."
+ )
+ if name == u"--all":
ready = False
for line in stderr.splitlines():
- if 'No resources found.' in line:
+ if u"No resources found." in line:
ready = True
if ready:
break
ready = False
for line in stdout.splitlines():
try:
- state = line.split()[1].split('/')
- ready = True if 'Running' in line and\
+ state = line.split()[1].split(u"/")
+                    ready = True if u"Running" in line and \
state == state[::-1] else False
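                    # Illustrative note (not part of the diff): the kubectl
                    # READY column reads e.g. u"1/1", so the palindrome test
                    # state == state[::-1] holds when the ready and desired
                    # counts display identically.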
if not ready:
break
break
sleep(5)
else:
- raise RuntimeError('Failed to delete Kubernetes resources on '
- '{node}.'.format(node=node['host']))
+ raise RuntimeError(
+ f"Failed to delete Kubernetes resources on {node[u'host']}."
+ )
@staticmethod
- def delete_kubernetes_resource_on_all_duts(nodes, nspace, name=None,
- rtype='po,cm,deploy,rs,rc,svc'):
+ def delete_kubernetes_resource_on_all_duts(
+ nodes, nspace, name=None, rtype=u"po,cm,deploy,rs,rc,svc"):
"""Delete all Kubernetes resource on all DUTs.
:param nodes: Topology nodes.
:type name: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.delete_kubernetes_resource_on_node(node, nspace,
- name, rtype)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.delete_kubernetes_resource_on_node(
+ node, nspace, name, rtype
+ )
@staticmethod
def describe_kubernetes_resource_on_node(node, nspace):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"kubectl describe {nspace} all"
- cmd = 'kubectl describe {nspace} all'.format(nspace=nspace)
ssh.exec_command_sudo(cmd)
@staticmethod
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
- KubernetesUtils.describe_kubernetes_resource_on_node(node,
- nspace)
+ if node[u"type"] == NodeType.DUT:
+ KubernetesUtils.describe_kubernetes_resource_on_node(
+ node, nspace
+ )
@staticmethod
def get_kubernetes_logs_on_node(node, nspace):
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace else ''
+ nspace = f"-n {nspace}" if nspace else u""
+ cmd = f"for p in $(kubectl get pods {nspace} " \
+ f"-o jsonpath='{{.items[*].metadata.name}}'); do echo $p; " \
+ f"kubectl logs {nspace} $p; done"
- cmd = "for p in $(kubectl get pods {nspace} -o jsonpath="\
- "'{{.items[*].metadata.name}}'); do echo $p; kubectl logs "\
- "{nspace} $p; done".format(nspace=nspace)
ssh.exec_command(cmd)
- cmd = "kubectl exec {nspace} etcdv3 -- etcdctl --endpoints "\
- "\"localhost:22379\" get \"/\" --prefix=true".format(nspace=nspace)
+ cmd = f"kubectl exec {nspace} etcdv3 -- etcdctl " \
+ f"--endpoints \"localhost:22379\" get \"/\" --prefix=true"
+
ssh.exec_command(cmd)
@staticmethod
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.get_kubernetes_logs_on_node(node, nspace)
@staticmethod
ssh = SSH()
ssh.connect(node)
- nspace = '-n {nspace}'.format(nspace=nspace) if nspace \
- else '--all-namespaces'
+ nspace = f"-n {nspace}" if nspace else u"--all-namespaces"
+ cmd = f"kubectl get {nspace} pods --no-headers"
- cmd = 'kubectl get {nspace} pods --no-headers' \
- .format(nspace=nspace)
for _ in range(MAX_RETRY):
- (ret_code, stdout, _) = ssh.exec_command_sudo(cmd)
+ ret_code, stdout, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) == 0:
ready = False
for line in stdout.splitlines():
try:
- state = line.split()[1].split('/')
- ready = True if 'Running' in line and \
+ state = line.split()[1].split(u"/")
+ ready = True if u"Running" in line and \
state == state[::-1] else False
if not ready:
break
break
sleep(5)
else:
- raise RuntimeError('Kubernetes PODs are not running on {node}.'
- .format(node=node['host']))
+ raise RuntimeError(
+ f"Kubernetes PODs are not running on {node[u'host']}."
+ )
@staticmethod
def wait_for_kubernetes_pods_on_all_duts(nodes, nspace):
:type nspace: str
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.wait_for_kubernetes_pods_on_node(node, nspace)
@staticmethod
ssh = SSH()
ssh.connect(node)
- cmd = '{dir}/{lib}/k8s_setup.sh affinity_non_vpp'\
- .format(dir=Constants.REMOTE_FW_DIR,
- lib=Constants.RESOURCES_LIB_SH)
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_LIB_SH}/" \
+ f"k8s_setup.sh affinity_non_vpp"
+
ssh.exec_command(cmd)
@staticmethod
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
KubernetesUtils.set_kubernetes_pods_affinity_on_node(node)
@staticmethod
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
-
- cpuset_cpus = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=2,
- cpu_cnt=kwargs['phy_cores'],
- smt_used=smt_used)
- cpuset_main = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=1,
- cpu_cnt=1,
- smt_used=smt_used)
+ smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])
+
+ cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=2,
+ cpu_cnt=kwargs[u"phy_cores"], smt_used=smt_used
+ )
+ cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1,
+ cpu_cnt=1, smt_used=smt_used
+ )
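        # Illustrative sketch (not part of the diff, CPU layout assumed): on
        # a node whose NUMA-0 core list starts [0, 1, 2, 3, ...], skip_cnt=2
        # with phy_cores=2 selects cores [2, 3] for workers, while skip_cnt=1,
        # cpu_cnt=1 reserves core 1 as the VPP main core.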
# Create config instance
vpp_config = VppConfigGenerator()
- vpp_config.set_node(kwargs['node'])
- vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.set_node(kwargs[u"node"])
+ vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
vpp_config.add_unix_nodaemon()
vpp_config.add_socksvr()
- vpp_config.add_heapsize('4G')
- vpp_config.add_ip_heap_size('4G')
- vpp_config.add_ip6_heap_size('4G')
- vpp_config.add_ip6_hash_buckets('2000000')
- if not kwargs['jumbo']:
+ vpp_config.add_heapsize(u"4G")
+ vpp_config.add_ip_heap_size(u"4G")
+ vpp_config.add_ip6_heap_size(u"4G")
+ vpp_config.add_ip6_hash_buckets(u"2000000")
+ if not kwargs[u"jumbo"]:
vpp_config.add_dpdk_no_multi_seg()
vpp_config.add_dpdk_no_tx_checksum_offload()
- vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq_count_int'])
- vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
- vpp_config.add_buffers_per_numa(kwargs['buffers_per_numa'])
+ vpp_config.add_dpdk_dev_default_rxq(kwargs[u"rxq_count_int"])
+ vpp_config.add_dpdk_dev(kwargs[u"if1"], kwargs[u"if2"])
+ vpp_config.add_buffers_per_numa(kwargs[u"buffers_per_numa"])
# We will pop first core from list to be main core
vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.write_config(filename=kwargs['filename'])
+ vpp_config.write_config(filename=kwargs[u"filename"])
@staticmethod
def create_kubernetes_vnf_startup_config(**kwargs):
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
- smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
- skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \
- (kwargs['phy_cores'] - 1)
- cpuset_cpus = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=skip_cnt,
- cpu_cnt=kwargs['phy_cores']-1,
- smt_used=smt_used)
- cpuset_main = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpu_node'],
- skip_cnt=1,
- cpu_cnt=1,
- smt_used=smt_used)
+ smt_used = CpuUtils.is_smt_enabled(kwargs[u"node"][u"cpuinfo"])
+ skip_cnt = kwargs[u"cpu_skip"] + (kwargs[u"i"] - 1) * \
+ (kwargs[u"phy_cores"] - 1)
+        cpuset_cpus = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"],
+ skip_cnt=skip_cnt, cpu_cnt=kwargs[u"phy_cores"]-1, smt_used=smt_used
+ )
+ cpuset_main = CpuUtils.cpu_slice_of_list_per_node(
+ node=kwargs[u"node"], cpu_node=kwargs[u"cpu_node"], skip_cnt=1,
+ cpu_cnt=1, smt_used=smt_used
+ )
# Create config instance
vpp_config = VppConfigGenerator()
- vpp_config.set_node(kwargs['node'])
- vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
+ vpp_config.set_node(kwargs[u"node"])
+ vpp_config.add_unix_cli_listen(value=u"0.0.0.0:5002")
vpp_config.add_unix_nodaemon()
vpp_config.add_socksvr()
# We will pop first core from list to be main core
vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
- corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
+ corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.add_plugin('disable', 'dpdk_plugin.so')
- vpp_config.write_config(filename=kwargs['filename'])
+ vpp_config.add_plugin(u"disable", [u"dpdk_plugin.so"])
+ vpp_config.write_config(filename=kwargs[u"filename"])
"""L2 Utilities Library."""
-import binascii
-from textwrap import wrap
-
from enum import IntEnum
from resources.libraries.python.Constants import Constants
:returns: Integer representation of MAC address.
:rtype: int
"""
- return int(mac_str.replace(':', ''), 16)
+ return int(mac_str.replace(u":", u""), 16)
@staticmethod
def int_to_mac(mac_int):
:returns: String representation of MAC address.
:rtype: str
"""
- return ':'.join(wrap("{:012x}".format(mac_int), width=2))
+ return u":".join(
+ f"{hex(mac_int)[2:]:0>12}"[i:i+2] for i in range(0, 12, 2)
+ )
@staticmethod
def mac_to_bin(mac_str):
:param mac_str: MAC address in string representation.
:type mac_str: str
:returns: Binary representation of MAC address.
- :rtype: binary
+ :rtype: bytes
"""
- return binascii.unhexlify(mac_str.replace(':', ''))
+ return bytes.fromhex(mac_str.replace(u":", u""))
@staticmethod
def bin_to_mac(mac_bin):
(\x01\x02\x03\x04\x05\x06) to string format (e.g. 01:02:03:04:05:06).
:param mac_bin: MAC address in binary representation.
- :type mac_bin: binary
+ :type mac_bin: bytes
:returns: String representation of MAC address.
:rtype: str
"""
- mac_str = ':'.join(binascii.hexlify(mac_bin)[i:i + 2]
- for i in range(0, 12, 2))
- return str(mac_str.decode('ascii'))
+ return u":".join(mac_bin.hex()[i:i + 2] for i in range(0, 12, 2))
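    # Illustrative round trip (not part of the diff):
    # >>> L2Util.mac_to_bin(u"01:02:03:04:05:06")
    # b'\x01\x02\x03\x04\x05\x06'
    # >>> L2Util.bin_to_mac(b'\x01\x02\x03\x04\x05\x06')
    # '01:02:03:04:05:06'
    # >>> L2Util.int_to_mac(1108152157446)  # == 0x010203040506
    # '01:02:03:04:05:06'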
@staticmethod
- def vpp_add_l2fib_entry(node, mac, interface, bd_id, static_mac=1,
- filter_mac=0, bvi_mac=0):
+ def vpp_add_l2fib_entry(
+ node, mac, interface, bd_id, static_mac=1, filter_mac=0, bvi_mac=0):
""" Create a static L2FIB entry on a VPP node.
:param node: Node to add L2FIB entry on.
:type filter_mac: int or str
:type bvi_mac: int or str
"""
-
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
- cmd = 'l2fib_add_del'
- err_msg = 'Failed to add L2FIB entry on host {host}'.format(
- host=node['host'])
- args = dict(mac=L2Util.mac_to_bin(mac),
- bd_id=int(bd_id),
- sw_if_index=sw_if_index,
- is_add=1,
- static_mac=int(static_mac),
- filter_mac=int(filter_mac),
- bvi_mac=int(bvi_mac))
+ cmd = u"l2fib_add_del"
+ err_msg = f"Failed to add L2FIB entry on host {node[u'host']}"
+ args = dict(
+ mac=L2Util.mac_to_bin(mac),
+ bd_id=int(bd_id),
+ sw_if_index=sw_if_index,
+ is_add=1,
+ static_mac=int(static_mac),
+ filter_mac=int(filter_mac),
+            bvi_mac=int(bvi_mac)
+        )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
@staticmethod
- def create_l2_bd(node, bd_id, flood=1, uu_flood=1, forward=1, learn=1,
- arp_term=0):
+ def create_l2_bd(
+ node, bd_id, flood=1, uu_flood=1, forward=1, learn=1, arp_term=0):
"""Create an L2 bridge domain on a VPP node.
:param node: Node where we wish to crate the L2 bridge domain.
:type learn: int or str
:type arp_term: int or str
"""
-
- cmd = 'bridge_domain_add_del'
- err_msg = 'Failed to create L2 bridge domain on host {host}'.format(
- host=node['host'])
- args = dict(bd_id=int(bd_id),
- flood=int(flood),
- uu_flood=int(uu_flood),
- forward=int(forward),
- learn=int(learn),
- arp_term=int(arp_term),
- is_add=1)
+ cmd = u"bridge_domain_add_del"
+ err_msg = f"Failed to create L2 bridge domain on host {node[u'host']}"
+ args = dict(
+ bd_id=int(bd_id),
+ flood=int(flood),
+ uu_flood=int(uu_flood),
+ forward=int(forward),
+ learn=int(learn),
+ arp_term=int(arp_term),
+ is_add=1
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type shg: int or str
:type port_type: int or str
"""
-
sw_if_index = Topology.get_interface_sw_index(node, interface)
- cmd = 'sw_interface_set_l2_bridge'
- err_msg = 'Failed to add interface {ifc} to L2 bridge domain on host ' \
- '{host}'.format(ifc=interface, host=node['host'])
- args = dict(rx_sw_if_index=sw_if_index,
- bd_id=int(bd_id),
- shg=int(shg),
- port_type=int(port_type),
- enable=1)
+ cmd = u"sw_interface_set_l2_bridge"
+ err_msg = f"Failed to add interface {interface} to L2 bridge domain " \
+ f"on host {node[u'host']}"
+ args = dict(
+ rx_sw_if_index=sw_if_index,
+ bd_id=int(bd_id),
+ shg=int(shg),
+ port_type=int(port_type),
+ enable=1
+ )
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:type port_2: str
:type learn: bool
"""
-
sw_if_index1 = Topology.get_interface_sw_index(node, port_1)
sw_if_index2 = Topology.get_interface_sw_index(node, port_2)
learn_int = 1 if learn else 0
- cmd1 = 'bridge_domain_add_del'
- args1 = dict(bd_id=int(bd_id),
- flood=1,
- uu_flood=1,
- forward=1,
- learn=learn_int,
- arp_term=0,
- is_add=1)
-
- cmd2 = 'sw_interface_set_l2_bridge'
- args2 = dict(rx_sw_if_index=sw_if_index1,
- bd_id=int(bd_id),
- shg=0,
- port_type=0,
- enable=1)
-
- args3 = dict(rx_sw_if_index=sw_if_index2,
- bd_id=int(bd_id),
- shg=0,
- port_type=0,
- enable=1)
-
- err_msg = 'Failed to add L2 bridge domain with 2 interfaces on host' \
- ' {host}'.format(host=node['host'])
+ cmd1 = u"bridge_domain_add_del"
+ args1 = dict(
+ bd_id=int(bd_id),
+ flood=1,
+ uu_flood=1,
+ forward=1,
+ learn=learn_int,
+ arp_term=0,
+ is_add=1
+ )
+
+ cmd2 = u"sw_interface_set_l2_bridge"
+ args2 = dict(
+ rx_sw_if_index=sw_if_index1,
+ bd_id=int(bd_id),
+ shg=0,
+ port_type=0,
+ enable=1
+ )
+
+ args3 = dict(
+ rx_sw_if_index=sw_if_index2,
+ bd_id=int(bd_id),
+ shg=0,
+ port_type=0,
+ enable=1
+ )
+
+ err_msg = f"Failed to add L2 bridge domain with 2 interfaces " \
+ f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd1, **args1).add(cmd2, **args2).add(cmd2, **args3)
:type interface1: str or int
:type interface2: str or int
"""
-
- if isinstance(interface1, basestring):
+ if isinstance(interface1, str):
sw_iface1 = Topology().get_interface_sw_index(node, interface1)
else:
sw_iface1 = interface1
- if isinstance(interface2, basestring):
+ if isinstance(interface2, str):
sw_iface2 = Topology().get_interface_sw_index(node, interface2)
else:
sw_iface2 = interface2
- cmd = 'sw_interface_set_l2_xconnect'
- args1 = dict(rx_sw_if_index=sw_iface1,
- tx_sw_if_index=sw_iface2,
- enable=1)
- args2 = dict(rx_sw_if_index=sw_iface2,
- tx_sw_if_index=sw_iface1,
- enable=1)
-
- err_msg = 'Failed to add L2 cross-connect between two interfaces on' \
- ' host {host}'.format(host=node['host'])
+ cmd = u"sw_interface_set_l2_xconnect"
+ args1 = dict(
+ rx_sw_if_index=sw_iface1,
+ tx_sw_if_index=sw_iface2,
+ enable=1
+ )
+ args2 = dict(
+ rx_sw_if_index=sw_iface2,
+ tx_sw_if_index=sw_iface1,
+ enable=1
+ )
+ err_msg = f"Failed to add L2 cross-connect between two interfaces " \
+            f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
:type interface1: str or int
:type interface2: str or int
"""
-
- if isinstance(interface1, basestring):
+ if isinstance(interface1, str):
sw_iface1 = Topology().get_interface_sw_index(node, interface1)
else:
sw_iface1 = interface1
- if isinstance(interface2, basestring):
+ if isinstance(interface2, str):
sw_iface2 = Topology().get_interface_sw_index(node, interface2)
else:
sw_iface2 = interface2
- cmd = 'l2_patch_add_del'
- args1 = dict(rx_sw_if_index=sw_iface1,
- tx_sw_if_index=sw_iface2,
- is_add=1)
- args2 = dict(rx_sw_if_index=sw_iface2,
- tx_sw_if_index=sw_iface1,
- is_add=1)
-
- err_msg = 'Failed to add L2 patch between two interfaces on' \
- ' host {host}'.format(host=node['host'])
+ cmd = u"l2_patch_add_del"
+ args1 = dict(
+ rx_sw_if_index=sw_iface1,
+ tx_sw_if_index=sw_iface2,
+ is_add=1
+ )
+ args2 = dict(
+ rx_sw_if_index=sw_iface2,
+ tx_sw_if_index=sw_iface1,
+ is_add=1
+ )
+ err_msg = f"Failed to add L2 patch between two interfaces " \
+            f"on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args1).add(cmd, **args2).get_replies(err_msg)
:type if_2: str
:type set_up: bool
"""
-
- cmd = 'brctl addbr {0}'.format(br_name)
+ cmd = f"brctl addbr {br_name}"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = 'brctl addif {0} {1}'.format(br_name, if_1)
+
+ cmd = f"brctl addif {br_name} {if_1}"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = 'brctl addif {0} {1}'.format(br_name, if_2)
+
+ cmd = f"brctl addif {br_name} {if_2}"
exec_cmd_no_error(node, cmd, sudo=True)
+
if set_up:
- cmd = 'ip link set dev {0} up'.format(br_name)
+ cmd = f"ip link set dev {br_name} up"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
:type br_name: str
:type set_down: bool
"""
-
if set_down:
- cmd = 'ip link set dev {0} down'.format(br_name)
+ cmd = f"ip link set dev {br_name} down"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = 'brctl delbr {0}'.format(br_name)
+
+ cmd = f"brctl delbr {br_name}"
exec_cmd_no_error(node, cmd, sudo=True)
@staticmethod
- def vpp_get_bridge_domain_data(node, bd_id=0xffffffff):
+ def vpp_get_bridge_domain_data(node, bd_id=Constants.BITWISE_NON_ZERO):
"""Get all bridge domain data from a VPP node. If a domain ID number is
provided, return only data for the matching bridge domain.
or a single dictionary for the specified bridge domain.
:rtype: list or dict
"""
+ cmd = u"bridge_domain_dump"
+ args = dict(
+ bd_id=int(bd_id)
+ )
+ err_msg = f"Failed to get L2FIB dump on host {node[u'host']}"
- cmd = 'bridge_domain_dump'
- args = dict(bd_id=int(bd_id))
- err_msg = 'Failed to get L2FIB dump on host {host}'.format(
- host=node['host'])
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
if bd_id == Constants.BITWISE_NON_ZERO:
return details
+
for bridge_domain in details:
- if bridge_domain['bd_id'] == bd_id:
+ if bridge_domain[u"bd_id"] == bd_id:
return bridge_domain
@staticmethod
- def l2_vlan_tag_rewrite(node, interface, tag_rewrite_method,
- push_dot1q=True, tag1_id=None, tag2_id=None):
+ def l2_vlan_tag_rewrite(
+ node, interface, tag_rewrite_method, push_dot1q=True, tag1_id=None,
+ tag2_id=None):
"""Rewrite tags in ethernet frame.
:param node: Node to rewrite tags.
:type tag1_id: int
:type tag2_id: int
"""
-
tag1_id = int(tag1_id) if tag1_id else 0
tag2_id = int(tag2_id) if tag2_id else 0
- vtr_oper = getattr(L2VtrOp, 'L2_VTR_{}'.format(
- tag_rewrite_method.replace('-', '_').upper()))
+ vtr_oper = getattr(
+ L2VtrOp, f"L2_VTR_{tag_rewrite_method.replace(u'-', u'_').upper()}"
+ )
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
iface_key = Topology.get_interface_by_name(node, interface)
sw_if_index = Topology.get_interface_sw_index(node, iface_key)
else:
sw_if_index = interface
- cmd = 'l2_interface_vlan_tag_rewrite'
- args = dict(sw_if_index=sw_if_index,
- vtr_op=int(vtr_oper),
- push_dot1q=int(push_dot1q),
- tag1=tag1_id,
- tag2=tag2_id)
- err_msg = 'Failed to set VLAN TAG rewrite on host {host}'.format(
- host=node['host'])
+ cmd = u"l2_interface_vlan_tag_rewrite"
+ args = dict(
+ sw_if_index=sw_if_index,
+ vtr_op=int(vtr_oper),
+ push_dot1q=int(push_dot1q),
+ tag1=tag1_id,
+ tag2=tag2_id
+ )
+        err_msg = f"Failed to set VLAN TAG rewrite on host {node[u'host']}"
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:returns: L2 FIB table.
:rtype: list
"""
+ cmd = u"l2_fib_table_dump"
+ args = dict(
+ bd_id=int(bd_id)
+ )
+        err_msg = f"Failed to get L2FIB dump on host {node[u'host']}"
- cmd = 'l2_fib_table_dump'
- args = dict(bd_id=int(bd_id))
- err_msg = 'Failed to get L2FIB dump on host {host}'.format(
- host=node['host'])
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd, **args).get_details(err_msg)
for fib_item in details:
- fib_item['mac'] = L2Util.bin_to_mac(fib_item['mac'])
+ fib_item[u"mac"] = L2Util.bin_to_mac(fib_item[u"mac"])
return details
:returns: L2 FIB entry
:rtype: dict
"""
-
bd_data = L2Util.vpp_get_bridge_domain_data(node)
- bd_id = bd_data[bd_index-1]['bd_id']
+ bd_id = bd_data[bd_index-1][u"bd_id"]
table = L2Util.get_l2_fib_table(node, bd_id)
for entry in table:
- if entry['mac'] == mac:
+ if entry[u"mac"] == mac:
return entry
return {}
from resources.libraries.python.ssh import exec_cmd_no_error
-__all__ = ["LimitUtil"]
+__all__ = [u"LimitUtil"]
class LimitUtil(object):
:type node: dict
:type pid: int
"""
- command = 'prlimit --noheadings --pid={pid}'.format(pid=pid)
-
- message = 'Node {host} failed to run: {command}'.\
- format(host=node['host'], command=command)
+ command = f"prlimit --noheadings --pid={pid}"
+ message = f"Node {node[u'host']} failed to run: {command}"
exec_cmd_no_error(node, command, sudo=True, message=message)
:type resource: str
:type limit: str
"""
- command = 'prlimit --{resource}={limit} --pid={pid}'.format(
- resource=resource, limit=limit, pid=pid)
-
- message = 'Node {host} failed to run: {command}'.\
- format(host=node['host'], command=command)
+ command = f"prlimit --{resource}={limit} --pid={pid}"
+ message = f"Node {node[u'host']} failed to run: {command}"
exec_cmd_no_error(node, command, sudo=True, message=message)
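        # Illustrative note (not part of the diff, values hypothetical):
        # resource=u"nofile", limit=u"65536", pid=1234 assembles and runs
        # "prlimit --nofile=65536 --pid=1234".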
"""Loadbalancer util library."""
-from socket import htonl
from ipaddress import ip_address
+from socket import htonl
+
from resources.libraries.python.topology import NodeType, Topology
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
+
class LoadBalancerUtil(object):
"""Basic Loadbalancer parameter configuration."""
:returns: Nothing.
:raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- ip4_src_addr = ip_address(unicode(kwargs.pop('ip4_src_addr',
- '255.255.255.255')))
- ip6_src_addr = ip_address(unicode(kwargs.pop('ip6_src_addr',\
- 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')))
- flow_timeout = kwargs.pop('flow_timeout', 40)
- sticky_buckets_per_core = kwargs.pop('buckets_per_core', 1024)
-
- cmd = 'lb_conf'
- err_msg = 'Failed to set lb conf on host {host}'.format(
- host=node['host'])
-
- args = dict(ip4_src_address=str(ip4_src_addr),
- ip6_src_address=str(ip6_src_addr),
- sticky_buckets_per_core=sticky_buckets_per_core,
- flow_timeout=flow_timeout)
+ if node[u"type"] == NodeType.DUT:
+ ip4_src_addr = ip_address(
+ kwargs.pop(u"ip4_src_addr", u"255.255.255.255")
+ )
+ ip6_src_addr = ip_address(
+ kwargs.pop(
+ u"ip6_src_addr", u"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"
+ )
+ )
+ flow_timeout = kwargs.pop(u"flow_timeout", 40)
+ sticky_buckets_per_core = kwargs.pop(u"buckets_per_core", 1024)
+
+ cmd = u"lb_conf"
+ err_msg = f"Failed to set lb conf on host {node[u'host']}"
+ args = dict(
+ ip4_src_address=str(ip4_src_addr),
+ ip6_src_address=str(ip6_src_addr),
+ sticky_buckets_per_core=sticky_buckets_per_core,
+ flow_timeout=flow_timeout
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
else:
- raise ValueError('Node {host} has unknown NodeType: "{type}"'
- .format(host=node['host'], type=node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'"
+ )
@staticmethod
def vpp_lb_add_del_vip(node, **kwargs):
:returns: Nothing.
:raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- vip_addr = kwargs.pop('vip_addr', '0.0.0.0')
- protocol = kwargs.pop('protocol', 255)
- port = kwargs.pop('port', 0)
- encap = kwargs.pop('encap', 0)
- dscp = kwargs.pop('dscp', 0)
- srv_type = kwargs.pop('srv_type', 0)
- target_port = kwargs.pop('target_port', 0)
- node_port = kwargs.pop('node_port', 0)
- new_len = kwargs.pop('new_len', 1024)
- is_del = kwargs.pop('is_del', 0)
-
- cmd = 'lb_add_del_vip'
- err_msg = 'Failed to add vip on host {host}'.format(
- host=node['host'])
-
- vip_addr = ip_address(unicode(vip_addr)).packed
- args = dict(pfx={'len': 128,
- 'address': {'un': {'ip4': vip_addr}, 'af': 0}},
- protocol=protocol,
- port=port,
- encap=htonl(encap),
- dscp=dscp,
- type=srv_type,
- target_port=target_port,
- node_port=node_port,
- new_flows_table_length=int(new_len),
- is_del=is_del)
+ if node[u"type"] == NodeType.DUT:
+            vip_addr = kwargs.pop(u"vip_addr", u"0.0.0.0")
+ protocol = kwargs.pop(u"protocol", 255)
+ port = kwargs.pop(u"port", 0)
+ encap = kwargs.pop(u"encap", 0)
+ dscp = kwargs.pop(u"dscp", 0)
+ srv_type = kwargs.pop(u"srv_type", 0)
+ target_port = kwargs.pop(u"target_port", 0)
+ node_port = kwargs.pop(u"node_port", 0)
+ new_len = kwargs.pop(u"new_len", 1024)
+ is_del = kwargs.pop(u"is_del", 0)
+
+ cmd = u"lb_add_del_vip"
+ err_msg = f"Failed to add vip on host {node[u'host']}"
+
+ vip_addr = ip_address(vip_addr).packed
+ args = dict(
+ pfx={
+ u"len": 128,
+                    u"address": {u"un": {u"ip4": vip_addr}, u"af": 0}
+ },
+ protocol=protocol,
+ port=port,
+ encap=htonl(encap),
+ dscp=dscp,
+ type=srv_type,
+ target_port=target_port,
+ node_port=node_port,
+ new_flows_table_length=int(new_len),
+ is_del=is_del
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
else:
- raise ValueError('Node {host} has unknown NodeType: "{type}"'
- .format(host=node['host'], type=node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'"
+ )
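# Editorial sketch (not part of the patch): how the pfx argument above is
# shaped for a hypothetical VIP, assuming the PAPI address union uses the
# ip4/af keys seen in the removed lines.
from ipaddress import ip_address

vip_addr = ip_address(u"192.168.50.74").packed  # 4 bytes for an IPv4 VIP
pfx = {u"len": 128, u"address": {u"un": {u"ip4": vip_addr}, u"af": 0}}
assert pfx[u"address"][u"un"][u"ip4"] == b"\xc0\xa8\x32\x4a"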
@staticmethod
def vpp_lb_add_del_as(node, **kwargs):
:returns: Nothing.
:raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- cmd = 'lb_add_del_as'
- err_msg = 'Failed to add lb as on host {host}'.format(
- host=node['host'])
-
- vip_addr = kwargs.pop('vip_addr', '0.0.0.0')
- protocol = kwargs.pop('protocol', 255)
- port = kwargs.pop('port', 0)
- as_addr = kwargs.pop('as_addr', '0.0.0.0')
- is_del = kwargs.pop('is_del', 0)
- is_flush = kwargs.pop('is_flush', 0)
-
- vip_addr = ip_address(unicode(vip_addr)).packed
- as_addr = ip_address(unicode(as_addr)).packed
-
- args = dict(pfx={'len': 128,
- 'address': {'un': {'ip4': vip_addr}, 'af': 0}},
- protocol=protocol,
- port=port,
- as_address={'un': {'ip4': as_addr}, 'af': 0},
- is_del=is_del,
- is_flush=is_flush)
+ if node[u"type"] == NodeType.DUT:
+ cmd = u"lb_add_del_as"
+ err_msg = f"Failed to add lb as on host {node[u'host']}"
+
+ vip_addr = kwargs.pop(u"vip_addr", "0.0.0.0")
+ protocol = kwargs.pop(u"protocol", 255)
+ port = kwargs.pop(u"port", 0)
+ as_addr = kwargs.pop(u"as_addr", u"0.0.0.0")
+ is_del = kwargs.pop(u"is_del", 0)
+ is_flush = kwargs.pop(u"is_flush", 0)
+
+ vip_addr = ip_address(vip_addr).packed
+ as_addr = ip_address(as_addr).packed
+
+ args = dict(
+ pfx={
+ u"len": 128,
+ u"address": {u"un": {u"ip": vip_addr}, u"af": 0}
+ },
+ protocol=protocol,
+ port=port,
+ as_address={u"un": {u"ip": as_addr}, u"af": 0},
+ is_del=is_del,
+ is_flush=is_flush
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
else:
- raise ValueError('Node {host} has unknown NodeType: "{type}"'
- .format(host=node['host'], type=node['type']))
+ raise ValueError(
+ f"Node {node['uhost']} has unknown NodeType: '{node[u'type']}'"
+ )
@staticmethod
def vpp_lb_add_del_intf_nat4(node, **kwargs):
:returns: Nothing.
:raises ValueError: If the node has an unknown node type.
"""
- if node['type'] == NodeType.DUT:
- cmd = 'lb_add_del_intf_nat4'
- err_msg = 'Failed to add interface nat4 on host {host}'.format(
- host=node['host'])
+ if node[u"type"] == NodeType.DUT:
+ cmd = u"lb_add_del_intf_nat4"
+ err_msg = f"Failed to add interface nat4 on host {node[u'host']}"
- is_add = kwargs.pop('is_add', True)
- interface = kwargs.pop('interface', 0)
+ is_add = kwargs.pop(u"is_add", True)
+ interface = kwargs.pop(u"interface", 0)
sw_if_index = Topology.get_interface_sw_index(node, interface)
- args = dict(is_add=is_add, sw_if_index=sw_if_index)
+ args = dict(
+ is_add=is_add,
+ sw_if_index=sw_if_index
+ )
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
else:
- raise ValueError('Node {host} has unknown NodeType: "{type}"'
- .format(host=node['host'], type=node['type']))
+ raise ValueError(
+ f"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'"
+ )
from resources.libraries.python.OptionString import OptionString
-__all__ = ["run"]
+__all__ = [u"run"]
-MESSAGE_TEMPLATE = "Command {com} ended with RC {ret} and output:\n{out}"
+MESSAGE_TEMPLATE = u"Command {com} ended with RC {ret} and output:\n{out}"
-def run(command, msg="", check=True, log=False, console=False):
+def run(command, msg=u"", check=True, log=False, console=False):
"""Wrapper around subprocess.check_output that can tolerates nonzero RCs.
Stderr is redirected to stdout, so it is part of output
"""
if isinstance(command, OptionString):
command = command.parts
- if not hasattr(command, "__iter__"):
+ if not hasattr(command, u"__iter__"):
# Strings are indexable, but turning into iterator is not supported.
- raise TypeError("Command {cmd!r} is not an iterable.".format(
- cmd=command))
+ raise TypeError(f"Command {command!r} is not an iterable.")
ret_code = 0
- output = ""
+ output = u""
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
output = err.output
ret_code = err.returncode
if check:
- raise RuntimeError(MESSAGE_TEMPLATE.format(
- com=err.cmd, ret=ret_code, out=output))
+ raise RuntimeError(
+ MESSAGE_TEMPLATE.format(com=err.cmd, ret=ret_code, out=output)
+ )
if log:
message = MESSAGE_TEMPLATE.format(com=command, ret=ret_code, out=output)
if msg:
- message = msg + ": " + message
+ message = msg + u": " + message
if console:
logger.console(message)
else:
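# Editorial sketch (not part of the patch): a minimal standalone restatement
# of the wrapper's contract above, so the check/tolerate semantics are
# visible in isolation.
import subprocess

def run_sketch(command, check=True):
    """Run command, return (rc, output); raise on nonzero rc if check."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        ret_code = 0
    except subprocess.CalledProcessError as err:
        output, ret_code = err.output, err.returncode
        if check:
            raise RuntimeError(f"RC {ret_code}, output: {output}")
    return ret_code, output

print(run_sketch([u"echo", u"hello"]))  # (0, b'hello\n')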
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from abc import ABCMeta, abstractmethod
-class AbstractMeasurer(object):
+class AbstractMeasurer(metaclass=ABCMeta):
"""Abstract class defining common API for measurement providers."""
- __metaclass__ = ABCMeta
-
@abstractmethod
def measure(self, duration, transmit_rate):
"""Perform trial measurement and return the result.
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from abc import ABCMeta, abstractmethod
-class AbstractSearchAlgorithm(object):
+class AbstractSearchAlgorithm(metaclass=ABCMeta):
"""Abstract class defining common API for search algorithms."""
- __metaclass__ = ABCMeta
-
def __init__(self, measurer):
"""Store the rate provider.
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
self.minimum_transmit_rate = float(minimum_transmit_rate)
self.maximum_transmit_rate = float(maximum_transmit_rate)
- def __init__(self, measurer, final_relative_width=0.005,
- final_trial_duration=30.0, initial_trial_duration=1.0,
- number_of_intermediate_phases=2, timeout=600.0, doublings=1):
+ def __init__(
+ self, measurer, final_relative_width=0.005,
+ final_trial_duration=30.0, initial_trial_duration=1.0,
+ number_of_intermediate_phases=2, timeout=600.0, doublings=1):
"""Store the measurer object and additional arguments.
:param measurer: Rate provider to use by this search object.
self.timeout = float(timeout)
self.doublings = int(doublings)
-
@staticmethod
def double_relative_width(relative_width):
"""Return relative width corresponding to double logarithmic width.
:rtype: float
"""
return current_bound * (
- 1.0 - MultipleLossRatioSearch.double_relative_width(
- relative_width))
+ 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
+ )
@staticmethod
def expand_down(relative_width, doublings, current_bound):
"""
for _ in range(doublings):
relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width)
+ relative_width
+ )
return current_bound * (1.0 - relative_width)
@staticmethod
:rtype: float
"""
return current_bound / (
- 1.0 - MultipleLossRatioSearch.double_relative_width(
- relative_width))
+ 1.0 - MultipleLossRatioSearch.double_relative_width(relative_width)
+ )
@staticmethod
def expand_up(relative_width, doublings, current_bound):
"""
for _ in range(doublings):
relative_width = MultipleLossRatioSearch.double_relative_width(
- relative_width)
+ relative_width
+ )
return current_bound / (1.0 - relative_width)
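# Editorial sketch (not part of the patch) of the relative width arithmetic,
# assuming double_relative_width(w) behaves like 1 - (1 - w)**2 (doubling in
# logarithmic space); expand_down then scales the bound by (1 - w)**2 per
# doubling, and expand_up divides by it.
upper = 100.0
w = 0.005                            # 0.5 % relative width
lower = upper * (1.0 - w)            # bound pair of relative width w
doubled_w = 1.0 - (1.0 - w) ** 2     # assumed log-space doubling
assert abs(lower / upper - (1.0 - w)) < 1e-12
assert abs(upper * (1.0 - doubled_w) - upper * (1.0 - w) ** 2) < 1e-12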
@staticmethod
:rtype: float
"""
return current_bound / (
- 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width))
+ 1.0 - MultipleLossRatioSearch.half_relative_width(relative_width)
+ )
def narrow_down_ndr_and_pdr(
self, minimum_transmit_rate, maximum_transmit_rate,
initial_width_goal = self.double_relative_width(initial_width_goal)
max_lo = maximum_transmit_rate * (1.0 - initial_width_goal)
mrr = max(
- minimum_transmit_rate,
- min(max_lo, line_measurement.receive_rate))
+ minimum_transmit_rate, min(max_lo, line_measurement.receive_rate)
+ )
mrr_measurement = self.measurer.measure(
- self.initial_trial_duration, mrr)
+ self.initial_trial_duration, mrr
+ )
# Attempt to get narrower width.
if mrr_measurement.loss_fraction > 0.0:
max2_lo = mrr * (1.0 - initial_width_goal)
mrr2 = min(max2_lo, mrr_measurement.receive_rate)
else:
mrr2 = mrr / (1.0 - initial_width_goal)
- if mrr2 > minimum_transmit_rate and mrr2 < maximum_transmit_rate:
+ if minimum_transmit_rate < mrr2 < maximum_transmit_rate:
line_measurement = mrr_measurement
mrr_measurement = self.measurer.measure(
self.initial_trial_duration, mrr2)
state = self.ProgressState(
starting_result, self.number_of_intermediate_phases,
self.final_trial_duration, self.final_relative_width,
- packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate)
+ packet_loss_ratio, minimum_transmit_rate, maximum_transmit_rate
+ )
state = self.ndrpdr(state)
return state.result
"""
# TODO: Implement https://stackoverflow.com/a/24683360
# to avoid the string manipulation if log verbosity is too low.
- logging.info("result before update: %s", state.result)
+ logging.info(f"result before update: {state.result}")
logging.debug(
- "relative widths in goals: %s", state.result.width_in_goals(
- self.final_relative_width))
+ f"relative widths in goals: "
+ f"{state.result.width_in_goals(self.final_relative_width)}"
+ )
measurement = self.measurer.measure(state.duration, transmit_rate)
ndr_interval = self._new_interval(
- state.result.ndr_interval, measurement, 0.0)
+ state.result.ndr_interval, measurement, 0.0
+ )
pdr_interval = self._new_interval(
- state.result.pdr_interval, measurement, state.packet_loss_ratio)
+ state.result.pdr_interval, measurement, state.packet_loss_ratio
+ )
state.result = NdrPdrResult(ndr_interval, pdr_interval)
return state
# "invalid upper bound at maximal rate" case.
new_lo = measurement
- return ReceiveRateInterval(old_lo if new_lo is None else new_lo,
- old_hi if new_hi is None else new_hi)
+ return ReceiveRateInterval(
+ old_lo if new_lo is None else new_lo,
+ old_hi if new_hi is None else new_hi
+ )
def ndrpdr(self, state):
- """Pefrom trials for this phase. Return the new state when done.
+ """Perform trials for this phase. Return the new state when done.
:param state: State before this phase.
:type state: ProgressState
duration_multiplier = state.duration / self.initial_trial_duration
phase_exponent = float(state.phases) / saved_phases
state.duration = self.initial_trial_duration * math.pow(
- duration_multiplier, phase_exponent)
+ duration_multiplier, phase_exponent
+ )
# Shorter durations do not need that narrow widths.
saved_width = state.width_goal
state.width_goal = self.double_relative_width(state.width_goal)
state.phases = saved_phases # Not needed, but just in case.
logging.info(
- "starting iterations with duration %s and relative width goal %s",
- state.duration, state.width_goal)
+ f"starting iterations with duration {state.duration} and relative "
+ f"width goal {state.width_goal}"
+ )
while 1:
if time.time() > start_time + self.timeout:
- raise RuntimeError("Optimized search takes too long.")
+ raise RuntimeError(u"Optimized search takes too long.")
# Order of priorities: invalid bounds (nl, pl, nh, ph),
# then narrowing relative Tr widths.
# Durations are not priorities yet,
pdr_lo = state.result.pdr_interval.measured_low
pdr_hi = state.result.pdr_interval.measured_high
ndr_rel_width = max(
- state.width_goal, state.result.ndr_interval.rel_tr_width)
+ state.width_goal, state.result.ndr_interval.rel_tr_width
+ )
pdr_rel_width = max(
- state.width_goal, state.result.pdr_interval.rel_tr_width)
+ state.width_goal, state.result.pdr_interval.rel_tr_width
+ )
# If we are hitting maximal or minimal rate, we cannot shift,
# but we can re-measure.
- new_tr = self._ndrpdr_loss_fraction(state,
- ndr_lo, ndr_hi, pdr_lo, pdr_hi,
- ndr_rel_width, pdr_rel_width)
+ new_tr = self._ndrpdr_loss_fraction(
+ state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
+ pdr_rel_width
+ )
if new_tr is not None:
state = self._measure_and_update_state(state, new_tr)
and pdr_lo.loss_fraction > state.packet_loss_ratio):
pdr_rel_width = 0.0
- new_tr = self._ndrpdr_width_goal(state, ndr_lo, pdr_lo,
- ndr_rel_width, pdr_rel_width)
+ new_tr = self._ndrpdr_width_goal(
+ state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width
+ )
if new_tr is not None:
state = self._measure_and_update_state(state, new_tr)
# We do not need to improve width, but there still might be
# some measurements with smaller duration.
- new_tr = self._ndrpdr_duration(state,
- ndr_lo, ndr_hi, pdr_lo, pdr_hi,
- ndr_rel_width, pdr_rel_width)
+ new_tr = self._ndrpdr_duration(
+ state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
+ pdr_rel_width
+ )
if new_tr is not None:
state = self._measure_and_update_state(state, new_tr)
# Widths are narrow (or lower bound minimal), bound measurements
# are long enough, we can return.
- logging.info("phase done")
+ logging.info(u"phase done")
break
return state
- def _ndrpdr_loss_fraction(self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi,
- ndr_rel_width, pdr_rel_width):
+ def _ndrpdr_loss_fraction(
+ self, state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
+ pdr_rel_width):
"""Perform loss_fraction-based trials within a ndrpdr phase
:param state: current state
if ndr_lo.loss_fraction > 0.0:
if ndr_lo.target_tr > state.minimum_transmit_rate:
result = max(
- state.minimum_transmit_rate,
- self.expand_down(
- ndr_rel_width, self.doublings, ndr_lo.target_tr))
- logging.info("ndr lo external %s", result)
+ state.minimum_transmit_rate, self.expand_down(
+ ndr_rel_width, self.doublings, ndr_lo.target_tr
+ )
+ )
+ logging.info(f"ndr lo external {result}")
elif ndr_lo.duration < state.duration:
result = state.minimum_transmit_rate
- logging.info("ndr lo minimal re-measure")
+ logging.info(u"ndr lo minimal re-measure")
if result is None and pdr_lo.loss_fraction > state.packet_loss_ratio:
if pdr_lo.target_tr > state.minimum_transmit_rate:
result = max(
- state.minimum_transmit_rate,
- self.expand_down(
- pdr_rel_width, self.doublings, pdr_lo.target_tr))
- logging.info("pdr lo external %s", result)
+ state.minimum_transmit_rate, self.expand_down(
+ pdr_rel_width, self.doublings, pdr_lo.target_tr
+ )
+ )
+ logging.info(f"pdr lo external {result}")
elif pdr_lo.duration < state.duration:
result = state.minimum_transmit_rate
- logging.info("pdr lo minimal re-measure")
+ logging.info(u"pdr lo minimal re-measure")
if result is None and ndr_hi.loss_fraction <= 0.0:
if ndr_hi.target_tr < state.maximum_transmit_rate:
result = min(
- state.maximum_transmit_rate,
- self.expand_up(
- ndr_rel_width, self.doublings, ndr_hi.target_tr))
- logging.info("ndr hi external %s", result)
+ state.maximum_transmit_rate, self.expand_up(
+ ndr_rel_width, self.doublings, ndr_hi.target_tr
+ )
+ )
+ logging.info(f"ndr hi external {result}")
elif ndr_hi.duration < state.duration:
result = state.maximum_transmit_rate
- logging.info("ndr hi maximal re-measure")
+ logging.info(u"ndr hi maximal re-measure")
if result is None and pdr_hi.loss_fraction <= state.packet_loss_ratio:
if pdr_hi.target_tr < state.maximum_transmit_rate:
result = min(
- state.maximum_transmit_rate,
- self.expand_up(
- pdr_rel_width, self.doublings, pdr_hi.target_tr))
- logging.info("pdr hi external %s", result)
+ state.maximum_transmit_rate, self.expand_up(
+ pdr_rel_width, self.doublings, pdr_hi.target_tr
+ )
+ )
+ logging.info(f"pdr hi external {result}")
elif pdr_hi.duration < state.duration:
result = state.maximum_transmit_rate
- logging.info("ndr hi maximal re-measure")
+ logging.info(u"ndr hi maximal re-measure")
return result
- def _ndrpdr_width_goal(self, state, ndr_lo, pdr_lo,
- ndr_rel_width, pdr_rel_width):
+ def _ndrpdr_width_goal(
+ self, state, ndr_lo, pdr_lo, ndr_rel_width, pdr_rel_width):
"""Perform width_goal-based trials within a ndrpdr phase
:param state: current state
# We have to narrow NDR width first, as NDR internal search
# can invalidate PDR (but not vice versa).
result = self.half_step_up(ndr_rel_width, ndr_lo.target_tr)
- logging.info("Bisecting for NDR at %s", result)
+ logging.info(f"Bisecting for NDR at {result}")
elif pdr_rel_width > state.width_goal:
- # PDR iternal search.
+ # PDR internal search.
result = self.half_step_up(pdr_rel_width, pdr_lo.target_tr)
- logging.info("Bisecting for PDR at %s", result)
+ logging.info(f"Bisecting for PDR at {result}")
else:
result = None
return result
@staticmethod
- def _ndrpdr_duration(state, ndr_lo, pdr_lo, ndr_hi, pdr_hi,
- ndr_rel_width, pdr_rel_width):
+ def _ndrpdr_duration(
+ state, ndr_lo, ndr_hi, pdr_lo, pdr_hi, ndr_rel_width,
+ pdr_rel_width):
"""Perform duration-based trials within a ndrpdr phase
:param state: current state
# creating invalid bounds to resolve (thus broadening width).
if ndr_lo.duration < state.duration:
result = ndr_lo.target_tr
- logging.info("re-measuring NDR lower bound")
+ logging.info(u"re-measuring NDR lower bound")
elif pdr_lo.duration < state.duration:
result = pdr_lo.target_tr
- logging.info("re-measuring PDR lower bound")
+ logging.info(u"re-measuring PDR lower bound")
# Except when lower bounds have high loss fraction, in that case
# we do not need to re-measure _upper_ bounds.
elif ndr_hi.duration < state.duration and ndr_rel_width > 0.0:
result = ndr_hi.target_tr
- logging.info("re-measuring NDR upper bound")
+ logging.info(u"re-measuring NDR upper bound")
elif pdr_hi.duration < state.duration and pdr_rel_width > 0.0:
result = pdr_hi.target_tr
- logging.info("re-measuring PDR upper bound")
+ logging.info(u"re-measuring PDR upper bound")
else:
result = None
return result
"""Module defining NdrPdrResult class."""
-from resources.libraries.python.MLRsearch.ReceiveRateInterval \
- import ReceiveRateInterval
+from .ReceiveRateInterval import ReceiveRateInterval
class NdrPdrResult(object):
# TODO: Type checking is not very pythonic,
# perhaps users can fix wrong usage without it?
if not isinstance(ndr_interval, ReceiveRateInterval):
- raise TypeError("ndr_interval, is not a ReceiveRateInterval: "
- "{ndr!r}".format(ndr=ndr_interval))
+ raise TypeError(
+ f"ndr_interval, is not a ReceiveRateInterval: {ndr_interval!r}"
+ )
if not isinstance(pdr_interval, ReceiveRateInterval):
- raise TypeError("pdr_interval, is not a ReceiveRateInterval: "
- "{pdr!r}".format(pdr=pdr_interval))
+ raise TypeError(
+ f"pdr_interval, is not a ReceiveRateInterval: {pdr_interval!r}"
+ )
self.ndr_interval = ndr_interval
self.pdr_interval = pdr_interval
:returns: Message containing NDR and PDR widths in goals.
:rtype: str
"""
- return "ndr {ndr_in_goals}; pdr {pdr_in_goals}".format(
- ndr_in_goals=self.ndr_interval.width_in_goals(relative_width_goal),
- pdr_in_goals=self.pdr_interval.width_in_goals(relative_width_goal))
+ return f"ndr {self.ndr_interval.width_in_goals(relative_width_goal)};" \
+ f" pdr {self.pdr_interval.width_in_goals(relative_width_goal)}"
def __str__(self):
"""Return string as tuple of named values."""
- return "NDR={ndr!s};PDR={pdr!s}".format(
- ndr=self.ndr_interval, pdr=self.pdr_interval)
+ return f"NDR={self.ndr_interval!s};PDR={self.pdr_interval!s}"
def __repr__(self):
"""Return string evaluable as a constructor call."""
- return "NdrPdrResult(ndr_interval={ndr!r},pdr_interval={pdr!r})".format(
- ndr=self.ndr_interval, pdr=self.pdr_interval)
+ return f"NdrPdrResult(ndr_interval={self.ndr_interval!r}," \
+ f"pdr_interval={self.pdr_interval!r})"
import math
-from resources.libraries.python.MLRsearch.ReceiveRateMeasurement \
- import ReceiveRateMeasurement
+from .ReceiveRateMeasurement import ReceiveRateMeasurement
class ReceiveRateInterval(object):
# TODO: Type checking is not very pythonic,
# perhaps users can fix wrong usage without it?
if not isinstance(measured_low, ReceiveRateMeasurement):
- raise TypeError("measured_low is not a ReceiveRateMeasurement: "
- "{low!r}".format(low=measured_low))
+ raise TypeError(
+ f"measured_low is not a ReceiveRateMeasurement: "
+ f"{measured_low!r}"
+ )
if not isinstance(measured_high, ReceiveRateMeasurement):
- raise TypeError("measured_high is not a ReceiveRateMeasurement: "
- "{high!r}".format(high=measured_high))
+ raise TypeError(
+ f"measured_high is not a ReceiveRateMeasurement: "
+ f"{measured_high!r}"
+ )
self.measured_low = measured_low
self.measured_high = measured_high
# Declare secondary quantities to appease pylint.
"""Sort bounds by target Tr, compute secondary quantities."""
if self.measured_low.target_tr > self.measured_high.target_tr:
self.measured_low, self.measured_high = (
- self.measured_high, self.measured_low)
+ self.measured_high, self.measured_low
+ )
self.abs_tr_width = (
- self.measured_high.target_tr - self.measured_low.target_tr)
+ self.measured_high.target_tr - self.measured_low.target_tr
+ )
self.rel_tr_width = self.abs_tr_width / self.measured_high.target_tr
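# Editorial sketch (not part of the patch) of the derived quantities above,
# for hypothetical bounds of 9.9 and 10.0 Mpps.
abs_tr_width = 10.0 - 9.9                # absolute width in Mpps
rel_tr_width = abs_tr_width / 10.0       # width relative to the upper bound
assert abs(rel_tr_width - 0.01) < 1e-12  # i.e. a 1 % wide interval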
def width_in_goals(self, relative_width_goal):
def __str__(self):
"""Return string as half-open interval."""
- return "[{low!s};{high!s})".format(
- low=self.measured_low, high=self.measured_high)
+ return f"[{self.measured_low!s};{self.measured_high!s})"
def __repr__(self):
"""Return string evaluable as a constructor call."""
- return ("ReceiveRateInterval(measured_low={low!r}"
- ",measured_high={high!r})".format(
- low=self.measured_low, high=self.measured_high))
+ return f"ReceiveRateInterval(measured_low={self.measured_low!r}," \
+ f"measured_high={self.measured_high!r})"
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
def __str__(self):
"""Return string reporting input and loss fraction."""
- return "d={dur!s},Tr={rate!s},Df={frac!s}".format(
- dur=self.duration, rate=self.target_tr, frac=self.loss_fraction)
+ return f"d={self.duration!s},Tr={self.target_tr!s}," \
+ f"Df={self.loss_fraction!s}"
def __repr__(self):
"""Return string evaluable as a constructor call."""
- return ("ReceiveRateMeasurement(duration={dur!r},target_tr={rate!r}"
- ",transmit_count={trans!r},loss_count={loss!r})".format(
- dur=self.duration, rate=self.target_tr,
- trans=self.transmit_count, loss=self.loss_count))
+ return f"ReceiveRateMeasurement(duration={self.duration!r}," \
+ f"target_tr={self.target_tr!r}," \
+ f"transmit_count={self.transmit_count!r}," \
+ f"loss_count={self.loss_count!r})"
from enum import IntEnum
+
from robot.api import logger
from resources.libraries.python.topology import NodeType, Topology
:returns: List of memif interfaces extracted from Papi response.
:rtype: list
"""
- cmd = "memif_dump"
+ cmd = u"memif_dump"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details()
for memif in details:
- memif["hw_addr"] = str(memif["hw_addr"])
- memif["role"] = memif["role"].value
- memif["mode"] = memif["mode"].value
- memif["flags"] = memif["flags"].value \
- if hasattr(memif["flags"], 'value') else int(memif["flags"])
+ memif[u"hw_addr"] = str(memif[u"hw_addr"])
+ memif[u"role"] = memif[u"role"].value
+ memif[u"mode"] = memif[u"mode"].value
+ memif[u"flags"] = memif[u"flags"].value \
+ if hasattr(memif[u"flags"], u"value") else int(memif[u"flags"])
- logger.debug("MEMIF details:\n{details}".format(details=details))
+ logger.debug(f"MEMIF details:\n{details}")
return details
includes only retval.
:rtype: dict
"""
- cmd = 'memif_socket_filename_add_del'
- err_msg = 'Failed to create memif socket on host {host}'.format(
- host=node['host'])
+ cmd = u"memif_socket_filename_add_del"
+ err_msg = f"Failed to create memif socket on host {node[u'host']}"
args = dict(
is_add=is_add,
socket_id=int(sid),
- socket_filename=str('/tmp/' + filename)
+ socket_filename=str(u"/tmp/" + filename)
)
with PapiSocketExecutor(node) as papi_exec:
return papi_exec.add(cmd, **args).get_reply(err_msg)
:returns: sw_if_index
:rtype: int
"""
- cmd = 'memif_create'
- err_msg = 'Failed to create memif interface on host {host}'.format(
- host=node['host'])
+ cmd = u"memif_create"
+ err_msg = f"Failed to create memif interface on host {node[u'host']}"
args = dict(
role=role,
rx_queues=int(rxq),
tx_queues=int(txq),
socket_id=int(sid),
id=int(mid),
- secret=""
+ secret=u""
)
+
with PapiSocketExecutor(node) as papi_exec:
return papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
@staticmethod
- def create_memif_interface(node, filename, mid, sid, rxq=1, txq=1,
- role="SLAVE"):
+ def create_memif_interface(
+ node, filename, mid, sid, rxq=1, txq=1, role=u"SLAVE"):
"""Create Memif interface on the given node.
:param node: Given node to create Memif interface on.
:rtype: int
:raises ValueError: If command 'create memif' fails.
"""
-
role = getattr(MemifRole, role.upper()).value
# Create socket
# Create memif
sw_if_index = Memif._memif_create(
- node, mid, sid, rxq=rxq, txq=txq, role=role)
+ node, mid, sid, rxq=rxq, txq=txq, role=role
+ )
# Update Topology
- if_key = Topology.add_new_port(node, 'memif')
+ if_key = Topology.add_new_port(node, u"memif")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = Memif.vpp_get_memif_interface_name(node, sw_if_index)
ifc_mac = Memif.vpp_get_memif_interface_mac(node, sw_if_index)
Topology.update_interface_mac_address(node, if_key, ifc_mac)
- Topology.update_interface_memif_socket(node, if_key, '/tmp/' + filename)
+ Topology.update_interface_memif_socket(
+ node, if_key, u"/tmp/" + filename
+ )
Topology.update_interface_memif_id(node, if_key, mid)
Topology.update_interface_memif_role(node, if_key, str(role))
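# Editorial sketch (not part of the patch): hypothetical usage of the method
# above; node is a topology dict, the module path mirrors this library's
# layout, and the int return value follows the ":rtype: int" docstring.
#
#     from resources.libraries.python.Memif import Memif
#     sw_if_index = Memif.create_memif_interface(
#         node, filename=u"memif1.sock", mid=1, sid=1, rxq=1, txq=1,
#         role=u"SLAVE"
#     )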
:param node: Given node to show Memif data on.
:type node: dict
"""
-
Memif._memif_details(node)
@staticmethod
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
Memif.show_memif(node)
@staticmethod
:returns: Memif interface name, or None if not found.
:rtype: str
"""
-
details = Memif._memif_details(node)
for memif in details:
- if memif["sw_if_index"] == sw_if_index:
- return memif["if_name"]
+ if memif[u"sw_if_index"] == sw_if_index:
+ return memif[u"if_name"]
return None
@staticmethod
:returns: Memif interface MAC address, or None if not found.
:rtype: str
"""
-
details = Memif._memif_details(node)
for memif in details:
- if memif["sw_if_index"] == sw_if_index:
- return memif["hw_addr"]
+ if memif[u"sw_if_index"] == sw_if_index:
+ return memif[u"hw_addr"]
return None
from pprint import pformat
from socket import AF_INET, inet_pton
-
from enum import IntEnum
from robot.api import logger
:type int_in: str
:type int_out: str
"""
-
- cmd = 'nat44_interface_add_del_feature'
+ cmd = u"nat44_interface_add_del_feature"
int_in_idx = InterfaceUtil.get_sw_if_index(node, int_in)
- err_msg = 'Failed to set inside interface {int} for NAT44 on host ' \
- '{host}'.format(int=int_in, host=node['host'])
+ err_msg = f"Failed to set inside interface {int_in} for NAT44 " \
+ f"on host {node[u'host']}"
args_in = dict(
sw_if_index=int_in_idx,
is_add=1,
- flags=getattr(NATConfigFlags, "NAT_IS_INSIDE").value
+ flags=getattr(NATConfigFlags, u"NAT_IS_INSIDE").value
)
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
int_out_idx = InterfaceUtil.get_sw_if_index(node, int_out)
- err_msg = 'Failed to set outside interface {int} for NAT44 on host ' \
- '{host}'.format(int=int_out, host=node['host'])
+ err_msg = f"Failed to set outside interface {int_out} for NAT44 " \
+ f"on host {node[u'host']}"
args_in = dict(
sw_if_index=int_out_idx,
is_add=1,
- flags=getattr(NATConfigFlags, "NAT_IS_OUTSIDE").value
+ flags=getattr(NATConfigFlags, u"NAT_IS_OUTSIDE").value
)
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
:type ip_out: str
:type subnet_out: str or int
"""
-
- cmd = 'nat_det_add_del_map'
- err_msg = 'Failed to set deterministic behaviour of NAT on host ' \
- '{host}'.format(host=node['host'])
+ cmd = u"nat_det_add_del_map"
+ err_msg = f"Failed to set deterministic behaviour of NAT " \
+ f"on host {node[u'host']}"
args_in = dict(
is_add=True,
in_addr=inet_pton(AF_INET, str(ip_in)),
out_addr=inet_pton(AF_INET, str(ip_out)),
out_plen=int(subnet_out)
)
+
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args_in).get_reply(err_msg)
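# Editorial sketch (not part of the patch) of the address packing used in
# the deterministic mapping above: inet_pton yields the 4-byte big-endian
# form the PAPI call expects.
from socket import AF_INET, inet_pton

assert inet_pton(AF_INET, u"192.0.2.1") == b"\xc0\x00\x02\x01"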
:param node: DUT node.
:type node: dict
"""
+ cmd = u"nat_show_config"
+ err_msg = f"Failed to get NAT configuration on host {node[u'host']}"
- cmd = 'nat_show_config'
- err_msg = 'Failed to get NAT configuration on host {host}'.\
- format(host=node['host'])
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd).get_reply(err_msg)
- logger.debug("NAT Configuration:\n{reply}".format(reply=pformat(reply)))
+
+ logger.debug(f"NAT Configuration:\n{pformat(reply)}")
cmds = [
- "nat_worker_dump",
- "nat44_interface_addr_dump",
- "nat44_address_dump",
- "nat44_static_mapping_dump",
- "nat44_user_dump",
- "nat44_interface_dump",
- "nat44_user_session_dump",
- "nat_det_map_dump"
+ u"nat_worker_dump",
+ u"nat44_interface_addr_dump",
+ u"nat44_address_dump",
+ u"nat44_static_mapping_dump",
+ u"nat44_user_dump",
+ u"nat44_interface_dump",
+ u"nat44_user_session_dump",
+ u"nat_det_map_dump"
]
PapiSocketExecutor.dump_and_log(node, cmds)
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
:type node: dict
:type namespace_name: str
"""
- cmd = ('ip netns add {0}'.format(namespace_name))
+ cmd = f"p netns add {namespace_name}"
+
exec_cmd_no_error(node, cmd, sudo=True)
self._namespaces.append(namespace_name)
:type interface: str
:raises RuntimeError: Interface could not be attached.
"""
- cmd = 'ip link set {0} netns {1}'.format(interface, namespace)
+ cmd = f"ip link set {interface} netns {namespace}"
+
(ret_code, _, stderr) = exec_cmd(node, cmd, timeout=5, sudo=True)
if ret_code != 0:
- raise RuntimeError(
- 'Could not attach interface, reason:{}'.format(stderr))
- cmd = 'ip netns exec {} ip link set {} up'.format(
- namespace, interface)
+ raise RuntimeError(f"Could not attach interface, reason:\n{stderr}")
+
+ cmd = f"ip netns exec {namespace} ip link set {interface} up"
+
(ret_code, _, stderr) = exec_cmd(node, cmd, timeout=5, sudo=True)
if ret_code != 0:
raise RuntimeError(
- 'Could not set interface state, reason:{}'.format(stderr))
+ f"Could not set interface state, reason:\n{stderr}"
+ )
@staticmethod
def create_bridge_for_int_in_namespace(
:type bridge_name: str
:type interfaces: list
"""
- cmd = 'ip netns exec {} brctl addbr {}'.format(namespace, bridge_name)
+ cmd = f"ip netns exec {namespace} brctl addbr {bridge_name}"
exec_cmd_no_error(node, cmd, sudo=True)
+
for interface in interfaces:
- cmd = 'ip netns exec {} brctl addif {} {}'.format(
- namespace, bridge_name, interface)
+ cmd = f"ip netns exec {namespace} brctl addif {bridge_name} " \
+ f"{interface}"
exec_cmd_no_error(node, cmd, sudo=True)
- cmd = 'ip netns exec {} ip link set dev {} up'.format(
- namespace, bridge_name)
+
+ cmd = f"ip netns exec {namespace} ip link set dev {bridge_name} up"
exec_cmd_no_error(node, cmd, sudo=True)
def clean_up_namespaces(self, node):
:raises RuntimeError: Namespaces could not be cleaned properly.
"""
for namespace in self._namespaces:
- print "Cleaning namespace {}".format(namespace)
- cmd = 'ip netns delete {}'.format(namespace)
+ print(f"Cleaning namespace {namespace}")
+ cmd = f"ip netns delete {namespace}"
(ret_code, _, _) = exec_cmd(node, cmd, timeout=5, sudo=True)
if ret_code != 0:
- raise RuntimeError('Could not delete namespace')
+ raise RuntimeError(u"Could not delete namespace")
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""
nodes = self._nodes
if len(nodes) < 2:
- raise RuntimeError('Not enough nodes to compute path')
+ raise RuntimeError(u"Not enough nodes to compute path")
for idx in range(0, len(nodes) - 1):
topo = Topology()
node2 = nodes[idx + 1]
n1_list = self._nodes_filter[idx]
n2_list = self._nodes_filter[idx + 1]
- links = topo.get_active_connecting_links(node1, node2,
- filter_list_node1=n1_list,
- filter_list_node2=n2_list)
+ links = topo.get_active_connecting_links(
+ node1, node2, filter_list_node1=n1_list,
+ filter_list_node2=n2_list
+ )
if not links:
- raise RuntimeError('No link between {0} and {1}'.format(
- node1['host'], node2['host']))
+ raise RuntimeError(
+ f"No link between {node1[u'host']} and {node2[u'host']}"
+ )
if always_same_link:
l_set = set(links).intersection(self._links)
l_set = set(links).difference(self._links)
if not l_set:
raise RuntimeError(
- 'No free link between {0} and {1}, all links already '
- 'used'.format(node1['host'], node2['host']))
+ f"No free link between {node1[u'host']} and "
+ f"{node2[u'host']}, all links already used"
+ )
if not l_set:
link = links.pop()
.. note:: Call compute_path before.
"""
if not self._path:
- raise RuntimeError('No path for topology')
+ raise RuntimeError(u"No path for topology")
return self._path[0]
def last_interface(self):
.. note:: Call compute_path before.
"""
if not self._path:
- raise RuntimeError('No path for topology')
+ raise RuntimeError(u"No path for topology")
return self._path[-1]
def first_ingress_interface(self):
.. note:: Call compute_path before.
"""
if not self._path:
- raise RuntimeError('No path for topology')
+ raise RuntimeError(u"No path for topology")
return self._path[1]
def last_egress_interface(self):
.. note:: Call compute_path before.
"""
if not self._path:
- raise RuntimeError('No path for topology')
+ raise RuntimeError(u"No path for topology")
return self._path[-2]
class OptionString(object):
"""Class serving as a builder for option strings.
- Motivation: Both manual contatenation and .join() methods
+ Motivation: Both manual concatenation and .join() methods
are prone to leaving superfluous spaces if some parts of options
are optional (missing, empty).
the best fitting one, without much logic near the call site.
"""
- def __init__(self, parts=tuple(), prefix=""):
+ def __init__(self, parts=tuple(), prefix=u""):
"""Create instance with listed strings as parts to use.
Prefix will be converted to string and stripped.
TODO: Support users calling with parts being a string?
- :param parts: List of of stringifiable objects to become parts.
- :param prefix: Subtring to prepend to every parameter (not value).
+ :param parts: List of stringifiable objects to become parts.
+ :param prefix: Substring to prepend to every parameter (not value).
:type parts: Iterable of object
:type prefix: object
"""
:returns: Executable constructor call as string.
:rtype: str
"""
- return "OptionString(parts={parts!r},prefix={prefix!r})".format(
- parts=self.parts, prefix=self.prefix)
+ return f"OptionString(parts={self.parts!r},prefix={self.prefix!r})"
# TODO: Would we ever need a copy() method?
# Currently, superstring "master" is mutable but unique,
:returns: The converted part without prefix, empty means not added.
:rtype: str
"""
- part = "" if part is None else str(part).strip()
+ part = u"" if part is None else str(part).strip()
if part:
prefixed_part = self.prefix + part if prefixed else part
self.parts.append(prefixed_part)
Parameter is prefixed before adding.
:param parameter: Parameter object, usually a word starting with dash.
- :type variable: object
+ :type parameter: object
:returns: Self, to enable method chaining.
:rtype: OptionString
"""
:param parameter: Parameter object, usually a word starting with dash.
:param condition: Do not add if truth value of this is false.
- :type variable: object
+ :type parameter: object
:type condition: object
:returns: Self, to enable method chaining.
:rtype: OptionString
:param parameter: Parameter object, usually a word starting with dash.
:param value: Value object. Prefix is never added.
- :type variable: object
+ :type parameter: object
:type value: object
:returns: Self, to enable method chaining.
:rtype: OptionString
:param parameter: Parameter object, usually a word starting with dash.
:param value: Value object. Prefix is never added.
- :type variable: object
+ :type parameter: object
:type value: object
:returns: Self, to enable method chaining.
:rtype: OptionString
# pylint: disable=protected-access
if temp._check_and_add(parameter, prefixed=True):
if temp._check_and_add(value, prefixed=False):
- self.parts.append("=".join(temp.parts))
+ self.parts.append(u"=".join(temp.parts))
return self
def add_with_value_if(self, parameter, value, condition):
:param parameter: Parameter object, usually a word starting with dash.
:param value: Value object. Prefix is never added.
:param condition: Do not add if truth value of this is false.
- :type variable: object
+ :type parameter: object
:type value: object
:type condition: object
:returns: Self, to enable method chaining.
:param parameter: Parameter object, usually a word starting with dash.
:param value: Value object. Prefix is never added.
:param condition: Do not add if truth value of this is false.
- :type variable: object
+ :type parameter: object
:type value: object
:type condition: object
:returns: Self, to enable method chaining.
self.add_equals(parameter, value)
return self
- def add_with_value_from_dict(self, parameter, key, mapping, default=""):
+ def add_with_value_from_dict(self, parameter, key, mapping, default=u""):
"""Add parameter with value from dict under key, or default.
If key is missing, default is used as value.
value = mapping.get(key, default)
return self.add_with_value(parameter, value)
- def add_equals_from_dict(self, parameter, key, mapping, default=""):
+ def add_equals_from_dict(self, parameter, key, mapping, default=u""):
"""Add parameter=value to options where value is from dict.
If key is missing, default is used as value.
value = mapping.get(key, default)
return self.add_equals(parameter, value)
- def add_if_from_dict(self, parameter, key, mapping, default="False"):
+ def add_if_from_dict(self, parameter, key, mapping, default=u"False"):
"""Add parameter based on if the condition in dict is true.
If key is missing, default is used as condition.
return self.add_if(parameter, condition)
def add_with_value_if_from_dict(
- self, parameter, value, key, mapping, default="False"):
+ self, parameter, value, key, mapping, default=u"False"):
"""Add parameter and value based on condition in dict.
If key is missing, default is used as condition.
return self.add_with_value_if(parameter, value, condition)
def add_equals_if_from_dict(
- self, parameter, value, key, mapping, default="False"):
+ self, parameter, value, key, mapping, default=u"False"):
"""Add parameter=value based on condition in dict.
If key is missing, default is used as condition.
:returns: Space separated string of options.
:rtype: str
"""
- return " ".join(self.parts)
+ return u" ".join(self.parts)
import traceback
import dill
+
from numpy import random
# TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH,
# so we have to catch them all.
traceback_string = traceback.format_exc()
communication_pipe.send(traceback_string)
- # After sendig, re-raise, so usages other than "one process per call"
+ # After sending, re-raise, so usages other than "one process per call"
# keep behaving correctly.
raise
covariance_matrix[first][second] *= scale_coeff
while 1:
sample_point = random.multivariate_normal(
- averages, covariance_matrix, 1)[0].tolist()
+ averages, covariance_matrix, 1
+ )[0].tolist()
# Multivariate Gauss can fall outside (-1, 1) interval
for first in range(dimension):
sample_coordinate = sample_point[first]
:raises numpy.linalg.LinAlgError: If the focus shape gets singular
(due to rounding errors). Try changing scale_coeff.
"""
-
debug_list = list()
trace_list = list()
# Block until input object appears.
dimension, dilled_function, param_focus_tracker, max_samples = (
- communication_pipe.recv())
- debug_list.append("Called with param_focus_tracker {tracker!r}"
- .format(tracker=param_focus_tracker))
+ communication_pipe.recv()
+ )
+ debug_list.append(
+ f"Called with param_focus_tracker {param_focus_tracker!r}"
+ )
def trace(name, value):
"""
:type value: object
"""
if trace_enabled:
- trace_list.append(name + " " + repr(value))
+ trace_list.append(name + u" " + repr(value))
value_logweight_function = dill.loads(dilled_function)
samples = 0
break
sample_point = generate_sample(
param_focus_tracker.averages, param_focus_tracker.covariance_matrix,
- dimension, scale_coeff)
- trace("sample_point", sample_point)
+ dimension, scale_coeff
+ )
+ trace(u"sample_point", sample_point)
samples += 1
- trace("samples", samples)
+ trace(u"samples", samples)
value, log_weight = value_logweight_function(trace, *sample_point)
- trace("value", value)
- trace("log_weight", log_weight)
- trace("focus tracker before adding", param_focus_tracker)
+ trace(u"value", value)
+ trace(u"log_weight", log_weight)
+ trace(u"focus tracker before adding", param_focus_tracker)
# Update focus related statistics.
param_distance = param_focus_tracker.add_without_dominance_get_distance(
- sample_point, log_weight)
+ sample_point, log_weight
+ )
# The code above looked at weight (not importance).
# The code below looks at importance (not weight).
log_rarity = param_distance / 2.0
- trace("log_rarity", log_rarity)
+ trace(u"log_rarity", log_rarity)
log_importance = log_weight + log_rarity
- trace("log_importance", log_importance)
+ trace(u"log_importance", log_importance)
value_tracker.add(value, log_importance)
# Update sampled statistics.
param_sampled_tracker.add_get_shift(sample_point, log_importance)
- debug_list.append("integrator used " + str(samples) + " samples")
- debug_list.append(" ".join([
- "value_avg", str(value_tracker.average),
- "param_sampled_avg", repr(param_sampled_tracker.averages),
- "param_sampled_cov", repr(param_sampled_tracker.covariance_matrix),
- "value_log_variance", str(value_tracker.log_variance),
- "value_log_secondary_variance",
- str(value_tracker.secondary.log_variance)]))
+ debug_list.append(u"integrator used " + str(samples) + u" samples")
+ debug_list.append(
+ " ".join([
+ u"value_avg", str(value_tracker.average),
+ u"param_sampled_avg", repr(param_sampled_tracker.averages),
+ u"param_sampled_cov", repr(param_sampled_tracker.covariance_matrix),
+ u"value_log_variance", str(value_tracker.log_variance),
+ u"value_log_secondary_variance",
+ str(value_tracker.secondary.log_variance)
+ ])
+ )
communication_pipe.send(
- (value_tracker, param_focus_tracker, debug_list, trace_list, samples))
+ (value_tracker, param_focus_tracker, debug_list, trace_list, samples)
+ )
import math
import multiprocessing
import time
+
from collections import namedtuple
import dill
+
from scipy.special import erfcx, erfc
# TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH,
# then switch to absolute imports within PLRsearch package.
# Current usage of relative imports is just a short term workaround.
from . import Integrator
-from .log_plus import log_plus, log_minus
from . import stat_trackers
+from .log_plus import log_plus, log_minus
class PLRsearch(object):
Two constants are stored as class fields for speed.
- Method othed than search (and than __init__)
+ Methods other than search (and __init__)
are just internal code structure.
TODO: Those method names should start with underscore then.
stop_time = time.time() + self.timeout
min_rate = float(min_rate)
max_rate = float(max_rate)
- logging.info("Started search with min_rate %(min)r, max_rate %(max)r",
- {"min": min_rate, "max": max_rate})
+ logging.info(
+ f"Started search with min_rate {min_rate!r}, "
+ f"max_rate {max_rate!r}")
trial_result_list = list()
trial_number = self.trial_number_offset
focus_trackers = (None, None)
transmit_rate = (min_rate + max_rate) / 2.0
lossy_loads = [max_rate]
- zeros = 0 # How many cosecutive zero loss results are happening.
+ zeros = 0 # How many consecutive zero loss results are happening.
while 1:
trial_number += 1
- logging.info("Trial %(number)r", {"number": trial_number})
+ logging.info(f"Trial {trial_number!r}")
results = self.measure_and_compute(
self.trial_duration_per_trial * trial_number, transmit_rate,
- trial_result_list, min_rate, max_rate, focus_trackers)
+ trial_result_list, min_rate, max_rate, focus_trackers
+ )
measurement, average, stdev, avg1, avg2, focus_trackers = results
zeros += 1
# TODO: Ratio of fill rate to drain rate seems to have
# in order to get to usable losses at higher loads.
if len(lossy_loads) > 3:
lossy_loads = lossy_loads[3:]
- logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r",
- {"z": zeros, "o": (avg1 + avg2) / 2.0,
- "n": next_load, "s": lossy_loads})
+ logging.debug(
+ f"Zeros {zeros!r} orig {(avg1 + avg2) / 2.0!r} "
+ f"next {next_load!r} loads {lossy_loads!r}"
+ )
transmit_rate = min(max_rate, max(min_rate, next_load))
@staticmethod
# TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization
chi = (load - mrr) / spread
chi0 = -mrr / spread
- trace("stretch: load", load)
- trace("mrr", mrr)
- trace("spread", spread)
- trace("chi", chi)
- trace("chi0", chi0)
+ trace(u"stretch: load", load)
+ trace(u"mrr", mrr)
+ trace(u"spread", spread)
+ trace(u"chi", chi)
+ trace(u"chi0", chi0)
if chi > 0:
log_lps = math.log(
- load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread)
- trace("big loss direct log_lps", log_lps)
+ load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread
+ )
+ trace(u"big loss direct log_lps", log_lps)
else:
two_positive = log_plus(chi, 2 * chi0 - log_2)
two_negative = log_plus(chi0, 2 * chi - log_2)
if two_positive <= two_negative:
log_lps = log_minus(chi, chi0) + log_spread
- trace("small loss crude log_lps", log_lps)
+ trace(u"small loss crude log_lps", log_lps)
return log_lps
two = log_minus(two_positive, two_negative)
three_positive = log_plus(two_positive, 3 * chi - log_3)
three = log_minus(three_positive, three_negative)
if two == three:
log_lps = two + log_spread
- trace("small loss approx log_lps", log_lps)
+ trace(u"small loss approx log_lps", log_lps)
else:
log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0))
log_lps += log_spread
- trace("small loss direct log_lps", log_lps)
+ trace(u"small loss direct log_lps", log_lps)
return log_lps
@staticmethod
# TODO: The stretch sign is just to have less minuses. Worth changing?
chi = (mrr - load) / spread
chi0 = mrr / spread
- trace("Erf: load", load)
- trace("mrr", mrr)
- trace("spread", spread)
- trace("chi", chi)
- trace("chi0", chi0)
+ trace(u"Erf: load", load)
+ trace(u"mrr", mrr)
+ trace(u"spread", spread)
+ trace(u"chi", chi)
+ trace(u"chi0", chi0)
if chi >= -1.0:
- trace("positive, b roughly bigger than m", None)
+ trace(u"positive, b roughly bigger than m", None)
if chi > math.exp(10):
first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10)
- trace("approximated first", first)
+ trace(u"approximated first", first)
else:
first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi))
- trace("exact first", first)
+ trace(u"exact first", first)
first -= chi * chi
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = log_minus(first, second)
- trace("first", first)
+ trace(u"first", first)
else:
- trace("negative, b roughly smaller than m", None)
+ trace(u"negative, b roughly smaller than m", None)
exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi)
exp_first *= math.exp(-chi * chi)
exp_first -= 2 * chi
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0))
second -= chi0 * chi0
intermediate = math.log(exp_first - math.exp(second))
- trace("exp_first", exp_first)
- trace("second", second)
- trace("intermediate", intermediate)
+ trace(u"exp_first", exp_first)
+ trace(u"second", second)
+ trace(u"intermediate", intermediate)
result = intermediate + math.log(spread) - math.log(erfc(-chi0))
- trace("result", result)
+ trace(u"result", result)
return result
@staticmethod
:type lfit_func: Function from 3 floats to float.
:type min_rate: float
:type max_rate: float
- :type log_lps_target: float
+ :type loss_ratio_target: float
:type mrr: float
:type spread: float
:returns: Load [pps] which achieves the target with given parameters.
loss_rate = math.exp(lfit_func(trace, rate, mrr, spread))
loss_ratio = loss_rate / rate
if loss_ratio > loss_ratio_target:
- trace("halving down", rate)
+ trace(u"halving down", rate)
rate_hi = rate
elif loss_ratio < loss_ratio_target:
- trace("halving up", rate)
+ trace(u"halving up", rate)
rate_lo = rate
- trace("found", rate)
+ trace(u"found", rate)
return rate
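# Editorial sketch (not part of the patch) of the bisection above: halve the
# [rate_lo, rate_hi] interval toward the load whose loss ratio matches the
# target; loss_ratio_at stands in for math.exp(lfit_func(...)) / rate.
def bisect_rate(loss_ratio_at, target, rate_lo, rate_hi, iterations=48):
    for _ in range(iterations):
        rate = (rate_lo + rate_hi) / 2.0
        if loss_ratio_at(rate) > target:
            rate_hi = rate    # too lossy, halve down
        else:
            rate_lo = rate    # acceptable, halve up
    return (rate_lo + rate_hi) / 2.0

# Toy linear loss model: ratio r / 100 hits the 0.5 % target at rate 0.5.
assert abs(bisect_rate(lambda r: r / 100.0, 0.005, 0.0, 100.0) - 0.5) < 1e-9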
@staticmethod
:param trace: A multiprocessing-friendly logging function (closure).
:param lfit_func: Fitting function, typically lfit_spread or lfit_erf.
- :param result_list: List of trial measurement results.
+ :param trial_result_list: List of trial measurement results.
:param mrr: The mrr parameter for the fitting function.
:param spread: The spread parameter for the fitting function.
:type trace: function (str, object) -> None
:type lfit_func: Function from 3 floats to float.
- :type result_list: list of MLRsearch.ReceiveRateMeasurement
+ :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement
:type mrr: float
:type spread: float
:returns: Logarithm of result weight for given function and parameters.
:rtype: float
"""
log_likelihood = 0.0
- trace("log_weight for mrr", mrr)
- trace("spread", spread)
+ trace(u"log_weight for mrr", mrr)
+ trace(u"spread", spread)
for result in trial_result_list:
- trace("for tr", result.target_tr)
- trace("lc", result.loss_count)
- trace("d", result.duration)
+ trace(u"for tr", result.target_tr)
+ trace(u"lc", result.loss_count)
+ trace(u"d", result.duration)
log_avg_loss_per_second = lfit_func(
trace, result.target_tr, mrr, spread)
log_avg_loss_per_trial = (
# Poisson probability computation works nice for logarithms.
log_trial_likelihood = (
result.loss_count * log_avg_loss_per_trial
- - math.exp(log_avg_loss_per_trial))
+ - math.exp(log_avg_loss_per_trial)
+ )
log_trial_likelihood -= math.lgamma(1 + result.loss_count)
log_likelihood += log_trial_likelihood
- trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial))
- trace("log_trial_likelihood", log_trial_likelihood)
+ trace(u"avg_loss_per_trial", math.exp(log_avg_loss_per_trial))
+ trace(u"log_trial_likelihood", log_trial_likelihood)
return log_likelihood
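# Editorial sketch (not part of the patch): each trial's contribution above
# is a Poisson log-pmf, log P(k; lam) = k*log(lam) - lam - lgamma(k + 1),
# with lam kept in logarithm form by the original code.
import math

def poisson_log_pmf(loss_count, expected_losses):
    return (
        loss_count * math.log(expected_losses) - expected_losses
        - math.lgamma(1 + loss_count)
    )

# Zero observed losses with two expected losses: probability exp(-2).
assert abs(math.exp(poisson_log_pmf(0, 2.0)) - math.exp(-2.0)) < 1e-12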
def measure_and_compute(
:rtype: _ComputeResult
"""
logging.debug(
- "measure_and_compute started with self %(self)r, trial_duration "
- "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, "
- "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r",
- {"self": self, "dur": trial_duration, "tr": transmit_rate,
- "trl": trial_result_list, "mr": max_rate, "track": focus_trackers,
- "ms": max_samples})
+ f"measure_and_compute started with self {self!r}, trial_duration "
+ f"{trial_duration!r}, transmit_rate {transmit_rate!r}, "
+ f"trial_result_list {trial_result_list!r}, max_rate {max_rate!r}, "
+ f"focus_trackers {focus_trackers!r}, max_samples {max_samples!r}"
+ )
# Preparation phase.
dimension = 2
stretch_focus_tracker, erf_focus_tracker = focus_trackers
start computation, return the boss pipe end.
:param fitting_function: lfit_erf or lfit_stretch.
- :param bias_avg: Tuple of floats to start searching around.
- :param bias_cov: Covariance matrix defining initial focus shape.
+ :param focus_tracker: Tracker initialized to speed up the numeric
+ computation.
:type fitting_function: Function from 3 floats to float.
- :type bias_avg: 2-tuple of floats
- :type bias_cov: 2-tuple of 2-tuples of floats
+ :type focus_tracker: None or stat_trackers.VectorStatTracker
:returns: Boss end of communication pipe.
:rtype: multiprocessing.Connection
"""
mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0
spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr))
logweight = self.log_weight(
- trace, fitting_function, trial_result_list, mrr, spread)
+ trace, fitting_function, trial_result_list, mrr, spread
+ )
value = math.log(self.find_critical_rate(
trace, fitting_function, min_rate, max_rate,
- self.packet_loss_ratio_target, mrr, spread))
+ self.packet_loss_ratio_target, mrr, spread)
+ )
return value, logweight
dilled_function = dill.dumps(value_logweight_func)
boss_pipe_end, worker_pipe_end = multiprocessing.Pipe()
boss_pipe_end.send(
- (dimension, dilled_function, focus_tracker, max_samples))
+ (dimension, dilled_function, focus_tracker, max_samples)
+ )
worker = multiprocessing.Process(
- target=Integrator.try_estimate_nd, args=(
- worker_pipe_end, 10.0, self.trace_enabled))
+ target=Integrator.try_estimate_nd,
+ args=(worker_pipe_end, 10.0, self.trace_enabled)
+ )
worker.daemon = True
worker.start()
return boss_pipe_end
- erf_pipe = start_computing(
- self.lfit_erf, erf_focus_tracker)
- stretch_pipe = start_computing(
- self.lfit_stretch, stretch_focus_tracker)
+ erf_pipe = start_computing(self.lfit_erf, erf_focus_tracker)
+ stretch_pipe = start_computing(self.lfit_stretch, stretch_focus_tracker)
# Measurement phase.
measurement = self.measurer.measure(trial_duration, transmit_rate)
"""
pipe.send(None)
if not pipe.poll(10.0):
- raise RuntimeError(
- "Worker {name} did not finish!".format(name=name))
+ raise RuntimeError(f"Worker {name} did not finish!")
result_or_traceback = pipe.recv()
try:
value_tracker, focus_tracker, debug_list, trace_list, sampls = (
- result_or_traceback)
+ result_or_traceback
+ )
except ValueError:
raise RuntimeError(
- "Worker {name} failed with the following traceback:\n{tr}"
- .format(name=name, tr=result_or_traceback))
- logging.info("Logs from worker %(name)r:", {"name": name})
+ f"Worker {name} failed with the following traceback:\n"
+ f"{result_or_traceback}"
+ )
+ logging.info(f"Logs from worker {name!r}:")
for message in debug_list:
logging.info(message)
for message in trace_list:
logging.debug(message)
- logging.debug("trackers: value %(val)r focus %(foc)r", {
- "val": value_tracker, "foc": focus_tracker})
+ logging.debug(
+ f"trackers: value {value_tracker!r} focus {focus_tracker!r}"
+ )
return _PartialResult(value_tracker, focus_tracker, sampls)
- stretch_result = stop_computing("stretch", stretch_pipe)
- erf_result = stop_computing("erf", erf_pipe)
+ stretch_result = stop_computing(u"stretch", stretch_pipe)
+ erf_result = stop_computing(u"erf", erf_pipe)
result = PLRsearch._get_result(measurement, stretch_result, erf_result)
logging.info(
- "measure_and_compute finished with trial result %(res)r "
- "avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r "
- "new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r "
- "erf samples %(es)r",
- {"res": result.measurement,
- "avg": result.avg, "stdev": result.stdev,
- "a1": result.stretch_exp_avg, "a2": result.erf_exp_avg,
- "nt": result.trackers, "ot": old_trackers,
- "ss": stretch_result.samples, "es": erf_result.samples})
+ f"measure_and_compute finished with trial result "
+ f"{result.measurement!r} avg {result.avg!r} stdev {result.stdev!r} "
+ f"stretch {result.stretch_exp_avg!r} erf {result.erf_exp_avg!r} "
+ f"new trackers {result.trackers!r} old trackers {old_trackers!r} "
+ f"stretch samples {stretch_result.samples!r} erf samples "
+ f"{erf_result.samples!r}"
+ )
return result
@staticmethod
# Named tuples, for multiple local variables to be passed as return value.
_PartialResult = namedtuple(
- "_PartialResult", "value_tracker focus_tracker samples")
+ u"_PartialResult", u"value_tracker focus_tracker samples"
+)
"""Two stat trackers and sample counter.
:param value_tracker: Tracker for the value (critical load) being integrated.
"""
_ComputeResult = namedtuple(
- "_ComputeResult",
- "measurement avg stdev stretch_exp_avg erf_exp_avg trackers")
+ u"_ComputeResult",
+ u"measurement avg stdev stretch_exp_avg erf_exp_avg trackers"
+)
"""Measurement, 4 computation result values, pair of trackers.
:param measurement: The trial measurement result obtained during computation.
TODO: Figure out a more performant way of handling -inf.
-The functions handle the common task of adding or substracting
+The functions handle the common task of adding or subtracting
two numbers where both operands and the result is given in logarithm form.
There are conditionals to make sure overflow does not happen (if possible)
during the computation."""
def log_plus(first, second):
- """Return logarithm of the sum of two exponentials.
+ """Return logarithm of the sum of two exponents.
Basically math.log(math.exp(first) + math.exp(second))
which avoids overflow and uses None as math.log(0.0).
:returns: Logarithm of the sum (or None if zero).
:rtype: float
"""
-
if first is None:
return second
if second is None:
def log_minus(first, second):
- """Return logarithm of the difference of two exponentials.
+ """Return logarithm of the difference of two exponents.
Basically math.log(math.exp(first) - math.exp(second))
which avoids overflow and uses None as math.log(0.0).
:rtype: float
:raises RuntimeError: If the difference would be non-positive.
"""
-
if first is None:
- raise RuntimeError("log_minus: does not suport None first")
+ raise RuntimeError(u"log_minus: does not support None first")
if second is None:
return first
if second >= first:
- raise RuntimeError("log_minus: first has to be bigger than second")
+ raise RuntimeError(u"log_minus: first has to be bigger than second")
factor = -math.expm1(second - first)
if factor <= 0.0:
- raise RuntimeError("log_minus: non-positive number to log")
+ raise RuntimeError(u"log_minus: non-positive number to log")
else:
return first + math.log(factor)
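As a self-contained illustration (plain Python, not part of the diff), the trick both functions rely on is factoring out the larger exponent, so exp() is only ever called on a non-positive argument:

import math

def log_plus_sketch(first, second):
    """Sketch of log(exp(first) + exp(second)) computed in log space."""
    if second > first:
        first, second = second, first
    # exp(second - first) <= 1, so no overflow even for huge inputs.
    return first + math.log1p(math.exp(second - first))

# math.log(math.exp(1e6) + math.exp(1e6)) would overflow inside exp();
# the log-domain version returns 1e6 + log(2) without leaving float range.
assert abs(log_plus_sketch(1e6, 1e6) - (1e6 + math.log(2.0))) < 1e-9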
def __repr__(self):
"""Return string, which interpreted constructs state of self.
- :returns: Expression contructing an equivalent instance.
+ :returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return ("ScalarStatTracker(log_sum_weight={lsw!r},average={a!r},"
- "log_variance={lv!r})".format(
- lsw=self.log_sum_weight, a=self.average,
- lv=self.log_variance))
+ return f"ScalarStatTracker(log_sum_weight={self.log_sum_weight!r}," \
+ f"average={self.average!r},log_variance={self.log_variance!r})"
def copy(self):
"""Return new ScalarStatTracker instance with the same state as self.
:rtype: ScalarStatTracker
"""
return ScalarStatTracker(
- self.log_sum_weight, self.average, self.log_variance)
+ self.log_sum_weight, self.average, self.log_variance
+ )
def add(self, scalar_value, log_weight=0.0):
"""Return updated stats corresponding to addition of another sample.
One typical use is for a Monte Carlo integrator to decide whether
the partial sums so far are reliable enough.
"""
-
def __init__(
self, log_sum_weight=None, average=0.0, log_variance=None,
log_sum_secondary_weight=None, secondary_average=0.0,
# so in case of diamond inheritance mismatch would be probable.
ScalarStatTracker.__init__(self, log_sum_weight, average, log_variance)
self.secondary = ScalarStatTracker(
- log_sum_secondary_weight, secondary_average, log_secondary_variance)
+ log_sum_secondary_weight, secondary_average, log_secondary_variance
+ )
self.max_log_weight = max_log_weight
def __repr__(self):
:rtype: str
"""
sec = self.secondary
- return (
- "ScalarDualStatTracker(log_sum_weight={lsw!r},average={a!r},"
- "log_variance={lv!r},log_sum_secondary_weight={lssw!r},"
- "secondary_average={sa!r},log_secondary_variance={lsv!r},"
- "max_log_weight={mlw!r})".format(
- lsw=self.log_sum_weight, a=self.average, lv=self.log_variance,
- lssw=sec.log_sum_weight, sa=sec.average, lsv=sec.log_variance,
- mlw=self.max_log_weight))
+ return f"ScalarDualStatTracker(log_sum_weight={self.log_sum_weight!r},"\
+ f"average={self.average!r},log_variance={self.log_variance!r}," \
+ f"log_sum_secondary_weight={sec.log_sum_weight!r}," \
+ f"secondary_average={sec.average!r}," \
+ f"log_secondary_variance={sec.log_variance!r}," \
+ f"max_log_weight={self.max_log_weight!r})"
def add(self, scalar_value, log_weight=0.0):
"""Return updated both stats after addition of another sample.
primary.add(scalar_value, log_weight)
return self
-
def get_pessimistic_variance(self):
"""Return estimate of variance reflecting weight effects.
def __init__(
self, dimension=2, log_sum_weight=None, averages=None,
covariance_matrix=None):
- """Initialize new tracker instance, two-dimenstional empty by default.
+ """Initialize new tracker instance, two-dimensional empty by default.
If any of the latter two arguments is None, it means
the tracker state is invalid. Use reset method
- to create empty tracker of constructed dimentionality.
+ to create empty tracker of constructed dimensionality.
:param dimension: Number of scalar components of samples.
:param log_sum_weight: Natural logarithm of sum of weights
def __repr__(self):
"""Return string, which interpreted constructs state of self.
- :returns: Expression contructing an equivalent instance.
+ :returns: Expression constructing an equivalent instance.
:rtype: str
"""
- return (
- "VectorStatTracker(dimension={d!r},log_sum_weight={lsw!r},"
- "averages={a!r},covariance_matrix={cm!r})".format(
- d=self.dimension, lsw=self.log_sum_weight, a=self.averages,
- cm=self.covariance_matrix))
+ return f"VectorStatTracker(dimension={self.dimension!r}," \
+ f"log_sum_weight={self.log_sum_weight!r}," \
+ f"averages={self.averages!r}," \
+ f"covariance_matrix={self.covariance_matrix!r})"
def copy(self):
"""Return new instance with the same state as self.
"""
return VectorStatTracker(
self.dimension, self.log_sum_weight, self.averages[:],
- copy.deepcopy(self.covariance_matrix))
+ copy.deepcopy(self.covariance_matrix)
+ )
def reset(self):
"""Return state set to empty data of proper dimensionality.
old_log_sum_weight = self.log_sum_weight
old_averages = self.averages
if not old_averages:
- shift = [0.0 for index in range(dimension)]
+ shift = [0.0 for _ in range(dimension)]
else:
- shift = [vector_value[index] - old_averages[index]
- for index in range(dimension)]
+ shift = [
+ vector_value[index] - old_averages[index]
+ for index in range(dimension)
+ ]
if old_log_sum_weight is None:
# First sample.
self.log_sum_weight = log_weight
new_log_sum_weight = log_plus(old_log_sum_weight, log_weight)
data_ratio = math.exp(old_log_sum_weight - new_log_sum_weight)
sample_ratio = math.exp(log_weight - new_log_sum_weight)
- new_averages = [old_averages[index] + shift[index] * sample_ratio
- for index in range(dimension)]
+ new_averages = [
+ old_averages[index] + shift[index] * sample_ratio
+ for index in range(dimension)
+ ]
# It is easier to update covariance matrix in-place.
for second in range(dimension):
for first in range(dimension):
If the weight of the incoming sample is far bigger
than the weight of all the previous data together,
- convariance matrix would suffer from underflows.
+ the covariance matrix would suffer from underflow.
To avoid that, this method manipulates both weights
before calling add().
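The body of that method is elided here; as a hedged sketch of the idea (helper name hypothetical, not the tracker's real API), shifting both logarithmic weights by a common constant preserves their ratio, which is all the covariance update depends on, while keeping exp() of the difference representable:

import math

def rescale_log_weights_sketch(tracker_log_weight, sample_log_weight):
    """Hypothetical helper: shift both log weights so the larger becomes 0."""
    top = max(tracker_log_weight, sample_log_weight)
    return tracker_log_weight - top, sample_log_weight - top

print(math.exp(-2000.0))  # 0.0: the raw weight underflows to zero.
print(rescale_log_weights_sketch(-2000.0, -1990.0))  # (-10.0, 0.0): ratio kept.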
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
import os
import select
-from scapy.config import conf
from scapy.all import ETH_P_IP, ETH_P_IPV6, ETH_P_ALL, ETH_P_ARP
+from scapy.config import conf
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether, ARP
conf.use_pcap = True
import scapy.arch.pcapdnet # pylint: disable=C0413, unused-import
-__all__ = ['RxQueue', 'TxQueue', 'Interface', 'create_gratuitous_arp_request',
- 'auto_pad', 'checksum_equal']
+__all__ = [
+ u"RxQueue", u"TxQueue", u"Interface", u"create_gratuitous_arp_request",
+ u"auto_pad", u"checksum_equal"
+]
# TODO: http://stackoverflow.com/questions/320232/
# ensuring-subprocesses-are-dead-on-exiting-python-program
class PacketVerifier(object):
"""Base class for TX and RX queue objects for packet verifier."""
def __init__(self, interface_name):
- os.system('sudo echo 1 > /proc/sys/net/ipv6/conf/{0}/disable_ipv6'
- .format(interface_name))
- os.system('sudo ip link set {0} up promisc on'.format(interface_name))
+ os.system(
+ f"sudo echo 1 > /proc/sys/net/ipv6/conf/{interface_name}/"
+ f"disable_ipv6"
+ )
+ os.system(f"sudo ip link set {interface_name} up promisc on")
self._ifname = interface_name
try:
ether_type = Ether(buf[0:14]).type
except AttributeError:
- raise RuntimeError(
- 'No EtherType in packet {0}'.format(buf.__repr__()))
+ raise RuntimeError(f"No EtherType in packet {buf!r}")
if ether_type == ETH_P_IP:
# 14 is the Ethernet frame header size.
return None
elif ether_type == ETH_P_IPV6:
if not Ether(buf[0:14+6]).haslayer(IPv6):
- raise RuntimeError(
- 'Invalid IPv6 packet {0}'.format(buf.__repr__()))
+ raise RuntimeError(f"Invalid IPv6 packet {buf!r}")
# In addition to the above, 40 bytes is the length of the IPv6 header.
# The ipv6.plen field only contains the length of the payload, not the header.
- pkt_len = Ether(buf)['IPv6'].plen + 14 + 40
+ pkt_len = Ether(buf)[u"IPv6"].plen + 14 + 40
if len(buf) < 60:
return None
elif ether_type == ETH_P_ARP:
pkt = Ether(buf[:20])
if not pkt.haslayer(ARP):
- raise RuntimeError('Incomplete ARP packet')
+ raise RuntimeError(u"Incomplete ARP packet")
# len(eth) + arp(2 hw addr type + 2 proto addr type
# + 1b len + 1b len + 2b operation)
- pkt = Ether(str(pkt))
+ pkt = Ether(bytes(pkt))
if not pkt.haslayer(ARP):
pkt.show()
- raise RuntimeError('Incomplete RARP packet')
+ raise RuntimeError(u"Incomplete RARP packet")
# len(eth) + arp(2 hw addr type + 2 proto addr type
# + 1b len + 1b len + 2b operation)
del pkt
else:
- raise RuntimeError('Unknown protocol {0}'.format(ether_type))
+ raise RuntimeError(f"Unknown protocol {ether_type}")
if pkt_len < 60:
pkt_len = 60
# Auto pad all packets in ignore list
ignore_list.append(auto_pad(ig_pkt))
while True:
- (rlist, _, _) = select.select([self._sock], [], [], timeout)
+ rlist, _, _ = select.select([self._sock], [], [], timeout)
if self._sock not in rlist:
return None
pkt = self._sock.recv(0x7fff)
pkt_pad = auto_pad(pkt)
- print 'Received packet on {0} of len {1}'\
- .format(self._ifname, len(pkt))
+ print(f"Received packet on {self._ifname} of len {len(pkt)}")
if verbose:
pkt.show2() # pylint: disable=no-member
- print
+ print()
if pkt_pad in ignore_list:
ignore_list.remove(pkt_pad)
- print 'Received packet ignored.'
+ print(u"Received packet ignored.")
continue
else:
return pkt
:type pkt: string or scapy Packet derivative.
:type verbose: bool
"""
- print 'Sending packet out of {0} of len {1}'.format(self._ifname,
- len(pkt))
+ print(f"Sending packet out of {self._ifname} of len {len(pkt)}")
if verbose:
- Ether(str(pkt)).show2()
+ Ether(bytes(pkt)).show2()
- print
+ print()
- pkt = auto_pad(str(pkt))
+ pkt = auto_pad(bytes(pkt))
self._sock.send(pkt)
def create_gratuitous_arp_request(src_mac, src_ip):
"""Creates scapy representation of gratuitous ARP request."""
- return (Ether(src=src_mac, dst='ff:ff:ff:ff:ff:ff') /
- ARP(psrc=src_ip, hwsrc=src_mac, pdst=src_ip))
+ return (Ether(src=src_mac, dst=u"ff:ff:ff:ff:ff:ff") /
+ ARP(psrc=src_ip, hwsrc=src_mac, pdst=src_ip)
+ )
def auto_pad(packet):
"""Pads zeroes at the end of the packet if the total len < 60 bytes."""
- padded = str(packet)
+ padded = bytes(packet)
if len(padded) < 60:
- padded += ('\0' * (60 - len(padded)))
+ padded += (b'\0' * (60 - len(padded)))
return padded
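A quick sanity check of the bytes-based padding above (plain bytes input, no scapy required):

assert auto_pad(b"\x01\x02") == b"\x01\x02" + b"\x00" * 58
assert len(auto_pad(b"x" * 70)) == 70  # Already >= 60 bytes, left untouched.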
"""Python API executor library.
"""
-import binascii
import copy
import glob
import json
from resources.libraries.python.Constants import Constants
from resources.libraries.python.LocalExecution import run
from resources.libraries.python.FilteredLogger import FilteredLogger
-from resources.libraries.python.PythonThree import raise_from
from resources.libraries.python.PapiHistory import PapiHistory
from resources.libraries.python.ssh import (
SSH, SSHTimeout, exec_cmd_no_error, scp_node)
from resources.libraries.python.VppApiCrc import VppApiCrcChecker
-__all__ = ["PapiExecutor", "PapiSocketExecutor"]
+__all__ = [u"PapiExecutor", u"PapiSocketExecutor"]
def dictize(obj):
from tuple, including its read-only __getitem__ attribute,
so we cannot monkey-patch it.
TODO: Create a proxy for namedtuple to allow that.
:param obj: Arbitrary object to dictize.
:type obj: object
:returns: Dictized object.
:rtype: same as obj type or collections.OrderedDict
"""
- if not hasattr(obj, "_asdict"):
+ if not hasattr(obj, u"_asdict"):
return obj
ret = obj._asdict()
old_get = ret.__getitem__
return
cls = self.__class__ # Shorthand for setting class fields.
package_path = None
- tmp_dir = tempfile.mkdtemp(dir="/tmp")
+ tmp_dir = tempfile.mkdtemp(dir=u"/tmp")
try:
# Pack, copy and unpack Python part of VPP installation from _node.
# TODO: Use rsync or recursive version of ssh.scp_node instead?
node = self._node
- exec_cmd_no_error(node, ["rm", "-rf", "/tmp/papi.txz"])
+ exec_cmd_no_error(node, [u"rm", u"-rf", u"/tmp/papi.txz"])
# Papi python version depends on OS (and time).
# Python 2.7 or 3.4, site-packages or dist-packages.
- installed_papi_glob = "/usr/lib/python*/*-packages/vpp_papi"
+ installed_papi_glob = u"/usr/lib/python*/*-packages/vpp_papi"
# We need to wrap this command in bash, in order to expand globs,
# and as ssh does join, the inner command has to be quoted.
- inner_cmd = " ".join([
- "tar", "cJf", "/tmp/papi.txz", "--exclude=*.pyc",
- installed_papi_glob, "/usr/share/vpp/api"])
- exec_cmd_no_error(node, ["bash", "-c", "'" + inner_cmd + "'"])
- scp_node(node, tmp_dir + "/papi.txz", "/tmp/papi.txz", get=True)
- run(["tar", "xf", tmp_dir + "/papi.txz", "-C", tmp_dir])
- api_json_directory = tmp_dir + "/usr/share/vpp/api"
+ inner_cmd = u" ".join([
+ u"tar", u"cJf", u"/tmp/papi.txz", u"--exclude=*.pyc",
+ installed_papi_glob, u"/usr/share/vpp/api"
+ ])
+ exec_cmd_no_error(node, [u"bash", u"-c", u"'" + inner_cmd + u"'"])
+ scp_node(node, tmp_dir + u"/papi.txz", u"/tmp/papi.txz", get=True)
+ run([u"tar", u"xf", tmp_dir + u"/papi.txz", u"-C", tmp_dir])
+ api_json_directory = tmp_dir + u"/usr/share/vpp/api"
# Perform initial checks before .api.json files are gone,
# by creating the checker instance.
cls.crc_checker = VppApiCrcChecker(api_json_directory)
# When present locally, we finally can find the installation path.
package_path = glob.glob(tmp_dir + installed_papi_glob)[0]
# Package path has to be one level above the vpp_papi directory.
- package_path = package_path.rsplit('/', 1)[0]
+ package_path = package_path.rsplit(u"/", 1)[0]
sys.path.append(package_path)
# pylint: disable=import-error
from vpp_papi.vpp_papi import VPPApiClient as vpp_class
vpp_class.apidir = api_json_directory
# We need to create instance before removing from sys.path.
cls.vpp_instance = vpp_class(
- use_socket=True, server_address="TBD", async_thread=False,
- read_timeout=14, logger=FilteredLogger(logger, "INFO"))
+ use_socket=True, server_address=u"TBD", async_thread=False,
+ read_timeout=14, logger=FilteredLogger(logger, u"INFO"))
# Cannot use loglevel parameter, robot.api.logger lacks support.
# TODO: Stop overriding read_timeout when VPP-1722 is fixed.
finally:
# Parsing takes longer than connecting, prepare instance before tunnel.
vpp_instance = self.vpp_instance
node = self._node
- self._temp_dir = tempfile.mkdtemp(dir="/tmp")
- self._local_vpp_socket = self._temp_dir + "/vpp-api.sock"
- self._ssh_control_socket = self._temp_dir + "/ssh.sock"
+ self._temp_dir = tempfile.mkdtemp(dir=u"/tmp")
+ self._local_vpp_socket = self._temp_dir + u"/vpp-api.sock"
+ self._ssh_control_socket = self._temp_dir + u"/ssh.sock"
ssh_socket = self._ssh_control_socket
# Cleanup possibilities.
- ret_code, _ = run(["ls", ssh_socket], check=False)
+ ret_code, _ = run([u"ls", ssh_socket], check=False)
if ret_code != 2:
# This branch never seems to be hit in CI,
# but may be useful when testing manually.
- run(["ssh", "-S", ssh_socket, "-O", "exit", "0.0.0.0"],
- check=False, log=True)
+ run(
+ [u"ssh", u"-S", ssh_socket, u"-O", u"exit", u"0.0.0.0"],
+ check=False, log=True
+ )
# TODO: Is any sleep necessary? How to prove if not?
- run(["sleep", "0.1"])
- run(["rm", "-vrf", ssh_socket])
+ run([u"sleep", u"0.1"])
+ run([u"rm", u"-vrf", ssh_socket])
# Even if ssh can perhaps reuse this file,
# we need to remove it for readiness detection to work correctly.
- run(["rm", "-rvf", self._local_vpp_socket])
+ run([u"rm", u"-rvf", self._local_vpp_socket])
# On VIRL, the ssh user is not added to "vpp" group,
# so we need to change remote socket file access rights.
exec_cmd_no_error(
- node, "chmod o+rwx " + self._remote_vpp_socket, sudo=True)
+ node, u"chmod o+rwx " + self._remote_vpp_socket, sudo=True
+ )
# We use sleep command. The ssh command will exit in 10 second,
# unless a local socket connection is established,
# in which case the ssh command will exit only when
# the ssh connection is closed again (via control socket).
# The log level is to suppress "Warning: Permanently added" messages.
ssh_cmd = [
- "ssh", "-S", ssh_socket, "-M",
- "-o", "LogLevel=ERROR", "-o", "UserKnownHostsFile=/dev/null",
- "-o", "StrictHostKeyChecking=no", "-o", "ExitOnForwardFailure=yes",
- "-L", self._local_vpp_socket + ':' + self._remote_vpp_socket,
- "-p", str(node['port']), node['username'] + "@" + node['host'],
- "sleep", "10"]
- priv_key = node.get("priv_key")
+ u"ssh", u"-S", ssh_socket, u"-M",
+ u"-o", u"LogLevel=ERROR", u"-o", u"UserKnownHostsFile=/dev/null",
+ u"-o", u"StrictHostKeyChecking=no",
+ u"-o", u"ExitOnForwardFailure=yes",
+ u"-L", self._local_vpp_socket + u":" + self._remote_vpp_socket,
+ u"-p", str(node[u"port"]), node[u"username"] + u"@" + node[u"host"],
+ u"sleep", u"10"
+ ]
+ priv_key = node.get(u"priv_key")
if priv_key:
# This is tricky. We need a file to pass the value to ssh command.
- # And we need ssh command, because paramiko does not suport sockets
+ # And we need ssh command, because paramiko does not support sockets
# (neither ssh_socket, nor _remote_vpp_socket).
key_file = tempfile.NamedTemporaryFile()
key_file.write(priv_key)
# Make sure the content is written, but do not close yet.
key_file.flush()
- ssh_cmd[1:1] = ["-i", key_file.name]
- password = node.get("password")
+ ssh_cmd[1:1] = [u"-i", key_file.name]
+ password = node.get(u"password")
if password:
# Prepend sshpass command to set password.
- ssh_cmd[:0] = ["sshpass", "-p", password]
+ ssh_cmd[:0] = [u"sshpass", u"-p", password]
time_stop = time.time() + 10.0
# subprocess.Popen seems to be the best way to run commands
# on background. Other ways (shell=True with "&" and ssh with -f)
# Check socket presence on local side.
while time.time() < time_stop:
# It can take a moment for ssh to create the socket file.
- ret_code, _ = run(["ls", "-l", self._local_vpp_socket], check=False)
+ ret_code, _ = run(
+ [u"ls", u"-l", self._local_vpp_socket], check=False
+ )
if not ret_code:
break
time.sleep(0.1)
else:
- raise RuntimeError("Local side socket has not appeared.")
+ raise RuntimeError(u"Local side socket has not appeared.")
if priv_key:
# Socket up means the key has been read. Delete file by closing it.
key_file.close()
vpp_instance.transport.server_address = self._local_vpp_socket
# It seems we can get read error even if every preceding check passed.
# Single retry seems to help.
- for _ in xrange(2):
+ for _ in range(2):
try:
- vpp_instance.connect_sync("csit_socket")
+ vpp_instance.connect_sync(u"csit_socket")
except (IOError, struct.error) as err:
- logger.warn("Got initial connect error {err!r}".format(err=err))
+ logger.warn(f"Got initial connect error {err!r}")
vpp_instance.disconnect()
else:
break
else:
- raise RuntimeError("Failed to connect to VPP over a socket.")
+ raise RuntimeError(u"Failed to connect to VPP over a socket.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
Arguments related to possible exception are entirely ignored.
"""
self.vpp_instance.disconnect()
- run(["ssh", "-S", self._ssh_control_socket, "-O", "exit", "0.0.0.0"],
- check=False)
+ run([
+ u"ssh", u"-S", self._ssh_control_socket, u"-O", u"exit", u"0.0.0.0"
+ ], check=False)
shutil.rmtree(self._temp_dir)
def add(self, csit_papi_command, history=True, **kwargs):
self.crc_checker.report_initial_conflicts()
if history:
PapiHistory.add_to_papi_history(
- self._node, csit_papi_command, **kwargs)
+ self._node, csit_papi_command, **kwargs
+ )
self.crc_checker.check_api_name(csit_papi_command)
self._api_command_list.append(
- dict(api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)))
+ dict(
+ api_name=csit_papi_command,
+ api_args=copy.deepcopy(kwargs)
+ )
+ )
return self
def get_replies(self, err_msg="Failed to get replies."):
"""
return self._execute(err_msg=err_msg)
- def get_reply(self, err_msg="Failed to get reply."):
+ def get_reply(self, err_msg=u"Failed to get reply."):
"""Get reply from VPP Python API.
The reply is parsed into dict-like object,
"""
replies = self.get_replies(err_msg=err_msg)
if len(replies) != 1:
- raise RuntimeError("Expected single reply, got {replies!r}".format(
- replies=replies))
+ raise RuntimeError(f"Expected single reply, got {replies!r}")
return replies[0]
- def get_sw_if_index(self, err_msg="Failed to get reply."):
+ def get_sw_if_index(self, err_msg=u"Failed to get reply."):
"""Get sw_if_index from reply from VPP Python API.
Frequently, the caller is only interested in sw_if_index field
:raises AssertionError: If retval is nonzero, parsing or ssh error.
"""
reply = self.get_reply(err_msg=err_msg)
- logger.trace("Getting index from {reply!r}".format(reply=reply))
- return reply["sw_if_index"]
+ logger.trace(f"Getting index from {reply!r}")
+ return reply[u"sw_if_index"]
def get_details(self, err_msg="Failed to get dump details."):
"""Get dump details from VPP Python API.
return self._execute(err_msg)
@staticmethod
- def run_cli_cmd(node, cli_cmd, log=True,
- remote_vpp_socket=Constants.SOCKSVR_PATH):
+ def run_cli_cmd(
+ node, cli_cmd, log=True, remote_vpp_socket=Constants.SOCKSVR_PATH):
"""Run a CLI command as cli_inband, return the "reply" field of reply.
Optionally, log the field value.
:returns: CLI output.
:rtype: str
"""
- cmd = 'cli_inband'
- args = dict(cmd=cli_cmd)
- err_msg = "Failed to run 'cli_inband {cmd}' PAPI command on host " \
- "{host}".format(host=node['host'], cmd=cli_cmd)
+ cmd = u"cli_inband"
+ args = dict(
+ cmd=cli_cmd
+ )
+ err_msg = f"Failed to run 'cli_inband {cli_cmd}' PAPI command " \
+ f"on host {node[u'host']}"
+
with PapiSocketExecutor(node, remote_vpp_socket) as papi_exec:
- reply = papi_exec.add(cmd, **args).get_reply(err_msg)["reply"]
+ reply = papi_exec.add(cmd, **args).get_reply(err_msg)[u"reply"]
if log:
logger.info(
- "{cmd} ({host} - {remote_vpp_socket}):\n{reply}".
- format(cmd=cmd, reply=reply.encode('utf-8').strip(),
- remote_vpp_socket=remote_vpp_socket, host=node['host']))
+ f"{cmd} ({node[u'host']} - {remote_vpp_socket}):\n"
+ f"{reply.encode('utf-8').strip()}"
+ )
return reply
@staticmethod
"""
sockets = Topology.get_node_sockets(node, socket_type=SocketType.PAPI)
if sockets:
- for socket in sockets.values():
+ for socket in sockets:
PapiSocketExecutor.run_cli_cmd(
- node, cli_cmd, log=log, remote_vpp_socket=socket)
+ node, cli_cmd, log=log, remote_vpp_socket=socket
+ )
@staticmethod
def dump_and_log(node, cmds):
with PapiSocketExecutor(node) as papi_exec:
for cmd in cmds:
dump = papi_exec.add(cmd).get_details()
- logger.debug("{cmd}:\n{data}".format(
- cmd=cmd, data=pformat(dump)))
+ logger.debug(f"{cmd}:\n{pformat(dump)}")
- def _execute(self, err_msg="Undefined error message"):
+ def _execute(self, err_msg=u"Undefined error message", exp_rv=0):
"""Turn internal command list into data and execute; return replies.
This method also clears the internal command list.
self._api_command_list = list()
replies = list()
for command in local_list:
- api_name = command["api_name"]
+ api_name = command[u"api_name"]
papi_fn = getattr(vpp_instance.api, api_name)
try:
try:
- reply = papi_fn(**command["api_args"])
+ reply = papi_fn(**command[u"api_args"])
except (IOError, struct.error) as err:
- # Ocassionally an error happens, try reconnect.
- logger.warn("Reconnect after error: {err!r}".format(
- err=err))
+ # Occasionally an error happens, try reconnect.
+ logger.warn(f"Reconnect after error: {err!r}")
self.vpp_instance.disconnect()
- # Testing showes immediate reconnect fails.
+ # Testing shows immediate reconnect fails.
time.sleep(1)
- self.vpp_instance.connect_sync("csit_socket")
- logger.trace("Reconnected.")
- reply = papi_fn(**command["api_args"])
+ self.vpp_instance.connect_sync(u"csit_socket")
+ logger.trace(u"Reconnected.")
+ reply = papi_fn(**command[u"api_args"])
except (AttributeError, IOError, struct.error) as err:
- raise_from(AssertionError(err_msg), err, level="INFO")
+ raise AssertionError(err_msg) from err
# *_dump commands return a list of objects; wrap an ordinary reply
# into a list so both cases are processed uniformly below.
if not isinstance(reply, list):
reply = [reply]
for item in reply:
self.crc_checker.check_api_name(item.__class__.__name__)
dict_item = dictize(item)
- if "retval" in dict_item.keys():
+ if u"retval" in dict_item.keys():
# *_details messages do not contain retval.
- retval = dict_item["retval"]
- if retval != 0:
+ retval = dict_item[u"retval"]
+ if retval != exp_rv:
# TODO: What exactly to log and raise here?
- err = AssertionError("Retval {rv!r}".format(rv=retval))
- # Lowering log level, some retval!=0 calls are expected.
- # TODO: Expose level argument so callers can decide?
- raise_from(AssertionError(err_msg), err, level="DEBUG")
+ raise AssertionError(
+ f"Retval {retval!r} does not match expected "
+ f"retval {exp_rv!r}"
+ )
replies.append(dict_item)
return replies
:param node: Node to run command(s) on.
:type node: dict
"""
-
# Node to run command(s) on.
self._node = node
try:
self._ssh.connect(self._node)
except IOError:
- raise RuntimeError("Cannot open SSH connection to host {host} to "
- "execute PAPI command(s)".
- format(host=self._node["host"]))
+ raise RuntimeError(
+ f"Cannot open SSH connection to host {self._node[u'host']} "
+ f"to execute PAPI command(s)"
+ )
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._ssh.disconnect(self._node)
- def add(self, csit_papi_command="vpp-stats", history=True, **kwargs):
+ def add(self, csit_papi_command=u"vpp-stats", history=True, **kwargs):
"""Add next command to internal command list; return self.
The argument name 'csit_papi_command' must be unique enough as it cannot
"""
if history:
PapiHistory.add_to_papi_history(
- self._node, csit_papi_command, **kwargs)
- self._api_command_list.append(dict(
- api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)))
+ self._node, csit_papi_command, **kwargs
+ )
+ self._api_command_list.append(
+ dict(
+ api_name=csit_papi_command, api_args=copy.deepcopy(kwargs)
+ )
+ )
return self
- def get_stats(self, err_msg="Failed to get statistics.", timeout=120,
- socket=Constants.SOCKSTAT_PATH):
+ def get_stats(
+ self, err_msg=u"Failed to get statistics.", timeout=120,
+ socket=Constants.SOCKSTAT_PATH):
"""Get VPP Stats from VPP Python API.
:param err_msg: The message used if the PAPI command(s) execution fails.
:param timeout: Timeout in seconds.
+ :param socket: Path to Stats socket to tunnel to.
:type err_msg: str
:type timeout: int
+ :type socket: str
:returns: Requested VPP statistics.
:rtype: list of dict
"""
- paths = [cmd['api_args']['path'] for cmd in self._api_command_list]
+ paths = [cmd[u"api_args"][u"path"] for cmd in self._api_command_list]
self._api_command_list = list()
stdout = self._execute_papi(
- paths, method='stats', err_msg=err_msg, timeout=timeout,
- socket=socket)
+ paths, method=u"stats", err_msg=err_msg, timeout=timeout,
+ socket=socket
+ )
return json.loads(stdout)
:rtype: dict or str or int
"""
if isinstance(val, dict):
- for val_k, val_v in val.iteritems():
+ for val_k, val_v in val.items():
val[str(val_k)] = process_value(val_v)
return val
elif isinstance(val, list):
val[idx] = process_value(val_l)
return val
else:
- return binascii.hexlify(val) if isinstance(val, str) else val
+ return val.encode().hex() if isinstance(val, str) else val
api_data_processed = list()
for api in api_d:
api_args_processed = dict()
- for a_k, a_v in api["api_args"].iteritems():
+ for a_k, a_v in api[u"api_args"].items():
api_args_processed[str(a_k)] = process_value(a_v)
- api_data_processed.append(dict(api_name=api["api_name"],
- api_args=api_args_processed))
+ api_data_processed.append(
+ dict(
+ api_name=api[u"api_name"],
+ api_args=api_args_processed
+ )
+ )
return api_data_processed
- def _execute_papi(self, api_data, method='request', err_msg="",
- timeout=120, socket=None):
+ def _execute_papi(
+ self, api_data, method=u"request", err_msg=u"", timeout=120,
+ socket=None):
"""Execute PAPI command(s) on remote node and store the result.
:param api_data: List of APIs with their arguments.
:raises AssertionError: If PAPI command(s) execution has failed.
"""
if not api_data:
- raise RuntimeError("No API data provided.")
+ raise RuntimeError(u"No API data provided.")
json_data = json.dumps(api_data) \
- if method in ("stats", "stats_request") \
+ if method in (u"stats", u"stats_request") \
else json.dumps(self._process_api_data(api_data))
- sock = " --socket {socket}".format(socket=socket) if socket else ""
- cmd = (
- "{fw_dir}/{papi_provider} --method {method} --data '{json}'{socket}"
- .format(fw_dir=Constants.REMOTE_FW_DIR,
- papi_provider=Constants.RESOURCES_PAPI_PROVIDER,
- method=method, json=json_data, socket=sock))
+ sock = f" --socket {socket}" if socket else u""
+ cmd = f"{Constants.REMOTE_FW_DIR}/{Constants.RESOURCES_PAPI_PROVIDER}" \
+ f" --method {method} --data '{json_data}'{sock}"
try:
ret_code, stdout, _ = self._ssh.exec_command_sudo(
- cmd=cmd, timeout=timeout, log_stdout_err=False)
+ cmd=cmd, timeout=timeout, log_stdout_err=False
+ )
# TODO: Fail on non-empty stderr?
except SSHTimeout:
- logger.error("PAPI command(s) execution timeout on host {host}:"
- "\n{apis}".format(host=self._node["host"],
- apis=api_data))
+ logger.error(
+ f"PAPI command(s) execution timeout on host "
+ f"{self._node[u'host']}:\n{api_data}"
+ )
raise
except Exception as exc:
- raise_from(RuntimeError(
- "PAPI command(s) execution on host {host} "
- "failed: {apis}".format(
- host=self._node["host"], apis=api_data)), exc)
+ raise RuntimeError(
+ f"PAPI command(s) execution on host {self._node[u'host']} "
+ f"failed: {api_data}"
+ ) from exc
if ret_code != 0:
raise AssertionError(err_msg)
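Native exception chaining replaces the raise_from() helper deleted later in this diff; a minimal demonstration of what `raise ... from ...` preserves:

try:
    try:
        raise IOError(u"low-level failure")
    except IOError as err:
        raise RuntimeError(u"high-level context") from err
except RuntimeError as exc:
    # The original error survives as __cause__ and the default traceback
    # printer renders both, so no manual logging helper is needed.
    assert isinstance(exc.__cause__, IOError)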
from resources.libraries.python.topology import NodeType, DICT__nodes
-__all__ = ["DICT__DUTS_PAPI_HISTORY", "PapiHistory"]
+__all__ = [u"DICT__DUTS_PAPI_HISTORY", u"PapiHistory"]
DICT__DUTS_PAPI_HISTORY = dict()
:param node: DUT node to reset PAPI command history for.
:type node: dict
"""
- DICT__DUTS_PAPI_HISTORY[node['host']] = list()
+ DICT__DUTS_PAPI_HISTORY[node[u"host"]] = list()
@staticmethod
def reset_papi_history_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
PapiHistory.reset_papi_history(node)
@staticmethod
"""
if papi:
args = list()
- for key, val in kwargs.iteritems():
- args.append("{key}={val!r}".format(key=key, val=val))
- item = "{cmd}({args})".format(cmd=csit_papi_command,
- args=",".join(args))
+ for key, val in kwargs.items():
+ args.append(f"{key}={val!r}")
+ item = f"{csit_papi_command}({u','.join(args)})"
else:
# This else part is here to store VAT commands.
# VAT history is not used.
# TODO: Remove when VatExecutor is completely removed.
- item = "{cmd}".format(cmd=csit_papi_command)
- DICT__DUTS_PAPI_HISTORY[node['host']].append(item)
+ item = f"{csit_papi_command}"
+ DICT__DUTS_PAPI_HISTORY[node[u"host"]].append(item)
@staticmethod
def show_papi_history(node):
:param node: DUT node to show PAPI command history for.
:type node: dict
"""
- history_list = DICT__DUTS_PAPI_HISTORY[node['host']]
+ history_list = DICT__DUTS_PAPI_HISTORY[node[u"host"]]
if not history_list:
- history_list = ("No PAPI command executed", )
- logger.info(
- "{0} PAPI command history:\n{1}\n".format(
- node['host'], "\n".join(history_list)))
+ history_list = (u"No PAPI command executed", )
+ history = u'\n'.join(history_list)
+ logger.info(f"{node[u'host']} PAPI command history:\n{history}\n")
@staticmethod
def show_papi_history_on_all_duts(nodes):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
PapiHistory.show_papi_history(node)
:type exceed_dscp: str
:type violate_dscp: str
"""
- cmd = 'policer_add_del'
+ cmd = u"policer_add_del"
args = dict(
is_add=int(is_add),
name=str(policer_name),
eb=int(ebs),
rate_type=getattr(PolicerRateType, rate_type.upper()).value,
round_type=getattr(
- PolicerRoundType, 'ROUND_TO_{rt}'.format(
- rt=round_type.upper())).value,
- type=getattr(PolicerType, 'TYPE_{pt}'.format(
- pt=policer_type.upper())).value,
+ PolicerRoundType, f"ROUND_TO_{round_type.upper()}"
+ ).value,
+ type=getattr(PolicerType, f"TYPE_{policer_type.upper()}").value,
conform_action_type=getattr(
- PolicerAction, conform_action_type.upper()).value,
- conform_dscp=getattr(DSCP, 'D_{dscp}'.format(
- dscp=conform_dscp.upper())).value
+ PolicerAction, conform_action_type.upper()
+ ).value,
+ conform_dscp=getattr(DSCP, f"D_{conform_dscp.upper()}").value
if
conform_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name
else 0,
exceed_action_type=getattr(
- PolicerAction, exceed_action_type.upper()).value,
- exceed_dscp=getattr(DSCP, 'D_{dscp}'.format(
- dscp=exceed_dscp.upper())).value
+ PolicerAction, exceed_action_type.upper()
+ ).value,
+ exceed_dscp=getattr(DSCP, f"D_{exceed_dscp.upper()}").value
if
exceed_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name
else 0,
violate_action_type=getattr(
- PolicerAction, violate_action_type.upper()).value,
- violate_dscp=getattr(DSCP, 'D_{dscp}'.format(
- dscp=violate_dscp.upper())).value
+ PolicerAction, violate_action_type.upper()
+ ).value,
+ violate_dscp=getattr(DSCP, f"D_{violate_dscp.upper()}").value
if
violate_action_type.upper() == PolicerAction.MARK_AND_TRANSMIT.name
else 0,
- color_aware=1 if color_aware == "'ca'" else 0
+ color_aware=1 if color_aware == u"'ca'" else 0
)
- err_msg = 'Failed to configure policer {pn} on host {host}'.format(
- pn=policer_name, host=node['host'])
+ err_msg = f"Failed to configure policer {policer_name} " \
+ f"on host {node['host']}"
with PapiSocketExecutor(node) as papi_exec:
reply = papi_exec.add(cmd, **args).get_reply(err_msg)
- return reply['policer_index']
+ return reply[u"policer_index"]
@staticmethod
def policer_classify_set_interface(
:type ip6_table_index: int
:type l2_table_index: int
"""
- if isinstance(interface, basestring):
+ if isinstance(interface, str):
sw_if_index = Topology.get_interface_sw_index(node, interface)
else:
sw_if_index = interface
- cmd = 'policer_classify_set_interface'
-
+ cmd = u"policer_classify_set_interface"
args = dict(
is_add=int(is_add),
sw_if_index=sw_if_index,
ip6_table_index=int(ip6_table_index),
l2_table_index=int(l2_table_index)
)
- err_msg = 'Failed to set/unset policer classify interface {ifc} ' \
- 'on host {host}'.format(ifc=interface, host=node['host'])
+ err_msg = f"Failed to set/unset policer classify interface " \
+ f"{interface} on host {node[u'host']}"
with PapiSocketExecutor(node) as papi_exec:
papi_exec.add(cmd, **args).get_reply(err_msg)
:returns: DSCP numeric value.
:rtype: int
"""
- return getattr(DSCP, 'D_{dscp}'.format(dscp=dscp.upper())).value
+ return getattr(DSCP, f"D_{dscp.upper()}").value
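The getattr pattern above maps a user-facing string onto an enum member; a self-contained sketch of the same pattern (hypothetical enum, not the real DSCP class):

from enum import IntEnum

class DscpSketch(IntEnum):
    """Hypothetical two-member stand-in for the real DSCP enum."""
    D_CS0 = 0
    D_AF11 = 10

def dscp_value_sketch(name):
    # u"af11" -> DscpSketch.D_AF11 -> 10, mirroring the lookup above.
    return getattr(DscpSketch, f"D_{name.upper()}").value

assert dscp_value_sketch(u"af11") == 10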
+++ /dev/null
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library holding utility functions to be replaced by later Python builtins."""
-
-from robot.api import logger
-
-
-def raise_from(raising, excepted, level="WARN"):
- """Function to be replaced by "raise from" in Python 3.
-
- Neither "six" nor "future" offer good enough implementation right now.
- chezsoi.org/lucas/blog/displaying-chained-exceptions-stacktraces-in-python-2
-
- Current implementation just logs the excepted error, and raises the new one.
- For allower log level values, see:
- robot-framework.readthedocs.io/en/latest/autodoc/robot.api.html#log-levels
-
- :param raising: The exception to raise.
- :param excepted: The exception we excepted and want to log.
- :param level: Robot logger logging level to log with.
- :type raising: BaseException
- :type excepted: BaseException
- :type level: str
- :raises: raising
- """
- logger.write("Excepted: {exc!r}\nRaising: {rai!r}".format(
- exc=excepted, rai=raising), level)
- raise raising
from resources.libraries.python.QemuUtils import QemuUtils
from resources.libraries.python.topology import NodeType, Topology
-__all__ = ["QemuManager"]
+__all__ = [u"QemuManager"]
class QemuManager(object):
"""QEMU lifecycle management class"""
# Use one instance of class per tests.
- ROBOT_LIBRARY_SCOPE = 'TEST CASE'
+ ROBOT_LIBRARY_SCOPE = u"TEST CASE"
def __init__(self, nodes):
"""Init QemuManager object."""
:param kwargs: Named parameters.
:type kwargs: dict
"""
- node = kwargs['node']
- nf_chains = int(kwargs['nf_chains'])
- nf_nodes = int(kwargs['nf_nodes'])
- queues = kwargs['rxq_count_int'] if kwargs['auto_scale'] else 1
- vs_dtc = kwargs['vs_dtc']
- nf_dtc = kwargs['vs_dtc'] if kwargs['auto_scale'] else kwargs['nf_dtc']
- nf_dtcr = kwargs['nf_dtcr'] if isinstance(kwargs['nf_dtcr'], int) else 2
+ node = kwargs[u"node"]
+ nf_chains = int(kwargs[u"nf_chains"])
+ nf_nodes = int(kwargs[u"nf_nodes"])
+ queues = kwargs[u"rxq_count_int"] if kwargs[u"auto_scale"] else 1
+ vs_dtc = kwargs[u"vs_dtc"]
+ nf_dtc = kwargs[u"vs_dtc"] if kwargs[u"auto_scale"] \
+ else kwargs[u"nf_dtc"]
+ nf_dtcr = kwargs[u"nf_dtcr"] \
+ if isinstance(kwargs[u"nf_dtcr"], int) else 2
img = Constants.QEMU_VM_KERNEL
for nf_chain in range(1, nf_chains + 1):
for nf_node in range(1, nf_nodes + 1):
qemu_id = (nf_chain - 1) * nf_nodes + nf_node
- name = '{node}_{qemu_id}'.format(node=node, qemu_id=qemu_id)
- sock1 = '/var/run/vpp/sock-{qemu_id}-1'.format(qemu_id=qemu_id)
- sock2 = '/var/run/vpp/sock-{qemu_id}-2'.format(qemu_id=qemu_id)
+ name = f"{node}_{qemu_id}"
+ sock1 = f"/var/run/vpp/sock-{qemu_id}-1"
+ sock2 = f"/var/run/vpp/sock-{qemu_id}-2"
+ idx1 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1
vif1_mac = Topology.get_interface_mac(
- self.nodes[node], 'vhost{idx}'.format(
- idx=(nf_chain - 1) * nf_nodes * 2 + nf_node * 2 - 1)) \
- if kwargs['vnf'] == 'testpmd_mac' \
- else kwargs['tg_if1_mac'] if nf_node == 1 \
- else '52:54:00:00:{id:02x}:02'.format(id=qemu_id - 1)
+ self.nodes[node], f"vhost{idx1}"
+ ) if kwargs[u"vnf"] == u"testpmd_mac" \
+ else kwargs[u"tg_if1_mac"] if nf_node == 1 \
+ else f"52:54:00:00:{(qemu_id - 1):02x}:02"
+ idx2 = (nf_chain - 1) * nf_nodes * 2 + nf_node * 2
vif2_mac = Topology.get_interface_mac(
- self.nodes[node], 'vhost{idx}'.format(
- idx=(nf_chain - 1) * nf_nodes * 2 + nf_node * 2)) \
- if kwargs['vnf'] == 'testpmd_mac' \
- else kwargs['tg_if2_mac'] if nf_node == nf_nodes \
- else '52:54:00:00:{id:02x}:01'.format(id=qemu_id + 1)
+ self.nodes[node], f"vhost{idx2}"
+ ) if kwargs[u"vnf"] == u"testpmd_mac" \
+ else kwargs[u"tg_if2_mac"] if nf_node == nf_nodes \
+ else f"52:54:00:00:{(qemu_id + 1):02x}:01"
self.machines_affinity[name] = CpuUtils.get_affinity_nf(
nodes=self.nodes, node=node, nf_chains=nf_chains,
nf_nodes=nf_nodes, nf_chain=nf_chain, nf_node=nf_node,
- vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr)
+ vs_dtc=vs_dtc, nf_dtc=nf_dtc, nf_dtcr=nf_dtcr
+ )
self.machines[name] = QemuUtils(
node=self.nodes[node], qemu_id=qemu_id,
smp=len(self.machines_affinity[name]), mem=4096,
- vnf=kwargs['vnf'], img=img)
+ vnf=kwargs[u"vnf"], img=img
+ )
self.machines[name].configure_kernelvm_vnf(
- mac1='52:54:00:00:{id:02x}:01'.format(id=qemu_id),
- mac2='52:54:00:00:{id:02x}:02'.format(id=qemu_id),
- vif1_mac=vif1_mac,
- vif2_mac=vif2_mac,
- queues=queues,
- jumbo_frames=kwargs['jumbo'])
+ mac1=f"52:54:00:00:{qemu_id:02x}:01",
+ mac2=f"52:54:00:00:{qemu_id:02x}:02",
+ vif1_mac=vif1_mac, vif2_mac=vif2_mac, queues=queues,
+ jumbo_frames=kwargs[u"jumbo"]
+ )
self.machines[name].qemu_add_vhost_user_if(
- sock1, jumbo_frames=kwargs['jumbo'], queues=queues,
- queue_size=kwargs['perf_qemu_qsz'])
+ sock1, jumbo_frames=kwargs[u"jumbo"], queues=queues,
+ queue_size=kwargs[u"perf_qemu_qsz"]
+ )
self.machines[name].qemu_add_vhost_user_if(
- sock2, jumbo_frames=kwargs['jumbo'], queues=queues,
- queue_size=kwargs['perf_qemu_qsz'])
+ sock2, jumbo_frames=kwargs[u"jumbo"], queues=queues,
+ queue_size=kwargs[u"perf_qemu_qsz"]
+ )
def construct_vms_on_all_nodes(self, **kwargs):
"""Construct 1..Mx1..N VMs(s) with specified name on all nodes.
"""
self.initialize()
for node in self.nodes:
- if self.nodes[node]['type'] == NodeType.DUT:
+ if self.nodes[node][u"type"] == NodeType.DUT:
self.construct_vms_on_node(node=node, **kwargs)
def start_all_vms(self, pinning=False):
:param pinning: If True, then do also QEMU process pinning.
:type pinning: bool
"""
- for machine, machine_affinity in zip(self.machines.values(),
- self.machines_affinity.values()):
+ for machine, machine_affinity in \
+ zip(self.machines.values(), self.machines_affinity.values()):
machine.qemu_start()
if pinning:
machine.qemu_set_affinity(*machine_affinity)
"""QEMU utilities library."""
import json
+
from re import match
from string import Template
from time import sleep
from robot.api import logger
+
from resources.libraries.python.Constants import Constants
from resources.libraries.python.DpdkUtil import DpdkUtil
from resources.libraries.python.DUTSetup import DUTSetup
from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
-from resources.libraries.python.VPPUtil import VPPUtil
from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
+from resources.libraries.python.VPPUtil import VPPUtil
-__all__ = ["QemuUtils"]
+__all__ = [u"QemuUtils"]
class QemuUtils(object):
"""QEMU utilities."""
# Use one instance of class per tests.
- ROBOT_LIBRARY_SCOPE = 'TEST CASE'
+ ROBOT_LIBRARY_SCOPE = u"TEST CASE"
- def __init__(self, node, qemu_id=1, smp=1, mem=512, vnf=None,
- img=Constants.QEMU_VM_IMAGE):
+ def __init__(
+ self, node, qemu_id=1, smp=1, mem=512, vnf=None,
+ img=Constants.QEMU_VM_IMAGE):
"""Initialize QemuUtil class.
:param node: Node to run QEMU on.
self._vhost_id = 0
self._node = node
self._arch = Topology.get_node_arch(self._node)
- dpdk_target = 'arm64-armv8a' if self._arch == 'aarch64' \
- else 'x86_64-native'
- self._testpmd_path = '{path}/{dpdk_target}-linuxapp-gcc/app'\
- .format(path=Constants.QEMU_VM_DPDK, dpdk_target=dpdk_target)
+ dpdk_target = u"arm64-armv8a" if self._arch == u"aarch64" \
+ else u"x86_64-native"
+ self._testpmd_path = f"{Constants.QEMU_VM_DPDK}/" \
+ f"{dpdk_target}-linuxapp-gcc/app"
self._vm_info = {
- 'host': node['host'],
- 'type': NodeType.VM,
- 'port': 10021 + qemu_id,
- 'serial': 4555 + qemu_id,
- 'username': 'cisco',
- 'password': 'cisco',
- 'interfaces': {},
+ u"host": node[u"host"],
+ u"type": NodeType.VM,
+ u"port": 10021 + qemu_id,
+ u"serial": 4555 + qemu_id,
+ u"username": 'cisco',
+ u"password": 'cisco',
+ u"interfaces": {},
}
- if node['port'] != 22:
- self._vm_info['host_port'] = node['port']
- self._vm_info['host_username'] = node['username']
- self._vm_info['host_password'] = node['password']
+ if node[u"port"] != 22:
+ self._vm_info[u"host_port"] = node[u"port"]
+ self._vm_info[u"host_username"] = node[u"username"]
+ self._vm_info[u"host_password"] = node[u"password"]
# Input Options.
self._opt = dict()
- self._opt['qemu_id'] = qemu_id
- self._opt['mem'] = int(mem)
- self._opt['smp'] = int(smp)
- self._opt['img'] = img
- self._opt['vnf'] = vnf
+ self._opt[u"qemu_id"] = qemu_id
+ self._opt[u"mem"] = int(mem)
+ self._opt[u"smp"] = int(smp)
+ self._opt[u"img"] = img
+ self._opt[u"vnf"] = vnf
# Temporary files.
self._temp = dict()
- self._temp['pidfile'] = '/var/run/qemu_{id}.pid'.format(id=qemu_id)
+ self._temp[u"pidfile"] = f"/var/run/qemu_{qemu_id}.pid"
if img == Constants.QEMU_VM_IMAGE:
- self._opt['vm_type'] = 'nestedvm'
- self._temp['qmp'] = '/var/run/qmp_{id}.sock'.format(id=qemu_id)
- self._temp['qga'] = '/var/run/qga_{id}.sock'.format(id=qemu_id)
+ self._opt[u"vm_type"] = u"nestedvm"
+ self._temp[u"qmp"] = f"/var/run/qmp_{qemu_id}.sock"
+ self._temp[u"qga"] = f"/var/run/qga_{qemu_id}.sock"
elif img == Constants.QEMU_VM_KERNEL:
- self._opt['img'], _ = exec_cmd_no_error(
- node,
- 'ls -1 {img}* | tail -1'.format(img=Constants.QEMU_VM_KERNEL),
- message='Qemu Kernel VM image not found!')
- self._opt['vm_type'] = 'kernelvm'
- self._temp['log'] = '/tmp/serial_{id}.log'.format(id=qemu_id)
- self._temp['ini'] = '/etc/vm_init_{id}.conf'.format(id=qemu_id)
- self._opt['initrd'], _ = exec_cmd_no_error(
- node,
- 'ls -1 {initrd}* | tail -1'.format(
- initrd=Constants.QEMU_VM_KERNEL_INITRD),
- message='Qemu Kernel initrd image not found!')
+ self._opt[u"img"], _ = exec_cmd_no_error(
+ node, f"ls -1 {Constants.QEMU_VM_KERNEL}* | tail -1",
+ message=u"Qemu Kernel VM image not found!"
+ )
+ self._opt[u"vm_type"] = u"kernelvm"
+ self._temp[u"log"] = f"/tmp/serial_{qemu_id}.log"
+ self._temp[u"ini"] = f"/etc/vm_init_{qemu_id}.conf"
+ self._opt[u"initrd"], _ = exec_cmd_no_error(
+ node, f"ls -1 {Constants.QEMU_VM_KERNEL_INITRD}* | tail -1",
+ message=u"Qemu Kernel initrd image not found!"
+ )
else:
- raise RuntimeError('QEMU: Unknown VM image option: {}'.format(img))
+ raise RuntimeError(f"QEMU: Unknown VM image option: {img}")
# Computed parameters for QEMU command line.
- self._params = OptionString(prefix='-')
+ self._params = OptionString(prefix=u"-")
self.add_params()
def add_params(self):
"""Set QEMU command line parameters."""
self.add_default_params()
- if self._opt.get('vm_type', '') == 'nestedvm':
+ if self._opt.get(u"vm_type", u"") == u"nestedvm":
self.add_nestedvm_params()
- elif self._opt.get('vm_type', '') == 'kernelvm':
+ elif self._opt.get(u"vm_type", u"") == u"kernelvm":
self.add_kernelvm_params()
else:
- raise RuntimeError('QEMU: Unsupported VM type!')
+ raise RuntimeError(u"QEMU: Unsupported VM type!")
def add_default_params(self):
"""Set default QEMU command line parameters."""
- self._params.add('daemonize')
- self._params.add('nodefaults')
- self._params.add_with_value('name', 'vnf{qemu},debug-threads=on'.format(
- qemu=self._opt.get('qemu_id')))
- self._params.add('no-user-config')
- self._params.add_with_value('monitor', 'none')
- self._params.add_with_value('display', 'none')
- self._params.add_with_value('vga', 'none')
- self._params.add('enable-kvm')
- self._params.add_with_value('pidfile', self._temp.get('pidfile'))
- self._params.add_with_value('cpu', 'host')
-
- if self._arch == 'aarch64':
- machine_args = 'virt,accel=kvm,usb=off,mem-merge=off,gic-version=3'
- else:
- machine_args = 'pc,accel=kvm,usb=off,mem-merge=off'
- self._params.add_with_value(
- 'machine', machine_args)
+ self._params.add(u"daemonize")
+ self._params.add(u"nodefaults")
self._params.add_with_value(
- 'smp', '{smp},sockets=1,cores={smp},threads=1'.format(
- smp=self._opt.get('smp')))
+ u"name", f"vnf{self._opt.get(u'qemu_id')},debug-threads=on"
+ )
+ self._params.add(u"no-user-config")
+ self._params.add_with_value(u"monitor", u"none")
+ self._params.add_with_value(u"display", u"none")
+ self._params.add_with_value(u"vga", u"none")
+ self._params.add(u"enable-kvm")
+ self._params.add_with_value(u"pidfile", self._temp.get(u"pidfile"))
+ self._params.add_with_value(u"cpu", u"host")
+
+ if self._arch == u"aarch64":
+ machine_args = u"virt,accel=kvm,usb=off,mem-merge=off,gic-version=3"
+ else:
+ machine_args = u"pc,accel=kvm,usb=off,mem-merge=off"
+ self._params.add_with_value(u"machine", machine_args)
self._params.add_with_value(
- 'object', 'memory-backend-file,id=mem,size={mem}M,'
- 'mem-path=/dev/hugepages,share=on'.format(mem=self._opt.get('mem')))
+ u"smp", f"{self._opt.get(u'smp')},sockets=1,"
+ f"cores={self._opt.get(u'smp')},threads=1"
+ )
self._params.add_with_value(
- 'm', '{mem}M'.format(mem=self._opt.get('mem')))
- self._params.add_with_value('numa', 'node,memdev=mem')
- self._params.add_with_value('balloon', 'none')
+ u"object", f"memory-backend-file,id=mem,"
+ f"size={self._opt.get(u'mem')}M,mem-path=/dev/hugepages,share=on"
+ )
+ self._params.add_with_value(u"m", f"{self._opt.get(u'mem')}M")
+ self._params.add_with_value(u"numa", u"node,memdev=mem")
+ self._params.add_with_value(u"balloon", u"none")
def add_nestedvm_params(self):
"""Set NestedVM QEMU parameters."""
self._params.add_with_value(
- 'net', 'nic,macaddr=52:54:00:00:{qemu:02x}:ff'.format(
- qemu=self._opt.get('qemu_id')))
+ u"net",
+ f"nic,macaddr=52:54:00:00:{self._opt.get(u'qemu_id'):02x}:ff"
+ )
self._params.add_with_value(
- 'net', 'user,hostfwd=tcp::{info[port]}-:22'.format(
- info=self._vm_info))
- locking = ',file.locking=off'
+ u"net", f"user,hostfwd=tcp::{self._vm_info[u'port']}-:22"
+ )
+ locking = u",file.locking=off"
self._params.add_with_value(
- 'drive', 'file={img},format=raw,cache=none,if=virtio{locking}'.
- format(img=self._opt.get('img'), locking=locking))
+ u"drive", f"file={self._opt.get(u'img')},"
+ f"format=raw,cache=none,if=virtio{locking}"
+ )
self._params.add_with_value(
- 'qmp', 'unix:{qmp},server,nowait'.format(qmp=self._temp.get('qmp')))
+ u"qmp", f"unix:{self._temp.get(u'qmp')},server,nowait"
+ )
self._params.add_with_value(
- 'chardev', 'socket,host=127.0.0.1,port={info[serial]},'
- 'id=gnc0,server,nowait'.format(info=self._vm_info))
- self._params.add_with_value('device', 'isa-serial,chardev=gnc0')
+ u"chardev", f"socket,host=127.0.0.1,"
+ f"port={self._vm_info[u'serial']},id=gnc0,server,nowait")
+ self._params.add_with_value(u"device", u"isa-serial,chardev=gnc0")
self._params.add_with_value(
- 'chardev', 'socket,path={qga},server,nowait,id=qga0'.format(
- qga=self._temp.get('qga')))
- self._params.add_with_value('device', 'isa-serial,chardev=qga0')
+ u"chardev", f"socket,path={self._temp.get(u'qga')},"
+ f"server,nowait,id=qga0"
+ )
+ self._params.add_with_value(u"device", u"isa-serial,chardev=qga0")
def add_kernelvm_params(self):
"""Set KernelVM QEMU parameters."""
- console = 'ttyAMA0' if self._arch == 'aarch64' else 'ttyS0'
- self._params.add_with_value('serial', 'file:{log}'.format(
- log=self._temp.get('log')))
- self._params.add_with_value(
- 'fsdev', 'local,id=root9p,path=/,security_model=none')
+ console = u"ttyAMA0" if self._arch == u"aarch64" else u"ttyS0"
self._params.add_with_value(
- 'device', 'virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot')
+ u"serial", f"file:{self._temp.get(u'log')}"
+ )
self._params.add_with_value(
- 'kernel', '{img}'.format(img=self._opt.get('img')))
+ u"fsdev", u"local,id=root9p,path=/,security_model=none"
+ )
self._params.add_with_value(
- 'initrd', '{initrd}'.format(initrd=self._opt.get('initrd')))
+ u"device", u"virtio-9p-pci,fsdev=root9p,mount_tag=virtioroot"
+ )
+ self._params.add_with_value(u"kernel", f"{self._opt.get(u'img')}")
+ self._params.add_with_value(u"initrd", f"{self._opt.get(u'initrd')}")
self._params.add_with_value(
- 'append', '"ro rootfstype=9p rootflags=trans=virtio '
- 'root=virtioroot console={console} tsc=reliable '
- 'hugepages=256 init={init} fastboot"'.format(
- console=console, init=self._temp.get('ini')))
+ u"append", f"'ro rootfstype=9p rootflags=trans=virtio "
+ f"root=virtioroot console={console} tsc=reliable hugepages=256 "
+ f"init={self._temp.get(u'ini')} fastboot'"
+ )
def create_kernelvm_config_vpp(self, **kwargs):
"""Create QEMU VPP config files.
file.
:type kwargs: dict
"""
- startup = ('/etc/vpp/vm_startup_{id}.conf'.
- format(id=self._opt.get('qemu_id')))
- running = ('/etc/vpp/vm_running_{id}.exec'.
- format(id=self._opt.get('qemu_id')))
+ startup = f"/etc/vpp/vm_startup_{self._opt.get(u'qemu_id')}.conf"
+ running = f"/etc/vpp/vm_running_{self._opt.get(u'qemu_id')}.exec"
- self._temp['startup'] = startup
- self._temp['running'] = running
- self._opt['vnf_bin'] = ('/usr/bin/vpp -c {startup}'.
- format(startup=startup))
+ self._temp[u"startup"] = startup
+ self._temp[u"running"] = running
+ self._opt[u"vnf_bin"] = f"/usr/bin/vpp -c {startup}"
# Create VPP startup configuration.
vpp_config = VppConfigGenerator()
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_exec(running)
vpp_config.add_socksvr()
- vpp_config.add_cpu_main_core('0')
- if self._opt.get('smp') > 1:
- vpp_config.add_cpu_corelist_workers('1-{smp}'.format(
- smp=self._opt.get('smp')-1))
- vpp_config.add_dpdk_dev('0000:00:06.0', '0000:00:07.0')
- vpp_config.add_dpdk_dev_default_rxq(kwargs['queues'])
- vpp_config.add_dpdk_log_level('debug')
- if not kwargs['jumbo_frames']:
+ vpp_config.add_cpu_main_core(u"0")
+ if self._opt.get(u"smp") > 1:
+ vpp_config.add_cpu_corelist_workers(f"1-{self._opt.get(u'smp')-1}")
+ vpp_config.add_dpdk_dev(u"0000:00:06.0", u"0000:00:07.0")
+ vpp_config.add_dpdk_dev_default_rxq(kwargs[u"queues"])
+ vpp_config.add_dpdk_log_level(u"debug")
+ if not kwargs[u"jumbo_frames"]:
vpp_config.add_dpdk_no_multi_seg()
vpp_config.add_dpdk_no_tx_checksum_offload()
- vpp_config.add_plugin('disable', 'default')
- vpp_config.add_plugin('enable', 'dpdk_plugin.so')
+ vpp_config.add_plugin(u"disable", [u"default"])
+ vpp_config.add_plugin(u"enable", [u"dpdk_plugin.so"])
vpp_config.write_config(startup)
# Create VPP running configuration.
- template = '{res}/{tpl}.exec'.format(res=Constants.RESOURCES_TPL_VM,
- tpl=self._opt.get('vnf'))
- exec_cmd_no_error(self._node, 'rm -f {running}'.format(running=running),
- sudo=True)
+ template = f"{Constants.RESOURCES_TPL_VM}/{self._opt.get(u'vnf')}.exec"
+ exec_cmd_no_error(self._node, f"rm -f {running}", sudo=True)
with open(template, 'r') as src_file:
src = Template(src_file.read())
exec_cmd_no_error(
- self._node, "echo '{out}' | sudo tee {running}".format(
- out=src.safe_substitute(**kwargs), running=running))
+ self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+ f"sudo tee {running}"
+ )
def create_kernelvm_config_testpmd_io(self, **kwargs):
"""Create QEMU testpmd-io command line.
:type kwargs: dict
"""
testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
- eal_corelist='0-{smp}'.format(smp=self._opt.get('smp') - 1),
+ eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
eal_driver=False,
eal_in_memory=True,
pmd_num_mbufs=16384,
- pmd_rxq=kwargs['queues'],
- pmd_txq=kwargs['queues'],
+ pmd_rxq=kwargs[u"queues"],
+ pmd_txq=kwargs[u"queues"],
- pmd_tx_offloads='0x0',
+ pmd_tx_offloads=u"0x0",
pmd_disable_hw_vlan=False,
- pmd_nb_cores=str(self._opt.get('smp') - 1))
+ pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+ )
- self._opt['vnf_bin'] = ('{testpmd_path}/{testpmd_cmd}'.
- format(testpmd_path=self._testpmd_path,
- testpmd_cmd=testpmd_cmd))
+ self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
def create_kernelvm_config_testpmd_mac(self, **kwargs):
"""Create QEMU testpmd-mac command line.
:type kwargs: dict
"""
testpmd_cmd = DpdkUtil.get_testpmd_cmdline(
- eal_corelist='0-{smp}'.format(smp=self._opt.get('smp') - 1),
+ eal_corelist=f"0-{self._opt.get(u'smp') - 1}",
eal_driver=False,
eal_in_memory=True,
pmd_num_mbufs=16384,
- pmd_fwd_mode='mac',
- pmd_eth_peer_0='0,{mac}'.format(mac=kwargs['vif1_mac']),
- pmd_eth_peer_1='1,{mac}'.format(mac=kwargs['vif2_mac']),
- pmd_rxq=kwargs['queues'],
- pmd_txq=kwargs['queues'],
- pmd_tx_offloads='0x0',
+ pmd_fwd_mode=u"mac",
+ pmd_eth_peer_0=f"0,{kwargs[u'vif1_mac']}",
+ pmd_eth_peer_1=f"1,{kwargs[u'vif2_mac']}",
+ pmd_rxq=kwargs[u"queues"],
+ pmd_txq=kwargs[u"queues"],
+ pmd_tx_offloads=u"0x0",
pmd_disable_hw_vlan=False,
- pmd_nb_cores=str(self._opt.get('smp') - 1))
+ pmd_nb_cores=str(self._opt.get(u"smp") - 1)
+ )
- self._opt['vnf_bin'] = ('{testpmd_path}/{testpmd_cmd}'.
- format(testpmd_path=self._testpmd_path,
- testpmd_cmd=testpmd_cmd))
+ self._opt[u"vnf_bin"] = f"{self._testpmd_path}/{testpmd_cmd}"
def create_kernelvm_init(self, **kwargs):
"""Create QEMU init script.
:param kwargs: Key-value pairs to replace content of init startup file.
:type kwargs: dict
"""
- template = '{res}/init.sh'.format(res=Constants.RESOURCES_TPL_VM)
- init = self._temp.get('ini')
- exec_cmd_no_error(
- self._node, 'rm -f {init}'.format(init=init), sudo=True)
+ template = f"{Constants.RESOURCES_TPL_VM}/init.sh"
+ init = self._temp.get(u"ini")
+ exec_cmd_no_error(self._node, f"rm -f {init}", sudo=True)
with open(template, 'r') as src_file:
src = Template(src_file.read())
exec_cmd_no_error(
- self._node, "echo '{out}' | sudo tee {init}".format(
- out=src.safe_substitute(**kwargs), init=init))
- exec_cmd_no_error(
- self._node, "chmod +x {init}".format(init=init), sudo=True)
+ self._node, f"echo '{src.safe_substitute(**kwargs)}' | "
+ f"sudo tee {init}"
+ )
+ exec_cmd_no_error(self._node, f"chmod +x {init}", sudo=True)
def configure_kernelvm_vnf(self, **kwargs):
"""Create KernelVM VNF configurations.
:param kwargs: Key-value pairs for templating configs.
:type kwargs: dict
"""
- if 'vpp' in self._opt.get('vnf'):
+ if u"vpp" in self._opt.get(u"vnf"):
self.create_kernelvm_config_vpp(**kwargs)
- elif 'testpmd_io' in self._opt.get('vnf'):
+ elif u"testpmd_io" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_io(**kwargs)
- elif 'testpmd_mac' in self._opt.get('vnf'):
+ elif u"testpmd_mac" in self._opt.get(u"vnf"):
self.create_kernelvm_config_testpmd_mac(**kwargs)
else:
- raise RuntimeError('QEMU: Unsupported VNF!')
- self.create_kernelvm_init(vnf_bin=self._opt['vnf_bin'])
+ raise RuntimeError(u"QEMU: Unsupported VNF!")
+ self.create_kernelvm_init(vnf_bin=self._opt[u"vnf_bin"])
def get_qemu_pids(self):
"""Get QEMU CPU pids.
:returns: List of QEMU CPU pids.
:rtype: list of str
"""
- command = ("grep -rwl 'CPU' /proc/$(sudo cat {pidfile})/task/*/comm ".
- format(pidfile=self._temp.get('pidfile')))
- command += (r"| xargs dirname | sed -e 's/\/.*\///g' | uniq")
+ command = f"grep -rwl 'CPU' /proc/$(sudo cat " \
+ f"{self._temp.get(u'pidfile')})/task/*/comm "
+ command += r"| xargs dirname | sed -e 's/\/.*\///g' | uniq"
stdout, _ = exec_cmd_no_error(self._node, command)
return stdout.splitlines()
sleep(1)
continue
for qemu_cpu, host_cpu in zip(qemu_cpus, host_cpus):
- command = ('taskset -pc {host_cpu} {thread}'.
- format(host_cpu=host_cpu, thread=qemu_cpu))
- message = ('QEMU: Set affinity failed on {host}!'.
- format(host=self._node['host']))
- exec_cmd_no_error(self._node, command, sudo=True,
- message=message)
+ command = f"taskset -pc {host_cpu} {qemu_cpu}"
+ message = f"QEMU: Set affinity failed " \
+ f"on {self._node[u'host']}!"
+ exec_cmd_no_error(
+ self._node, command, sudo=True, message=message
+ )
break
except (RuntimeError, ValueError):
self.qemu_kill_all()
raise
else:
self.qemu_kill_all()
- raise RuntimeError('Failed to set Qemu threads affinity!')
+ raise RuntimeError(u"Failed to set Qemu threads affinity!")
def qemu_set_scheduler_policy(self):
"""Set scheduler policy to SCHED_RR with priority 1 for all Qemu CPU
qemu_cpus = self.get_qemu_pids()
for qemu_cpu in qemu_cpus:
- command = ('chrt -r -p 1 {thread}'.
- format(thread=qemu_cpu))
- message = ('QEMU: Set SCHED_RR failed on {host}'.
- format(host=self._node['host']))
- exec_cmd_no_error(self._node, command, sudo=True,
- message=message)
+ command = f"chrt -r -p 1 {qemu_cpu}"
+ message = f"QEMU: Set SCHED_RR failed on {self._node[u'host']}"
+ exec_cmd_no_error(
+ self._node, command, sudo=True, message=message
+ )
except (RuntimeError, ValueError):
self.qemu_kill_all()
raise
- def qemu_add_vhost_user_if(self, socket, server=True, jumbo_frames=False,
- queue_size=None, queues=1):
+ def qemu_add_vhost_user_if(
+ self, socket, server=True, jumbo_frames=False, queue_size=None,
+ queues=1):
"""Add Vhost-user interface.
:param socket: Path of the unix socket.
"""
self._vhost_id += 1
self._params.add_with_value(
- 'chardev', 'socket,id=char{vhost},path={socket}{server}'.format(
- vhost=self._vhost_id, socket=socket,
- server=',server' if server is True else ''))
+ u"chardev", f"socket,id=char{self._vhost_id},"
+ f"path={socket}{u',server' if server is True else u''}"
+ )
self._params.add_with_value(
- 'netdev', 'vhost-user,id=vhost{vhost},chardev=char{vhost},'
- 'queues={queues}'.format(vhost=self._vhost_id, queues=queues))
- mac = ('52:54:00:00:{qemu:02x}:{vhost:02x}'.
- format(qemu=self._opt.get('qemu_id'), vhost=self._vhost_id))
- queue_size = ('rx_queue_size={queue_size},tx_queue_size={queue_size}'.
- format(queue_size=queue_size)) if queue_size else ''
- mbuf = 'on,host_mtu=9200'
+ u"netdev", f"vhost-user,id=vhost{self._vhost_id},"
+ f"chardev=char{self._vhost_id},queues={queues}"
+ )
+ mac = f"52:54:00:00:{self._opt.get(u'qemu_id'):02x}:" \
+ f"{self._vhost_id:02x}"
+ queue_size = f"rx_queue_size={queue_size},tx_queue_size={queue_size}" \
+ if queue_size else u""
+ mbuf = u"on,host_mtu=9200"
self._params.add_with_value(
- 'device', 'virtio-net-pci,netdev=vhost{vhost},mac={mac},'
- 'addr={addr}.0,mq=on,vectors={vectors},csum=off,gso=off,'
- 'guest_tso4=off,guest_tso6=off,guest_ecn=off,mrg_rxbuf={mbuf},'
- '{queue_size}'.format(
- addr=self._vhost_id+5, vhost=self._vhost_id, mac=mac,
- mbuf=mbuf if jumbo_frames else 'off', queue_size=queue_size,
- vectors=(2 * queues + 2)))
+ u"device", f"virtio-net-pci,netdev=vhost{self._vhost_id},mac={mac},"
+ f"addr={self._vhost_id+5}.0,mq=on,vectors={2 * queues + 2},"
+ f"csum=off,gso=off,guest_tso4=off,guest_tso6=off,guest_ecn=off,"
+ f"mrg_rxbuf={mbuf if jumbo_frames else u'off'},{queue_size}"
+ )
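+ # vectors=2*queues+2 follows the usual virtio-net MSI-X sizing:
+ # one vector per RX and per TX queue, plus config and control vectors.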
# Add interface MAC and socket to the node dict.
- if_data = {'mac_address': mac, 'socket': socket}
- if_name = 'vhost{vhost}'.format(vhost=self._vhost_id)
- self._vm_info['interfaces'][if_name] = if_data
+ if_data = {u"mac_address": mac, u"socket": socket}
+ if_name = f"vhost{self._vhost_id}"
+ self._vm_info[u"interfaces"][if_name] = if_data
# Add socket to temporary file list.
self._temp[if_name] = socket
from resources.libraries.python.LocalExecution import run
from resources.libraries.python.topology import NodeType
-__all__ = ["SetupFramework"]
+__all__ = [u"SetupFramework"]
def pack_framework_dir():
"""
try:
- directory = environ["TMPDIR"]
+ directory = environ[u"TMPDIR"]
except KeyError:
directory = None
if directory is not None:
- tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-",
- dir="{0}".format(directory))
+ tmpfile = NamedTemporaryFile(
+ suffix=u".tgz", prefix=u"csit-testing-", dir=directory
+ )
else:
- tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-")
+ tmpfile = NamedTemporaryFile(suffix=u".tgz", prefix=u"csit-testing-")
file_name = tmpfile.name
tmpfile.close()
- run(["tar", "--sparse", "--exclude-vcs", "--exclude=output*.xml",
- "--exclude=./tmp", "-zcf", file_name, "."],
- msg="Could not pack testing framework")
+ run([u"tar", u"--sparse", u"--exclude-vcs", u"--exclude=output*.xml",
+ u"--exclude=./tmp", u"-zcf", file_name, u"."],
+ msg=u"Could not pack testing framework")
return file_name
:type node: dict
:returns: nothing
"""
- host = node['host']
- logger.console('Copying tarball to {0} starts.'.format(host))
- scp_node(node, tarball, "/tmp/")
- logger.console('Copying tarball to {0} done.'.format(host))
+ host = node[u"host"]
+ logger.console(f"Copying tarball to {host} starts.")
+ scp_node(node, tarball, u"/tmp/")
+ logger.console(f"Copying tarball to {host} done.")
def extract_tarball_at_node(tarball, node):
:returns: nothing
:raises RuntimeError: When failed to unpack tarball.
"""
- host = node['host']
- logger.console('Extracting tarball to {0} on {1} starts.'
- .format(con.REMOTE_FW_DIR, host))
+ host = node[u"host"]
+ logger.console(
+ f"Extracting tarball to {con.REMOTE_FW_DIR} on {host} starts."
+ )
+ cmd = f"sudo rm -rf {con.REMOTE_FW_DIR}; mkdir {con.REMOTE_FW_DIR}; " \
+ f"tar -zxf {tarball} -C {con.REMOTE_FW_DIR}; rm -f {tarball}"
exec_cmd_no_error(
- node, "sudo rm -rf {1}; mkdir {1}; tar -zxf {0} -C {1};"
- " rm -f {0}".format(tarball, con.REMOTE_FW_DIR),
- message='Failed to extract {0} at node {1}'.format(tarball, host),
- timeout=30, include_reason=True)
- logger.console('Extracting tarball to {0} on {1} done.'
- .format(con.REMOTE_FW_DIR, host))
+ node, cmd, message=f"Failed to extract {tarball} at node {host}",
+ timeout=30, include_reason=True
+ )
+ logger.console(f"Extracting tarball to {con.REMOTE_FW_DIR} on {host} done.")
def create_env_directory_at_node(node):
:returns: nothing
:raises RuntimeError: When failed to setup virtualenv.
"""
- host = node['host']
- logger.console('Virtualenv setup including requirements.txt on {0} starts.'
- .format(host))
+ host = node[u"host"]
+ logger.console(
+ f"Virtualenv setup including requirements.txt on {host} starts."
+ )
+ cmd = f"cd {con.REMOTE_FW_DIR} && rm -rf env && virtualenv " \
+ f"-p $(which python3) --system-site-packages --never-download env " \
+ f"&& source env/bin/activate && pip3 install -r requirements.txt"
exec_cmd_no_error(
- node, 'cd {0} && rm -rf env'
- ' && virtualenv -p $(which python3) '
- '--system-site-packages --never-download env'
- ' && source env/bin/activate && pip3 install -r requirements.txt'
- .format(con.REMOTE_FW_DIR), timeout=100, include_reason=True,
- message="Failed install at node {host}".format(host=host))
- logger.console('Virtualenv setup on {0} done.'.format(host))
+ node, cmd, timeout=100, include_reason=True,
+ message=f"Failed install at node {host}"
+ )
+ logger.console(f"Virtualenv setup on {host} done.")
def setup_node(node, tarball, remote_tarball, results=None):
:returns: True - success, False - error
:rtype: bool
"""
- host = node['host']
+ host = node[u"host"]
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
- if node['type'] == NodeType.TG:
+ if node[u"type"] == NodeType.TG:
create_env_directory_at_node(node)
except RuntimeError as exc:
- logger.console("Node {node} setup failed, error: {err!r}".format(
- node=host, err=exc))
+ logger.console(f"Node {host} setup failed, error: {exc!r}")
result = False
else:
- logger.console('Setup of node {ip} done.'.format(ip=host))
+ logger.console(f"Setup of node {host} done.")
result = True
if isinstance(results, list):
:param node: Node to delete framework directory on.
:type node: dict
"""
- host = node['host']
- logger.console(
- 'Deleting framework directory on {0} starts.'.format(host))
+ host = node[u"host"]
+ logger.console(f"Deleting framework directory on {host} starts.")
exec_cmd_no_error(
- node, 'sudo rm -rf {0}'.format(con.REMOTE_FW_DIR),
- message="Framework delete failed at node {host}".format(host=host),
- timeout=100, include_reason=True)
- logger.console(
- 'Deleting framework directory on {0} done.'.format(host))
+ node, f"sudo rm -rf {con.REMOTE_FW_DIR}",
+ message=f"Framework delete failed at node {host}",
+ timeout=100, include_reason=True
+ )
+ logger.console(f"Deleting framework directory on {host} done.")
def cleanup_node(node, results=None):
:returns: True - success, False - error
:rtype: bool
"""
- host = node['host']
+ host = node[u"host"]
try:
delete_framework_dir(node)
except RuntimeError:
- logger.error("Cleanup of node {0} failed.".format(host))
+ logger.error(f"Cleanup of node {host} failed.")
result = False
else:
- logger.console('Cleanup of node {0} done.'.format(host))
+ logger.console(f"Cleanup of node {host} done.")
result = True
if isinstance(results, list):
"""
tarball = pack_framework_dir()
- msg = 'Framework packed to {0}'.format(tarball)
+ msg = f"Framework packed to {tarball}"
logger.console(msg)
logger.trace(msg)
- remote_tarball = "/tmp/{0}".format(basename(tarball))
+ remote_tarball = f"/tmp/{tarball}"
results = []
threads = []
threads.append(thread)
logger.info(
- 'Executing node setups in parallel, waiting for threads to end')
+ f"Executing node setups in parallel, waiting for threads to end"
+ )
for thread in threads:
thread.join()
- logger.info('Results: {0}'.format(results))
+ logger.info(f"Results: {results}")
delete_local_tarball(tarball)
if all(results):
- logger.console('All nodes are ready.')
+ logger.console(u"All nodes are ready.")
for node in nodes.values():
- logger.info('Setup of {type} node {ip} done.'.
- format(type=node['type'], ip=node['host']))
+ logger.info(
+ f"Setup of {node[u'type']} node {node[u'host']} done."
+ )
else:
- raise RuntimeError('Failed to setup framework.')
+ raise RuntimeError(u"Failed to setup framework.")
class CleanupFramework(object):
threads.append(thread)
logger.info(
- 'Executing node cleanups in parallel, waiting for threads to end.')
+ u"Executing node cleanups in parallel, waiting for threads to end."
+ )
for thread in threads:
thread.join()
- logger.info('Results: {0}'.format(results))
+ logger.info(f"Results: {results}")
if all(results):
- logger.console('All nodes cleaned up.')
+ logger.console(u"All nodes cleaned up.")
else:
- raise RuntimeError('Failed to cleaned up framework.')
+ raise RuntimeError(u"Failed to cleaned up framework.")
-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""TG Setup library."""
-from resources.libraries.python.topology import NodeType
from resources.libraries.python.InterfaceUtil import InterfaceUtil
+from resources.libraries.python.topology import NodeType
class TGSetup(object):
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.TG:
+ if node[u"type"] == NodeType.TG:
InterfaceUtil.tg_set_interfaces_default_driver(node)
response.
:rtype: list
"""
- cmd = "sw_interface_vhost_user_dump"
+ cmd = u"sw_interface_vhost_user_dump"
with PapiSocketExecutor(node) as papi_exec:
details = papi_exec.add(cmd).get_details()
for vhost in details:
- vhost["interface_name"] = vhost["interface_name"].rstrip('\x00')
- vhost["sock_filename"] = vhost["sock_filename"].rstrip('\x00')
+ vhost[u"interface_name"] = vhost[u"interface_name"].rstrip(b'\0')
+ vhost[u"sock_filename"] = vhost[u"sock_filename"].rstrip(b'\0')
- logger.debug("VhostUser details:\n{details}".format(details=details))
+ logger.debug(f"VhostUser details:\n{details}")
return details
:returns: SW interface index.
:rtype: int
"""
- cmd = 'create_vhost_user_if'
- err_msg = 'Failed to create Vhost-user interface on host {host}'.format(
- host=node['host'])
+ cmd = u"create_vhost_user_if"
+ err_msg = f"Failed to create Vhost-user interface " \
+ f"on host {node[u'host']}"
args = dict(
sock_filename=str(socket)
)
sw_if_index = papi_exec.add(cmd, **args).get_sw_if_index(err_msg)
# Update the Topology:
- if_key = Topology.add_new_port(node, 'vhost')
+ if_key = Topology.add_new_port(node, u"vhost")
Topology.update_interface_sw_if_index(node, if_key, sw_if_index)
ifc_name = InterfaceUtil.vpp_get_interface_name(node, sw_if_index)
:returns: Interface name or None if not found.
:rtype: str
"""
- for interface in node['interfaces'].values():
- if interface.get('socket') == socket:
- return interface.get('name')
+ for interface in node[u"interfaces"].values():
+ if interface.get(u"socket") == socket:
+ return interface.get(u"name")
return None
@staticmethod
:type nodes: dict
"""
for node in nodes.values():
- if node['type'] == NodeType.DUT:
+ if node[u"type"] == NodeType.DUT:
VhostUser.vpp_show_vhost(node)
+
+ @staticmethod
+ def vhost_user_dump(node):
+ """Get vhost-user data for the given node.
+
+ :param node: VPP node to get interface data from.
+ :type node: dict
+ :returns: List of dictionaries with all vhost-user interfaces.
+ :rtype: list
+ """
+
+ def process_vhost_dump(vhost_dump):
+ """Process vhost dump.
+
+ :param vhost_dump: Vhost interface dump.
+ :type vhost_dump: dict
+ :returns: Processed vhost interface dump.
+ :rtype: dict
+ """
+ vhost_dump[u"interface_name"] = \
+ vhost_dump[u"interface_name"].rstrip(b'\0')
+ vhost_dump[u"sock_filename"] = \
+ vhost_dump[u"sock_filename"].rstrip(b'\0')
+ return vhost_dump
+
+ cmd = u"sw_interface_vhost_user_dump"
+ err_msg = f"Failed to get vhost-user dump on host {node['host']}"
+ with PapiSocketExecutor(node) as papi_exec:
+ details = papi_exec.add(cmd).get_details(err_msg)
+
+ for dump in details:
+ # In-place edits.
+ process_vhost_dump(dump)
+
+ logger.debug(f"Vhost-user details:\n{details}")
+ return details
"""Module defining utilities for test directory regeneration."""
-from __future__ import print_function
+import sys
from glob import glob
+from io import open
from os import getcwd
-import sys
+
from resources.libraries.python.Constants import Constants
from resources.libraries.python.autogen.Testcase import Testcase
PROTOCOL_TO_MIN_FRAME_SIZE = {
- "ip4": 64,
- "ip6": 78,
- "ethip4vxlan": 114, # What is the real minimum for latency stream?
- "dot1qip4vxlan": 118
+ u"ip4": 64,
+ u"ip6": 78,
+ u"ethip4vxlan": 114, # What is the real minimum for latency stream?
+ u"dot1qip4vxlan": 118
}
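+# The VXLAN minimums follow from 50 B of encapsulation overhead
+# (14 B Ethernet + 20 B IPv4 + 8 B UDP + 8 B VXLAN) on top of the 64 B
+# IPv4 minimum; the dot1q variant adds 4 B for the 802.1Q tag.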
-MIN_FRAME_SIZE_VALUES = PROTOCOL_TO_MIN_FRAME_SIZE.values()
-
-
-# Copied from https://stackoverflow.com/a/14981125
-def eprint(*args, **kwargs):
- """Print to stderr."""
- print(*args, file=sys.stderr, **kwargs)
+MIN_FRAME_SIZE_VALUES = list(PROTOCOL_TO_MIN_FRAME_SIZE.values())
def replace_defensively(
"""
found = whole.count(to_replace)
if found != how_many:
- raise ValueError(in_filename + ": " + msg)
+ raise ValueError(in_filename + u": " + msg)
return whole.replace(to_replace, replace_with)
:returns: Interface ID, Suite ID.
:rtype: (str, str)
"""
- dash_split = filename.split("-", 1)
+ dash_split = filename.split(u"-", 1)
if len(dash_split[0]) <= 4:
# It was something like "2n1l", we need one more split.
- dash_split = dash_split[1].split("-", 1)
- return dash_split[0], dash_split[1].split(".", 1)[0]
+ dash_split = dash_split[1].split(u"-", 1)
+ return dash_split[0], dash_split[1].split(u".", 1)[0]
def add_default_testcases(testcase, iface, suite_id, file_out, tc_kwargs_list):
for num, kwargs in enumerate(tc_kwargs_list, start=1):
# TODO: Is there a better way to disable some combinations?
emit = True
- if kwargs["frame_size"] == 9000:
- if "vic1227" in iface:
+ if kwargs[u"frame_size"] == 9000:
+ if u"vic1227" in iface:
# Not supported in HW.
emit = False
- if "vic1385" in iface:
+ if u"vic1385" in iface:
# Not supported in HW.
emit = False
- if "ipsec" in suite_id:
+ if u"ipsec" in suite_id:
# IPsec code does not support chained buffers.
# Tracked by Jira ticket VPP-1207.
emit = False
- if "-16vm2t-" in suite_id or "-16dcr2t-" in suite_id:
- if kwargs["phy_cores"] > 3:
+ if u"-16vm2t-" in suite_id or u"-16dcr2t-" in suite_id:
+ if kwargs[u"phy_cores"] > 3:
# CSIT lab only has 28 (physical) core processors,
# so these tests would fail when attempting to assign cores.
emit = False
- if "-24vm1t-" in suite_id or "-24dcr1t-" in suite_id:
- if kwargs["phy_cores"] > 3:
+ if u"-24vm1t-" in suite_id or u"-24dcr1t-" in suite_id:
+ if kwargs[u"phy_cores"] > 3:
# CSIT lab only has 28 (physical) core processors,
# so these tests would fail when attempting to assign cores.
emit = False
- if "soak" in suite_id:
+ if u"soak" in suite_id:
# Soak tests take too long, do not risk more than tc01.
- if kwargs["phy_cores"] != 1:
+ if kwargs[u"phy_cores"] != 1:
emit = False
- if kwargs["frame_size"] not in MIN_FRAME_SIZE_VALUES:
+ if kwargs[u"frame_size"] not in MIN_FRAME_SIZE_VALUES:
emit = False
if emit:
file_out.write(testcase.generate(num=num, **kwargs))
"""
for suite_type in Constants.PERF_TYPE_TO_KEYWORD:
tmp_filename = replace_defensively(
- in_filename, "ndrpdr", suite_type, 1,
- "File name should contain suite type once.", in_filename)
+ in_filename, u"ndrpdr", suite_type, 1,
+ u"File name should contain suite type once.", in_filename
+ )
tmp_prolog = replace_defensively(
- in_prolog, "ndrpdr".upper(), suite_type.upper(), 1,
- "Suite type should appear once in uppercase (as tag).",
- in_filename)
+ in_prolog, u"ndrpdr".upper(), suite_type.upper(), 1,
+ u"Suite type should appear once in uppercase (as tag).",
+ in_filename
+ )
tmp_prolog = replace_defensively(
tmp_prolog,
- "Find NDR and PDR intervals using optimized search",
+ u"Find NDR and PDR intervals using optimized search",
Constants.PERF_TYPE_TO_KEYWORD[suite_type], 1,
- "Main search keyword should appear once in suite.",
- in_filename)
+ u"Main search keyword should appear once in suite.",
+ in_filename
+ )
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_SUITE_DOC_VER["ndrpdr"],
+ Constants.PERF_TYPE_TO_SUITE_DOC_VER[u"ndrpdr"],
Constants.PERF_TYPE_TO_SUITE_DOC_VER[suite_type],
- 1, "Exact suite type doc not found.", in_filename)
+ 1, u"Exact suite type doc not found.", in_filename
+ )
tmp_prolog = replace_defensively(
tmp_prolog,
- Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER["ndrpdr"],
+ Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[u"ndrpdr"],
Constants.PERF_TYPE_TO_TEMPLATE_DOC_VER[suite_type],
- 1, "Exact template type doc not found.", in_filename)
+ 1, u"Exact template type doc not found.", in_filename
+ )
_, suite_id = get_iface_and_suite_id(tmp_filename)
testcase = Testcase.default(suite_id)
for nic_name in Constants.NIC_NAME_TO_CODE:
out_filename = replace_defensively(
- tmp_filename, "10ge2p1x710",
+ tmp_filename, u"10ge2p1x710",
Constants.NIC_NAME_TO_CODE[nic_name], 1,
- "File name should contain NIC code once.", in_filename)
+ u"File name should contain NIC code once.", in_filename
+ )
out_prolog = replace_defensively(
- tmp_prolog, "Intel-X710", nic_name, 2,
- "NIC name should appear twice (tag and variable).",
- in_filename)
- if out_prolog.count("HW_") == 2:
+ tmp_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ if out_prolog.count(u"HW_") == 2:
# TODO CSIT-1481: Crypto HW should be read
# from topology file instead.
if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW:
out_prolog = replace_defensively(
- out_prolog, "HW_DH895xcc",
+ out_prolog, u"HW_DH895xcc",
Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1,
- "HW crypto name should appear.", in_filename)
+ u"HW crypto name should appear.", in_filename
+ )
iface, suite_id = get_iface_and_suite_id(out_filename)
- with open(out_filename, "w") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list)
+ testcase, iface, suite_id, file_out, kwargs_list
+ )
def write_reconf_files(in_filename, in_prolog, kwargs_list):
testcase = Testcase.default(suite_id)
for nic_name in Constants.NIC_NAME_TO_CODE:
out_filename = replace_defensively(
- in_filename, "10ge2p1x710",
+ in_filename, u"10ge2p1x710",
Constants.NIC_NAME_TO_CODE[nic_name], 1,
- "File name should contain NIC code once.", in_filename)
+ u"File name should contain NIC code once.", in_filename
+ )
out_prolog = replace_defensively(
- in_prolog, "Intel-X710", nic_name, 2,
- "NIC name should appear twice (tag and variable).",
- in_filename)
- if out_prolog.count("HW_") == 2:
+ in_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ if out_prolog.count(u"HW_") == 2:
# TODO CSIT-1481: Crypto HW should be read
# from topology file instead.
- if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW.keys():
+ if nic_name in Constants.NIC_NAME_TO_CRYPTO_HW:
out_prolog = replace_defensively(
- out_prolog, "HW_DH895xcc",
+ out_prolog, u"HW_DH895xcc",
Constants.NIC_NAME_TO_CRYPTO_HW[nic_name], 1,
- "HW crypto name should appear.", in_filename)
+ u"HW crypto name should appear.", in_filename
+ )
iface, suite_id = get_iface_and_suite_id(out_filename)
- with open(out_filename, "w") as file_out:
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_default_testcases(
- testcase, iface, suite_id, file_out, kwargs_list)
+ testcase, iface, suite_id, file_out, kwargs_list
+ )
def write_tcp_files(in_filename, in_prolog, kwargs_list):
testcase = Testcase.tcp(suite_id)
for nic_name in Constants.NIC_NAME_TO_CODE:
out_filename = replace_defensively(
- in_filename, "10ge2p1x710",
+ in_filename, u"10ge2p1x710",
Constants.NIC_NAME_TO_CODE[nic_name], 1,
- "File name should contain NIC code once.", in_filename)
+ u"File name should contain NIC code once.", in_filename
+ )
out_prolog = replace_defensively(
- in_prolog, "Intel-X710", nic_name, 2,
- "NIC name should appear twice (tag and variable).",
- in_filename)
- with open(out_filename, "w") as file_out:
+ in_prolog, u"Intel-X710", nic_name, 2,
+ u"NIC name should appear twice (tag and variable).",
+ in_filename
+ )
+ with open(out_filename, "wt") as file_out:
file_out.write(out_prolog)
add_tcp_testcases(testcase, file_out, kwargs_list)
"""
self.quiet = quiet
- def regenerate_glob(self, pattern, protocol="ip4"):
+ def regenerate_glob(self, pattern, protocol=u"ip4"):
"""Regenerate files matching glob pattern based on arguments.
In the current working directory, find all files matching
the glob pattern. Use testcase template according to suffix
- to regenerate test cases, autonumbering them,
+ to regenerate test cases, auto-numbering them,
taking arguments from list.
- Log-like prints are emited to sys.stderr.
+ Log-like prints are emitted to sys.stderr.
:param pattern: Glob pattern to select files. Example: *-ndrpdr.robot
:param protocol: String determining minimal frame size. Default: "ip4"
:raises RuntimeError: If invalid source suite is encountered.
"""
if not self.quiet:
- eprint("Regenerator starts at {cwd}".format(cwd=getcwd()))
+ print(f"Regenerator starts at {getcwd()}", file=sys.stderr)
min_frame_size = PROTOCOL_TO_MIN_FRAME_SIZE[protocol]
default_kwargs_list = [
- {"frame_size": min_frame_size, "phy_cores": 1},
- {"frame_size": min_frame_size, "phy_cores": 2},
- {"frame_size": min_frame_size, "phy_cores": 4},
- {"frame_size": 1518, "phy_cores": 1},
- {"frame_size": 1518, "phy_cores": 2},
- {"frame_size": 1518, "phy_cores": 4},
- {"frame_size": 9000, "phy_cores": 1},
- {"frame_size": 9000, "phy_cores": 2},
- {"frame_size": 9000, "phy_cores": 4},
- {"frame_size": "IMIX_v4_1", "phy_cores": 1},
- {"frame_size": "IMIX_v4_1", "phy_cores": 2},
- {"frame_size": "IMIX_v4_1", "phy_cores": 4}
+ {u"frame_size": min_frame_size, u"phy_cores": 1},
+ {u"frame_size": min_frame_size, u"phy_cores": 2},
+ {u"frame_size": min_frame_size, u"phy_cores": 4},
+ {u"frame_size": 1518, u"phy_cores": 1},
+ {u"frame_size": 1518, u"phy_cores": 2},
+ {u"frame_size": 1518, u"phy_cores": 4},
+ {u"frame_size": 9000, u"phy_cores": 1},
+ {u"frame_size": 9000, u"phy_cores": 2},
+ {u"frame_size": 9000, u"phy_cores": 4},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 1},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 2},
+ {u"frame_size": u"IMIX_v4_1", u"phy_cores": 4}
]
- tcp_kwargs_list = [{"phy_cores": i, "frame_size": 0} for i in (1, 2, 4)]
+ tcp_kwargs_list = [{u"phy_cores": i, u"frame_size": 0}
+ for i in (1, 2, 4)
+ ]
for in_filename in glob(pattern):
if not self.quiet:
- eprint("Regenerating in_filename:", in_filename)
+ print(
+ u"Regenerating in_filename:", in_filename, file=sys.stderr
+ )
iface, _ = get_iface_and_suite_id(in_filename)
- if not iface.endswith("10ge2p1x710"):
+ if not iface.endswith(u"10ge2p1x710"):
raise RuntimeError(
- "Error in {fil}: non-primary NIC found.".format(
- fil=in_filename))
- with open(in_filename, "r") as file_in:
- in_prolog = "".join(
- file_in.read().partition("*** Test Cases ***")[:-1])
- if in_filename.endswith("-ndrpdr.robot"):
+ f"Error in {in_filename}: non-primary NIC found."
+ )
+ with open(in_filename, "rt") as file_in:
+ in_prolog = u"".join(
+ file_in.read().partition(u"*** Test Cases ***")[:-1]
+ )
+ if in_filename.endswith(u"-ndrpdr.robot"):
write_default_files(in_filename, in_prolog, default_kwargs_list)
- elif in_filename.endswith("-reconf.robot"):
+ elif in_filename.endswith(u"-reconf.robot"):
write_reconf_files(in_filename, in_prolog, default_kwargs_list)
- elif in_filename[-10:] in ("-cps.robot", "-rps.robot"):
+ elif in_filename[-10:] in (u"-cps.robot", u"-rps.robot"):
write_tcp_files(in_filename, in_prolog, tcp_kwargs_list)
else:
raise RuntimeError(
- "Error in {fil}: non-primary suite type found.".format(
- fil=in_filename))
+ f"Error in {in_filename}: non-primary suite type found."
+ )
if not self.quiet:
- eprint("Regenerator ends.")
- eprint() # To make autogen check output more readable.
+ print(u"Regenerator ends.", file=sys.stderr)
+ print(file=sys.stderr) # To make autogen check output more readable.
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
try:
fsize = int(frame_size)
subst_dict = {
- "frame_num": "${%d}" % fsize,
- "frame_str": "%dB" % fsize
+ u"frame_num": f"${{{fsize:d}}}",
+ u"frame_str": f"{fsize}%dB"
}
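+ # For example, frame_size=64 yields frame_num=u"${64}" and
+ # frame_str=u"64B" (Robot Framework variable syntax).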
except ValueError: # Assuming an IMIX string.
subst_dict = {
- "frame_num": str(frame_size),
- "frame_str": "IMIX"
+ u"frame_num": str(frame_size),
+ u"frame_str": u"IMIX"
}
cores_str = str(phy_cores)
cores_num = int(cores_str)
subst_dict.update(
{
- "cores_num": "${%d}" % cores_num,
- "cores_str": phy_cores,
- "tc_num": "tc{num:02d}".format(num=num)
- })
+ u"cores_num": f"${{{cores_num:d}}}",
+ u"cores_str": phy_cores,
+ u"tc_num": f"tc{num:02d}"
+ }
+ )
return self.template.substitute(subst_dict)
@classmethod
:returns: Instance for generating testcase text of this type.
:rtype: Testcase
"""
- template_string = r'''
-| ${tc_num}-${frame_str}-${cores_str}c-''' + suite_id + r'''
-| | [Tags] | ${frame_str} | ${cores_str}C
-| | frame_size=${frame_num} | phy_cores=${cores_num}
+ template_string = f'''
+| ${{tc_num}}-${{frame_str}}-${{cores_str}}c-{suite_id}
+| | [Tags] | ${{frame_str}} | ${{cores_str}}C
+| | frame_size=${{frame_num}} | phy_cores=${{cores_num}}
'''
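+ # Doubled braces in the f-string survive as literal ${...} placeholders,
+ # which string.Template.substitute() later fills in generate().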
return cls(template_string)
"""
# TODO: Choose a better frame size identifier for streamed protocols
# (TCP, QUIC, SCTP, ...) where DUT (not TG) decides frame size.
- template_string = r'''
-| ${tc_num}-IMIX-${cores_str}c-''' + suite_id + r'''
-| | [Tags] | ${cores_str}C
-| | phy_cores=${cores_num}
+ template_string = f'''
+| ${{tc_num}}-IMIX-${{cores_str}}c-{suite_id}
+| | [Tags] | ${{cores_str}}C
+| | phy_cores=${{cores_num}}
'''
return cls(template_string)
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Used to parse JSON files or JSON data strings to dictionaries"""
import json
-from os import uname
+
+from io import open
class JsonParser(object):
:returns: JSON data parsed as python list.
:rtype: list
"""
- if "4.2.0-42-generic" in uname():
- # TODO: remove ugly workaround
- # On Ubuntu14.04 the VAT console returns "error:misc" even after
- # some commands execute correctly. This causes problems
- # with parsing JSON data.
- known_errors = ["sw_interface_dump error: Misc",
- "lisp_eid_table_dump error: Misc",
- "show_lisp_status error: Misc",
- "lisp_map_resolver_dump error: Misc",
- "show_lisp_pitr error: Misc",
- "snat_static_mapping_dump error: Misc",
- ]
- for item in known_errors:
- if item in json_data:
- json_data = json_data.replace(item, "")
- print("Removing API error: *{0}* "
- "from JSON output.".format(item))
parsed_data = json.loads(json_data)
return parsed_data
:returns: JSON data parsed as python list.
:rtype: list
"""
- input_data = open(json_file).read()
+ with open(json_file, u"rt") as input_file:
+ input_data = input_file.read()
parsed_data = JsonParser.parse_data(input_data)
return parsed_data
import socket
-import StringIO
+
+from io import StringIO
from time import time, sleep
from paramiko import RSAKey, SSHClient, AutoAddPolicy
from scp import SCPClient, SCPException
from resources.libraries.python.OptionString import OptionString
-from resources.libraries.python.PythonThree import raise_from
-__all__ = ["exec_cmd", "exec_cmd_no_error"]
+__all__ = [u"exec_cmd", u"exec_cmd_no_error", u"SSH", u"SSHTimeout",
+ u"scp_node"]
# TODO: load priv key
class SSH(object):
"""Contains methods for managing and using SSH connections."""
- __MAX_RECV_BUF = 10*1024*1024
+ __MAX_RECV_BUF = 10 * 1024 * 1024
__existing_connections = {}
def __init__(self):
:rtype: int
"""
- return hash(frozenset([node['host'], node['port']]))
+ return hash(frozenset([node[u"host"], node[u"port"]]))
def connect(self, node, attempts=5):
"""Connect to node prior to running exec_command or scp.
if node_hash in SSH.__existing_connections:
self._ssh = SSH.__existing_connections[node_hash]
if self._ssh.get_transport().is_active():
- logger.debug('Reusing SSH: {ssh}'.format(ssh=self._ssh))
+ logger.debug(f"Reusing SSH: {self._ssh}")
else:
if attempts > 0:
self._reconnect(attempts-1)
else:
- raise IOError('Cannot connect to {host}'.
- format(host=node['host']))
+ raise IOError(f"Cannot connect to {node['host']}")
else:
try:
start = time()
pkey = None
- if 'priv_key' in node:
- pkey = RSAKey.from_private_key(
- StringIO.StringIO(node['priv_key']))
+ if u"priv_key" in node:
+ pkey = RSAKey.from_private_key(StringIO(node[u"priv_key"]))
self._ssh = SSHClient()
self._ssh.set_missing_host_key_policy(AutoAddPolicy())
- self._ssh.connect(node['host'], username=node['username'],
- password=node.get('password'), pkey=pkey,
- port=node['port'])
+ self._ssh.connect(
+ node[u"host"], username=node[u"username"],
+ password=node.get(u"password"), pkey=pkey,
+ port=node[u"port"]
+ )
self._ssh.get_transport().set_keepalive(10)
SSH.__existing_connections[node_hash] = self._ssh
- logger.debug('New SSH to {peer} took {total} seconds: {ssh}'.
- format(
- peer=self._ssh.get_transport().getpeername(),
- total=(time() - start),
- ssh=self._ssh))
+ logger.debug(
+ f"New SSH to {self._ssh.get_transport().getpeername()} "
+ f"took {(time() - start)} seconds: {self._ssh}"
+ )
except SSHException as exc:
- raise_from(IOError('Cannot connect to {host}'.format(
- host=node['host'])), exc)
+ raise IOError(f"Cannot connect to {node[u'host']}") from exc
except NoValidConnectionsError as err:
- raise_from(IOError(
- 'Unable to connect to port {port} on {host}'.format(
- port=node['port'], host=node['host'])), err)
+ raise IOError(
+ f"Unable to connect to port {node[u'port']} on "
+ f"{node[u'host']}"
+ ) from err
def disconnect(self, node=None):
"""Close SSH connection to the node.
return
node_hash = self._node_hash(node)
if node_hash in SSH.__existing_connections:
- logger.debug('Disconnecting peer: {host}, {port}'.
- format(host=node['host'], port=node['port']))
+ logger.debug(
+ f"Disconnecting peer: {node[u'host']}, {node[u'port']}"
+ )
ssh = SSH.__existing_connections.pop(node_hash)
ssh.close()
node = self._node
self.disconnect(node)
self.connect(node, attempts)
- logger.debug('Reconnecting peer done: {host}, {port}'.
- format(host=node['host'], port=node['port']))
+ logger.debug(
+ f"Reconnecting peer done: {node[u'host']}, {node[u'port']}"
+ )
def exec_command(self, cmd, timeout=10, log_stdout_err=True):
"""Execute SSH command on a new channel on the connected Node.
if isinstance(cmd, (list, tuple)):
cmd = OptionString(cmd)
cmd = str(cmd)
- stdout = StringIO.StringIO()
- stderr = StringIO.StringIO()
+ stdout = StringIO()
+ stderr = StringIO()
try:
chan = self._ssh.get_transport().open_session(timeout=5)
peer = self._ssh.get_transport().getpeername()
peer = self._ssh.get_transport().getpeername()
chan.settimeout(timeout)
- logger.trace('exec_command on {peer} with timeout {timeout}: {cmd}'
- .format(peer=peer, timeout=timeout, cmd=cmd))
+ logger.trace(f"exec_command on {peer} with timeout {timeout}: {cmd}")
start = time()
chan.exec_command(cmd)
if time() - start > timeout:
raise SSHTimeout(
- 'Timeout exception during execution of command: {cmd}\n'
- 'Current contents of stdout buffer: {stdout}\n'
- 'Current contents of stderr buffer: {stderr}\n'
- .format(cmd=cmd, stdout=stdout.getvalue(),
- stderr=stderr.getvalue())
+ f"Timeout exception during execution of command: {cmd}\n"
+ f"Current contents of stdout buffer: {stdout.getvalue()}\n"
+ f"Current contents of stderr buffer: {stderr.getvalue()}\n"
)
sleep(0.1)
stderr.write(chan.recv_stderr(self.__MAX_RECV_BUF))
end = time()
- logger.trace('exec_command on {peer} took {total} seconds'.
- format(peer=peer, total=end-start))
+ logger.trace(f"exec_command on {peer} took {end-start} seconds")
- logger.trace('return RC {rc}'.format(rc=return_code))
+ logger.trace(f"return RC {return_code}")
if log_stdout_err or int(return_code):
- logger.trace('return STDOUT {stdout}'.
- format(stdout=stdout.getvalue()))
- logger.trace('return STDERR {stderr}'.
- format(stderr=stderr.getvalue()))
+ logger.trace(f"return STDOUT {stdout.getvalue()}")
+ logger.trace(f"return STDERR {stderr.getvalue()}")
return return_code, stdout.getvalue(), stderr.getvalue()
- def exec_command_sudo(self, cmd, cmd_input=None, timeout=30,
- log_stdout_err=True):
+ def exec_command_sudo(
+ self, cmd, cmd_input=None, timeout=30, log_stdout_err=True):
"""Execute SSH command with sudo on a new channel on the connected Node.
:param cmd: Command to be executed.
>>> ssh = SSH()
>>> ssh.connect(node)
>>> # Execute command without input (sudo -S cmd)
- >>> ssh.exec_command_sudo("ifconfig eth0 down")
- >>> # Execute command with input (sudo -S cmd <<< "input")
- >>> ssh.exec_command_sudo("vpp_api_test", "dump_interface_table")
+ >>> ssh.exec_command_sudo(u"ifconfig eth0 down")
+ >>> # Execute command with input (sudo -S cmd <<< 'input')
+ >>> ssh.exec_command_sudo(u"vpp_api_test", u"dump_interface_table")
"""
if isinstance(cmd, (list, tuple)):
cmd = OptionString(cmd)
if cmd_input is None:
- command = 'sudo -E -S {c}'.format(c=cmd)
+ command = f"sudo -E -S {cmd}"
else:
- command = 'sudo -E -S {c} <<< "{i}"'.format(c=cmd, i=cmd_input)
- return self.exec_command(command, timeout,
- log_stdout_err=log_stdout_err)
+ command = f"sudo -E -S {cmd} <<< '{cmd_input}'"
+ return self.exec_command(
+ command, timeout, log_stdout_err=log_stdout_err)
- def exec_command_lxc(self, lxc_cmd, lxc_name, lxc_params='', sudo=True,
- timeout=30):
+ def exec_command_lxc(
+ self, lxc_cmd, lxc_name, lxc_params=u"", sudo=True, timeout=30):
"""Execute command in LXC on a new SSH channel on the connected Node.
:param lxc_cmd: Command to be executed.
:type timeout: int
:returns: return_code, stdout, stderr
"""
- command = "lxc-attach {p} --name {n} -- /bin/sh -c '{c}'"\
- .format(p=lxc_params, n=lxc_name, c=lxc_cmd)
+ command = f"lxc-attach {lxc_params} --name {lxc_name} -- /bin/sh " \
+ f"-c '{lxc_cmd}'"
if sudo:
- command = 'sudo -E -S {c}'.format(c=command)
+ command = f"sudo -E -S {command}"
return self.exec_command(command, timeout)
def interactive_terminal_open(self, time_out=45):
chan.set_combine_stderr(True)
- buf = ''
+ buf = u""
- while not buf.endswith((":~# ", ":~$ ", "~]$ ", "~]# ")):
+ while not buf.endswith((u":~# ", u":~$ ", u"~]$ ", u"~]# ")):
try:
chunk = chan.recv(self.__MAX_RECV_BUF)
if not chunk:
break
buf += chunk
if chan.exit_status_ready():
- logger.error('Channel exit status ready')
+ logger.error(u"Channel exit status ready")
break
except socket.timeout as exc:
- raise_from(Exception('Socket timeout: {0}'.format(buf)), exc)
+ raise Exception(f"Socket timeout: {buf}") from exc
return chan
def interactive_terminal_exec_command(self, chan, cmd, prompt):
from other threads. You must not use this in a program that
uses SIGALRM itself (this includes certain profilers)
"""
- chan.sendall('{c}\n'.format(c=cmd))
- buf = ''
+ chan.sendall(f"{cmd}\n")
+ buf = u""
while not buf.endswith(prompt):
try:
chunk = chan.recv(self.__MAX_RECV_BUF)
break
buf += chunk
if chan.exit_status_ready():
- logger.error('Channel exit status ready')
+ logger.error(u"Channel exit status ready")
break
except socket.timeout as exc:
- raise_from(Exception(
- 'Socket timeout during execution of command: '
- '{0}\nBuffer content:\n{1}'.format(cmd, buf)), exc)
- tmp = buf.replace(cmd.replace('\n', ''), '')
+ raise Exception(
+ f"Socket timeout during execution of command: {cmd}\n"
+ f"Buffer content:\n{buf}"
+ ) from exc
+ tmp = buf.replace(cmd.replace(u"\n", u""), u"")
for item in prompt:
- tmp.replace(item, '')
+ tmp.replace(item, u"")
return tmp
@staticmethod
"""
chan.close()
- def scp(self, local_path, remote_path, get=False, timeout=30,
+ def scp(
+ self, local_path, remote_path, get=False, timeout=30,
wildcard=False):
"""Copy files from local_path to remote_path or vice versa.
:type wildcard: bool
"""
if not get:
- logger.trace('SCP {0} to {1}:{2}'.format(
- local_path, self._ssh.get_transport().getpeername(),
- remote_path))
+ logger.trace(
+ f"SCP {local_path} to "
+ f"{self._ssh.get_transport().getpeername()}:{remote_path}"
+ )
else:
- logger.trace('SCP {0}:{1} to {2}'.format(
- self._ssh.get_transport().getpeername(), remote_path,
- local_path))
+ logger.trace(
+ f"SCP {self._ssh.get_transport().getpeername()}:{remote_path} "
+ f"to {local_path}"
+ )
# SCPCLient takes a paramiko transport as its only argument
if not wildcard:
scp = SCPClient(self._ssh.get_transport(), socket_timeout=timeout)
else:
- scp = SCPClient(self._ssh.get_transport(), sanitize=lambda x: x,
- socket_timeout=timeout)
+ scp = SCPClient(
+ self._ssh.get_transport(), sanitize=lambda x: x,
+ socket_timeout=timeout
+ )
start = time()
if not get:
scp.put(local_path, remote_path)
scp.get(remote_path, local_path)
scp.close()
end = time()
- logger.trace('SCP took {0} seconds'.format(end-start))
+ logger.trace(f"SCP took {end-start} seconds")
def exec_cmd(node, cmd, timeout=600, sudo=False, disconnect=False):
:rtype: tuple(int, str, str)
"""
if node is None:
- raise TypeError('Node parameter is None')
+ raise TypeError(u"Node parameter is None")
if cmd is None:
- raise TypeError('Command parameter is None')
+ raise TypeError(u"Command parameter is None")
if not cmd:
- raise ValueError('Empty command parameter')
+ raise ValueError(u"Empty command parameter")
ssh = SSH()
- if node.get('host_port') is not None:
- ssh_node = dict()
- ssh_node['host'] = '127.0.0.1'
- ssh_node['port'] = node['port']
- ssh_node['username'] = node['username']
- ssh_node['password'] = node['password']
- import pexpect
- options = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
- tnl = '-L {port}:127.0.0.1:{port}'.format(port=node['port'])
- ssh_cmd = 'ssh {tnl} {op} {user}@{host} -p {host_port}'.\
- format(tnl=tnl, op=options, user=node['host_username'],
- host=node['host'], host_port=node['host_port'])
- logger.trace('Initializing local port forwarding:\n{ssh_cmd}'.
- format(ssh_cmd=ssh_cmd))
- child = pexpect.spawn(ssh_cmd)
- child.expect('.* password: ')
- logger.trace(child.after)
- child.sendline(node['host_password'])
- child.expect('Welcome .*')
- logger.trace(child.after)
- logger.trace('Local port forwarding finished.')
- else:
- ssh_node = node
-
try:
- ssh.connect(ssh_node)
+ ssh.connect(node)
except SSHException as err:
- logger.error("Failed to connect to node" + repr(err))
+ logger.error(f"Failed to connect to node {node[u'host']}\n{err!r}")
return None, None, None
try:
(ret_code, stdout, stderr) = ssh.exec_command(cmd, timeout=timeout)
else:
(ret_code, stdout, stderr) = ssh.exec_command_sudo(
- cmd, timeout=timeout)
+ cmd, timeout=timeout
+ )
except SSHException as err:
logger.error(repr(err))
return None, None, None
"""
for _ in range(retries + 1):
ret_code, stdout, stderr = exec_cmd(
- node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect)
+ node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect
+ )
if ret_code == 0:
break
sleep(1)
else:
- msg = 'Command execution failed: "{cmd}"\nRC: {rc}\n{stderr}'.format(
- cmd=cmd, rc=ret_code, stderr=stderr)
+ msg = f"Command execution failed: '{cmd}'\nRC: {ret_code}\n{stderr}"
logger.info(msg)
if message:
- if include_reason:
- msg = message + '\n' + msg
- else:
- msg = message
+ msg = f"{message}\n{msg}" if include_reason else message
raise RuntimeError(msg)
return stdout, stderr
try:
ssh.connect(node)
except SSHException as exc:
- raise_from(RuntimeError(
- 'Failed to connect to {host}!'.format(host=node['host'])), exc)
+ raise RuntimeError(f"Failed to connect to {node[u'host']}!") from exc
try:
ssh.scp(local_path, remote_path, get, timeout)
except SCPException as exc:
- raise_from(RuntimeError(
- 'SCP execution failed on {host}!'.format(host=node['host'])), exc)
+ raise RuntimeError(f"SCP execution failed on {node[u'host']}!") from exc
finally:
if disconnect:
ssh.disconnect()
+++ /dev/null
-# Copyright (c) 2019 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""SPAN setup library"""
-
-from resources.libraries.python.topology import Topology
-from resources.libraries.python.PapiExecutor import PapiSocketExecutor
-
-
-class SPAN(object):
- """Class contains methods for setting up SPAN mirroring on DUTs."""
-
- def __init__(self):
- """Initializer."""
- pass
-
- @staticmethod
- def vpp_get_span_configuration(node, is_l2=False):
- """Get full SPAN configuration from VPP node.
-
- Used by Honeycomb.
-
- :param node: DUT node.
- :type node: dict
-
- :returns: Full SPAN configuration as list. One list entry for every
- source/destination interface pair.
- :rtype: list of dict
- """
- cmd = "sw_interface_span_dump"
- args = dict(
- is_l2=1 if is_l2 else 0
- )
- with PapiSocketExecutor(node) as papi_exec:
- details = papi_exec.add(cmd, **args).get_details()
-
- return details
-
- @staticmethod
- def vpp_get_span_configuration_by_interface(node, dst_interface,
- ret_format="sw_if_index"):
- """Get a list of all interfaces currently being mirrored
- to the specified interface.
-
- Used by Honeycomb.
-
- :param node: DUT node.
- :param dst_interface: Name, sw_if_index or key of interface.
- :param ret_format: Optional. Desired format of returned interfaces.
- :type node: dict
- :type dst_interface: str or int
- :type ret_format: string
- :returns: List of SPAN source interfaces for the provided destination
- interface.
- :rtype: list
- """
-
- data = SPAN.vpp_get_span_configuration(node)
-
- dst_int = Topology.convert_interface_reference(
- node, dst_interface, "sw_if_index")
- src_interfaces = []
- for item in data:
- if item["sw_if_index_to"] == dst_int:
- src_interfaces.append(item["sw_if_index_from"])
-
- if ret_format != "sw_if_index":
- src_interfaces = [
- Topology.convert_interface_reference(
- node, interface, ret_format
- ) for interface in src_interfaces]
-
- return src_interfaces
+++ /dev/null
-# Copyright (c) 2016 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-__init__ file for resources/libraries/python/telemetry
-"""
*** Settings ***
| Library | resources.libraries.python.InterfaceUtil
| Library | resources.libraries.python.NodePath
+| Library | resources.libraries.python.VhostUser
*** Keywords ***
| Set interfaces in path up