""" test framework utilities """
-import abc
+import ipaddress
+import logging
import socket
from socket import AF_INET6
-import six
-import sys
import os.path
+from copy import deepcopy
import scapy.compat
from scapy.layers.l2 import Ether
from io import BytesIO
from vpp_papi import mac_pton
# Shared no-op logger: a NullHandler is attached so code that is handed this
# logger (e.g. as the default `logger=` argument of the fragmentation helpers
# below) logs nowhere unless the caller overrides it with a real logger.
null_logger = logging.getLogger('VppTestCase.util')
null_logger.addHandler(logging.NullHandler())
+
def ppp(headline, packet):
""" Return string containing the output of scapy packet.show() call. """
return ("%s.%d" % (tmp, i) for i in range(s, e))
-def ip4n_range(ip4n, s, e):
- ip4 = socket.inet_ntop(socket.AF_INET, ip4n)
- return (socket.inet_pton(socket.AF_INET, ip)
- for ip in ip4_range(ip4, s, e))
def mcast_ip_to_mac(ip):
    """ Return the link-layer multicast MAC for a multicast IP address.

    :param ip: IPv4 or IPv6 multicast address (string or ipaddress object)
    :raises ValueError: if the address is not a multicast address
    :returns: multicast MAC address string
    """
    addr = ipaddress.ip_address(ip)
    if not addr.is_multicast:
        raise ValueError("Must be multicast address.")
    addr_int = int(addr)
    if addr.version == 4:
        # IPv4: low 23 bits of the group address into the 01:00:5e prefix
        tail = ((addr_int >> 16) & 0x7f,
                (addr_int >> 8) & 0xff,
                addr_int & 0xff)
        return "01:00:5e:%02x:%02x:%02x" % tail
    # IPv6: low 32 bits of the group address into the 33:33 prefix
    tail = ((addr_int >> 24) & 0xff,
            (addr_int >> 16) & 0xff,
            (addr_int >> 8) & 0xff,
            addr_int & 0xff)
    return "33:33:%02x:%02x:%02x:%02x" % tail
# wrapper around scapy library function.
" current core pattern is: %s" % corefmt)
-class NumericConstant(object):
+class NumericConstant:
desc_dict = {}
return ""
-class Host(object):
+class Host:
""" Generic test host "connected" to VPPs interface. """
@property
self._ip6_ll = ip6_ll
-class ForeignAddressFactory(object):
- count = 0
- prefix_len = 24
- net_template = '10.10.10.{}'
- net = net_template.format(0) + '/' + str(prefix_len)
-
- def get_ip4(self):
- if self.count > 255:
- raise Exception("Network host address exhaustion")
- self.count += 1
- return self.net_template.format(self.count)
-
-
class L4_Conn():
""" L4 'connection' tied to two VPP interfaces """
L4_CONN_SIDE_ONE = 1
-class LoggerWrapper(object):
- def __init__(self, logger=None):
- self._logger = logger
-
- def debug(self, *args, **kwargs):
- if self._logger:
- self._logger.debug(*args, **kwargs)
-
- def error(self, *args, **kwargs):
- if self._logger:
- self._logger.error(*args, **kwargs)
-
-
-def fragment_rfc791(packet, fragsize, _logger=None):
+def fragment_rfc791(packet, fragsize, logger=null_logger):
"""
Fragment an IPv4 packet per RFC 791
:param packet: packet to fragment
:note: IP options are not supported
:returns: list of fragments
"""
- logger = LoggerWrapper(_logger)
logger.debug(ppp("Fragmenting packet:", packet))
packet = packet.__class__(scapy.compat.raw(packet)) # recalc. all values
if len(packet[IP].options) > 0:
pkts = []
ihl = packet[IP].ihl
otl = len(packet[IP])
- nfb = (fragsize - pre_ip_len - ihl * 4) / 8
+ nfb = int((fragsize - pre_ip_len - ihl * 4) / 8)
fo = packet[IP].frag
p = packet.__class__(hex_headers + hex_payload[:nfb * 8])
p[IP].frag = fo + nfb
del p[IP].chksum
- more_fragments = fragment_rfc791(p, fragsize, _logger)
+ more_fragments = fragment_rfc791(p, fragsize, logger)
pkts.extend(more_fragments)
return pkts
-def fragment_rfc8200(packet, identification, fragsize, _logger=None):
+def fragment_rfc8200(packet, identification, fragsize, logger=null_logger):
"""
Fragment an IPv6 packet per RFC 8200
:param packet: packet to fragment
:note: IP options are not supported
:returns: list of fragments
"""
- logger = LoggerWrapper(_logger)
packet = packet.__class__(scapy.compat.raw(packet)) # recalc. all values
if len(packet) <= fragsize:
return [packet]
del p[IPv6].nh
p = p / fragment_ext_hdr
del p[IPv6ExtHdrFragment].nh
- first_payload_len_nfb = (fragsize - len(p)) / 8
+ first_payload_len_nfb = int((fragsize - len(p)) / 8)
p = p / Raw(hex_payload[:first_payload_len_nfb * 8])
del p[IPv6].plen
p[IPv6ExtHdrFragment].nh = orig_nh
del p[IPv6].nh
p = p / fragment_ext_hdr
del p[IPv6ExtHdrFragment].nh
- l_nfb = (fragsize - len(p)) / 8
+ l_nfb = int((fragsize - len(p)) / 8)
p = p / Raw(hex_payload[offset:offset + l_nfb * 8])
p[IPv6ExtHdrFragment].nh = orig_nh
p[IPv6ExtHdrFragment].id = identification
- p[IPv6ExtHdrFragment].offset = offset / 8
+ p[IPv6ExtHdrFragment].offset = int(offset / 8)
p[IPv6ExtHdrFragment].m = 1
p = p.__class__(scapy.compat.raw(p))
logger.debug(ppp("Fragment %s:" % len(pkts), p))
def reassemble4(listoffragments):
    # Delegate to reassemble4_core (not visible in this chunk) with its second
    # argument fixed to True.
    # NOTE(review): confirm the meaning of the True flag in reassemble4_core.
    return reassemble4_core(listoffragments, True)
+
+
def recursive_dict_merge(dict_base, dict_update):
    """Recursively merge base dict with update dict, return merged dict.

    dict_base is modified in place and also returned.  When both sides hold
    a dict under the same key, the nested dicts are merged recursively;
    otherwise the value from dict_update overwrites the base value.

    :param dict_base: dictionary to merge into (mutated in place)
    :param dict_update: dictionary whose entries are merged in
    :returns: the merged dictionary (same object as dict_base)
    """
    for key, value in dict_update.items():
        # Recurse only when BOTH sides are dicts; guarding the base side too
        # avoids a crash when a dict update overwrites a plain base value.
        if (key in dict_base and isinstance(value, dict)
                and isinstance(dict_base[key], dict)):
            dict_base[key] = recursive_dict_merge(dict_base[key], value)
        else:
            dict_base[key] = value
    return dict_base
+
+
class StatsDiff:
    """
    Diff dictionary is a dictionary of dictionaries of interesting stats:

    diff_dictionary =
    {
        "err" : { '/error/counter1' : 4, },
        sw_if_index1 : { '/stat/segment/counter1' : 5,
                         '/stat/segment/counter2' : 6,
                       },
        sw_if_index2 : { '/stat/segment/counter1' : 7,
                       },
    }

    It describes a per sw-if-index diffset, where each key is stat segment
    path and value is the expected change for that counter for sw-if-index.
    Special case string "err" is used for error counters, which are not per
    sw-if-index.
    """

    def __init__(self, stats_diff=None):
        """
        :param stats_diff: initial diff dictionary; defaults to a fresh
            empty dict (a None sentinel is used instead of a mutable
            default argument, which would be shared between instances)
        """
        self.stats_diff = {} if stats_diff is None else stats_diff

    def update(self, sw_if_index, key, value):
        """Set the expected diff of counter `key` for `sw_if_index`."""
        if sw_if_index in self.stats_diff:
            self.stats_diff[sw_if_index][key] = value
        else:
            self.stats_diff[sw_if_index] = {key: value}

    def __or__(self, other):
        """Return a new dict: this diffset deep-copied, merged with `other`.

        `self.stats_diff` is not modified (the merge runs on a deepcopy).
        """
        return recursive_dict_merge(deepcopy(self.stats_diff), other)