+ CORE_DUMP_DIR = u"/tmp"
+
+ # Perf stat events (comma separated).
+ PERF_STAT_EVENTS = get_str_from_env(
+ u"PERF_STAT_EVENTS",
+ u"cpu-clock,context-switches,cpu-migrations,page-faults,"
+ u"cycles,instructions,branches,branch-misses,L1-icache-load-misses")
+
+ # Equivalent to ~0 used in VPP code.
+ BITWISE_NON_ZERO = 0xffffffff
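+ # (In C, ~0 on a 32-bit unsigned type yields 0xffffffff, while Python's ~0
+ # is -1 due to unbounded integers, hence the explicit constant; note that
+ # BITWISE_NON_ZERO == ~0 & 0xffffffff holds.)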
+
+ # Default path to VPP API socket.
+ SOCKSVR_PATH = u"/run/vpp/api.sock"
+
+ # Number of trials to execute in MRR test.
+ PERF_TRIAL_MULTIPLICITY = get_int_from_env(u"PERF_TRIAL_MULTIPLICITY", 10)
+
+ # Duration [s] of one trial in MRR test.
+ PERF_TRIAL_DURATION = get_float_from_env(u"PERF_TRIAL_DURATION", 1.0)
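+ # With the defaults above, the traffic part of one MRR test lasts roughly
+ # PERF_TRIAL_MULTIPLICITY * PERF_TRIAL_DURATION = 10 * 1.0 = 10 seconds,
+ # not counting per-trial overhead such as the delays below.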
+
+ # Whether to use latency streams in main search trials.
+ PERF_USE_LATENCY = get_pessimistic_bool_from_env(u"PERF_USE_LATENCY")
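+ # ("Pessimistic" is assumed to mean the helper defaults to False
+ # when the environment variable is unset or unrecognized.)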
+
+ # Duration [s] of one latency-specific trial in NDRPDR test.
+ PERF_TRIAL_LATENCY_DURATION = get_float_from_env(
+ u"PERF_TRIAL_LATENCY_DURATION", 5.0)
+
+ # On some testbeds the TG takes longer than usual to start sending traffic.
+ # This constant [s] allows a longer wait without affecting
+ # the approximate trial duration. For example, use 0.098 for AWS.
+ PERF_TRIAL_STL_DELAY = get_float_from_env(u"PERF_TRIAL_STL_DELAY", 0.0)
+
+ # ASTF usually needs a different value for the delay.
+ PERF_TRIAL_ASTF_DELAY = get_float_from_env(
+ u"PERF_TRIAL_ASTF_DELAY", 0.112
+ )
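+ # A minimal sketch of how such a delay is assumed to enter trial timing
+ # (variable names hypothetical):
+ #     wall_time = trial_duration + PERF_TRIAL_STL_DELAY
+ # i.e. the TG gets extra wall time to ramp up, while the measured
+ # duration stays close to trial_duration.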
+
+ # Extended debug (incl. VPP packet trace, Linux perf stat, ...).
+ # The full list is available as a suite variable (__init__.robot)
+ # or can be overridden by a test.
+ EXTENDED_DEBUG = get_pessimistic_bool_from_env(u"EXTENDED_DEBUG")
+
+ # UUID string of the DUT1 /tmp volume created outside of the
+ # DUT1 docker in case of a vpp-device test. The ${EMPTY} value means
+ # the /tmp directory is inside the DUT1 docker.
+ DUT1_UUID = get_str_from_env(u"DUT1_UUID", u"")
+
+ # Default path to VPP API Stats socket.
+ SOCKSTAT_PATH = u"/run/vpp/stats.sock"
+
+ # Global "kill switch" for CRC checking during runtime.
+ FAIL_ON_CRC_MISMATCH = get_pessimistic_bool_from_env(
+ u"FAIL_ON_CRC_MISMATCH"
+ )
+
+ # Default IP4 prefix length (if not defined in topology file).
+ DEFAULT_IP4_PREFIX_LENGTH = u"24"
+
+ # Maximum number of interfaces in a data path.
+ DATAPATH_INTERFACES_MAX = 100
+
+ # Mapping from NIC name to its bps limit.
+ NIC_NAME_TO_BPS_LIMIT = {
+ u"Intel-X520-DA2": 10000000000,
+ u"Intel-X553": 10000000000,
+ u"Intel-X710": 10000000000,
+ u"Intel-XL710": 24500000000,
+ u"Intel-XXV710": 24500000000,
+ u"Intel-E810CQ": 100000000000,
+ u"Mellanox-CX556A": 100000000000,
+ u"Amazon-Nitro-50G": 10000000000,
+ u"virtual": 100000000,
+ }
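+ # For reference, assuming the standard Ethernet L1 overhead of 20B per
+ # frame (8B preamble, 12B inter-frame gap), the 64B line rate for a
+ # 10 Gbps NIC works out to
+ #     10**10 / ((64 + 20) * 8) = 14880952 pps
+ # matching the 10 GbE entries in NIC_NAME_TO_PPS_LIMIT below.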
+
+ # Mapping from NIC name to its pps limit.
+ NIC_NAME_TO_PPS_LIMIT = {
+ u"Intel-X520-DA2": 14880952,
+ u"Intel-X553": 14880952,
+ u"Intel-X710": 14880952,
+ u"Intel-XL710": 18750000,
+ u"Intel-XXV710": 18750000,
+ u"Intel-E810CQ": 58500000,
+ # 2n-clx testbeds show duration stretching at high rates,
+ # depending on the encapsulation TRex has to generate.
+ # 40 Mpps is still too much for dot1q (~8% stretching).
+ # 36 Mpps is around the maximal VPP throughput (l2patch 4c8t).
+ # VXLAN traffic will still show stretching at 36 Mpps (>12%),
+ # but we do not care about those tests that much.
+ u"Mellanox-CX556A": 36000000, # 148809523,
+ u"Amazon-Nitro-50G": 1200000,
+ u"virtual": 14880952,
+ }
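+ # Hypothetical usage sketch (not part of this module): the effective
+ # transmit cap for a NIC and frame size is the stricter of the two maps:
+ #     pps_cap = min(
+ #         NIC_NAME_TO_PPS_LIMIT[nic_name],
+ #         NIC_NAME_TO_BPS_LIMIT[nic_name] / ((frame_size + 20) * 8),
+ #     )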
+
+ # Suite file names use codes for NICs.
+ NIC_NAME_TO_CODE = {
+ u"Intel-X520-DA2": u"10ge2p1x520",
+ u"Intel-X553": u"10ge2p1x553",
+ u"Intel-X710": u"10ge2p1x710",
+ u"Intel-XL710": u"40ge2p1xl710",
+ u"Intel-XXV710": u"25ge2p1xxv710",
+ u"Intel-E810CQ": u"100ge2p1e810cq",
+ u"Amazon-Nitro-50G": u"50ge1p1ENA",
+ u"Mellanox-CX556A": u"100ge2p1cx556a",
+ }
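+ # The codes appear to follow <speed>ge<ports>p<nics><model>, e.g.
+ # u"25ge2p1xxv710" reads as 25 GbE, 2 ports on 1 XXV710 NIC
+ # (assumed convention, inferred from the entries above).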
+
+ # Not every driver is supported by every NIC.
+ NIC_NAME_TO_DRIVER = {
+ u"Intel-X520-DA2": [u"vfio-pci", u"af_xdp"],
+ u"Intel-X553": [u"vfio-pci", u"af_xdp"],
+ u"Intel-X710": [u"vfio-pci", u"avf", u"af_xdp"],
+ u"Intel-XL710": [u"vfio-pci", u"avf", u"af_xdp"],
+ u"Intel-XXV710": [u"vfio-pci", u"avf", u"af_xdp"],
+ u"Intel-E810CQ": [u"vfio-pci", u"avf", u"af_xdp"],
+ u"Amazon-Nitro-50G": [u"vfio-pci"],
+ u"Mellanox-CX556A": [u"rdma-core", u"af_xdp"],
+ }
+
+ # Each driver needs a different plugin to work.
+ NIC_DRIVER_TO_PLUGINS = {
+ u"vfio-pci": u"dpdk_plugin.so",
+ u"avf": u"avf_plugin.so",
+ u"rdma-core": u"rdma_plugin.so",
+ u"af_xdp": u"af_xdp_plugin.so",
+ }
+
+ # Tags to differentiate tests for different NIC drivers.
+ NIC_DRIVER_TO_TAG = {
+ u"vfio-pci": u"DRV_VFIO_PCI",
+ u"avf": u"DRV_AVF",
+ u"rdma-core": u"DRV_RDMA_CORE",
+ u"af_xdp": u"DRV_AF_XDP",
+ }
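+ # Hypothetical lookup sketch tying the three maps above together:
+ #     for driver in NIC_NAME_TO_DRIVER[u"Intel-X710"]:
+ #         plugin = NIC_DRIVER_TO_PLUGINS[driver]  # e.g. u"avf_plugin.so"
+ #         tag = NIC_DRIVER_TO_TAG[driver]  # e.g. u"DRV_AVF"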
+
+ # Suite names have to be different, so add a prefix.
+ NIC_DRIVER_TO_SUITE_PREFIX = {
+ u"vfio-pci": u"",
+ u"avf": u"avf-",
+ u"rdma-core": u"rdma-",
+ u"af_xdp": u"af-xdp-",
+ }
+
+ # Number of virtual functions of the physical NIC.
+ NIC_DRIVER_TO_VFS = {
+ u"vfio-pci": u"nic_vfs}= | 0",
+ u"avf": u"nic_vfs}= | 1",
+ u"rdma-core": u"nic_vfs}= | 0",
+ u"af_xdp": u"nic_vfs}= | 0",
+ }
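+ # The values above are fragments of Robot Framework variable-table syntax,
+ # presumably completed by the suite generator into lines such as
+ # "| ${nic_vfs}= | 1 |" (an assumption based on the "}= |" shape).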
+
+ # Not every driver is supported by every NIC.
+ DPDK_NIC_NAME_TO_DRIVER = {
+ u"Intel-X520-DA2": [u"vfio-pci"],
+ u"Intel-X553": [u"vfio-pci"],
+ u"Intel-X710": [u"vfio-pci"],
+ u"Intel-XL710": [u"vfio-pci"],
+ u"Intel-XXV710": [u"vfio-pci"],
+ u"Intel-E810CQ": [u"vfio-pci"],
+ u"Amazon-Nitro-50G": [u"vfio-pci"],
+ u"Mellanox-CX556A": [u"mlx5_core"],
+ }
+
+ # Tags to differentiate tests for different NIC drivers.
+ DPDK_NIC_DRIVER_TO_TAG = {
+ u"vfio-pci": u"DRV_VFIO_PCI",
+ u"mlx5_core": u"DRV_MLX5_CORE",
+ }
+
+ # Suite names have to be different, so add a prefix.
+ DPDK_NIC_DRIVER_TO_SUITE_PREFIX = {
+ u"vfio-pci": u"",
+ u"mlx5_core": u"mlx5-",
+ }
+
+ # Some identifiers constructed from suite names
+ # have to be independent of the NIC driver used.
+ # In order to remove or reject the NIC driver part,
+ # it is useful to have a list of such prefixes precomputed.
+ FORBIDDEN_SUITE_PREFIX_LIST = [
+ prefix for prefix in NIC_DRIVER_TO_SUITE_PREFIX.values() if prefix
+ ]
+ FORBIDDEN_SUITE_PREFIX_LIST += [
+ prefix for prefix in DPDK_NIC_DRIVER_TO_SUITE_PREFIX.values() if prefix
+ ]
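+ # With the maps above, this evaluates to
+ #     [u"avf-", u"rdma-", u"af-xdp-", u"mlx5-"]
+ # as the empty vfio-pci prefixes are filtered out.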
+
+ # TODO CSIT-1481: Crypto HW should be read from topology file instead.
+ NIC_NAME_TO_CRYPTO_HW = {
+ u"Intel-X553": u"HW_C3xxx",
+ u"Intel-X710": u"HW_DH895xcc",
+ u"Intel-XL710": u"HW_DH895xcc",
+ }
+
+ DEVICE_TYPE_TO_KEYWORD = {
+ u"scapy": None
+ }
+
+ PERF_TYPE_TO_KEYWORD = {
+ u"mrr": u"Traffic should pass with maximum rate",
+ u"ndrpdr": u"Find NDR and PDR intervals using optimized search",
+ u"soak": u"Find critical load using PLRsearch",
+ }
+
+ PERF_TYPE_TO_SUITE_DOC_VER = {
+ u"mrr": u'''fication:* In MaxReceivedRate tests TG sends traffic\\
+| ... | at line rate and reports total received packets over trial period.\\''',
+ u"ndrpdr": u'''ication:* TG finds and reports throughput NDR (Non Drop\\
+| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\\
+| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\\
+| ... | of packets transmitted. NDR and PDR are discovered for different\\
+| ... | Ethernet L2 frame sizes using MLRsearch library.\\''',
+ u"soak": u'''fication:* TG sends traffic at dynamically computed\\
+| ... | rate as PLRsearch algorithm gathers data and improves its estimate\\
+| ... | of a rate at which a prescribed small fraction of packets\\
+| ... | would be lost. After the set time, the search stops\\
+| ... | and the algorithm reports its current estimate.\\''',
+ }
+
+ PERF_TYPE_TO_TEMPLATE_DOC_VER = {
+ u"mrr": u'''Measure MaxReceivedRate for ${frame_size}B frames\\
+| | ... | using burst trials throughput test.\\''',
+ u"ndrpdr": u"Measure NDR and PDR values using MLRsearch algorithm.\\",
+ u"soak": u"Estimate critical rate using PLRsearch algorithm.\\",
+ }