+ if node[u"type"] == NodeType.DUT:
+ uio_driver = Topology.get_uio_driver(node)
+ DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
+
+ @staticmethod
+ def load_kernel_module(node, module):
+ """Load kernel module on node.
+
+ :param node: DUT node.
+ :param module: Module to load.
+ :type node: dict
+ :type module: str
+ :returns: nothing
+ :raises RuntimeError: If loading failed.
+ """
+ command = f"modprobe {module}"
+ message = f"Failed to load {module} on host {node[u'host']}"
+
+ exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
+
+ @staticmethod
+ def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
+ """Install VPP on all DUT nodes. Start the VPP service in case of
+ systemd is not available or does not support autostart.
+
+ :param nodes: Nodes in the topology.
+ :param vpp_pkg_dir: Path to directory where VPP packages are stored.
+ :type nodes: dict
+ :type vpp_pkg_dir: str
+ :raises RuntimeError: If failed to remove or install VPP.
+ """
+ for node in nodes.values():
+ message = f"Failed to install VPP on host {node[u'host']}!"
+ if node[u"type"] == NodeType.DUT:
+ command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
+ exec_cmd_no_error(node, command, sudo=True)
+
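+ # Detect the distribution to choose between apt based (Ubuntu)
+ # and yum based (CentOS/RHEL) package handling below.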
+ command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
+ stdout, _ = exec_cmd_no_error(node, command)
+
+ if stdout.strip() == u"Ubuntu":
+ exec_cmd_no_error(
+ node, u"apt-get purge -y '*vpp*' || true",
+ timeout=120, sudo=True
+ )
+ # workaround to avoid installation of vpp-api-python
+ exec_cmd_no_error(
+ node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
+ timeout=120, sudo=True
+ )
+ exec_cmd_no_error(
+ node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
+ timeout=120, sudo=True, message=message
+ )
+ exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
+ if DUTSetup.running_in_container(node):
+ DUTSetup.restart_service(node, Constants.VPP_UNIT)
+ else:
+ exec_cmd_no_error(
+ node, u"yum -y remove '*vpp*' || true",
+ timeout=120, sudo=True
+ )
+ # workaround to avoid installation of vpp-api-python
+ exec_cmd_no_error(
+ node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
+ timeout=120, sudo=True
+ )
+ exec_cmd_no_error(
+ node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
+ timeout=120, sudo=True, message=message
+ )
+ exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
+ DUTSetup.restart_service(node, Constants.VPP_UNIT)
+
+ @staticmethod
+ def running_in_container(node):
+ """This method tests if topology node is running inside container.
+
+ :param node: Topology node.
+ :type node: dict
+ :returns: True if running in a Docker container, False if not or if
+ detection failed.
+ :rtype: bool
+ """
+ command = u"fgrep docker /proc/1/cgroup"
+ message = u"Failed to get cgroup settings."
+ try:
+ exec_cmd_no_error(
+ node, command, timeout=30, sudo=False, message=message
+ )
+ except RuntimeError:
+ return False
+ return True
+
+ @staticmethod
+ def get_docker_mergeddir(node, uuid):
+ """Get Docker overlay for MergedDir diff.
+
+ :param node: DUT node.
+ :param uuid: Docker UUID.
+ :type node: dict
+ :type uuid: str
+ :returns: Docker container MergedDir.
+ :rtype: str
+ :raises RuntimeError: If getting output failed.
+ """
+ command = f"docker inspect " \
+ f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
+ message = f"Failed to get directory of {uuid} on host {node[u'host']}"
+
+ stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
+ return stdout.strip()
+
+ @staticmethod
+ def get_hugepages_info(node, hugesize=None):
+ """Get number of huge pages in system.
+
+ :param node: Node in the topology.
+ :param hugesize: Huge page size in kB. System default size if None.
+ :type node: dict
+ :type hugesize: int
+ :returns: Huge page counters read from the system.
+ :rtype: dict
+ :raises RuntimeError: If reading failed.
+ """
+ if not hugesize:
+ hugesize = "$(grep Hugepagesize /proc/meminfo | awk '{ print $2 }')"
+ command = f"cat /sys/kernel/mm/hugepages/hugepages-{hugesize}kB/*"
+ stdout, _ = exec_cmd_no_error(node, command)
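+ # The shell glob expands alphabetically, so the output lines
+ # correspond to the counters in the fixed order below.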
+ try:
+ line = stdout.splitlines()
+ return {
+ "free_hugepages": int(line[0]),
+ "nr_hugepages": int(line[1]),
+ "nr_hugepages_mempolicy": int(line[2]),
+ "nr_overcommit_hugepages": int(line[3]),
+ "resv_hugepages": int(line[4]),
+ "surplus_hugepages": int(line[5])
+ }
+ except (IndexError, ValueError) as err:
+ logger.trace(u"Reading huge pages information failed!")
+ raise RuntimeError(u"Huge pages information not available!") from err
+
+ @staticmethod
+ def check_huge_page(
+ node, huge_mnt, mem_size, hugesize=2048, allocate=False):
+ """Check if there is enough HugePages in system. If allocate is set to
+ true, try to allocate more HugePages.
+
+ :param node: Node in the topology.
+ :param huge_mnt: HugePage mount point.
+ :param mem_size: Requested memory in MB.
+ :param hugesize: HugePage size in KB.
+ :param allocate: Whether to allocate more memory if not enough.
+ :type node: dict
+ :type huge_mnt: str
+ :type mem_size: int
+ :type hugesize: int
+ :type allocate: bool
+ :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
+ or increasing map count failed.
+ """
+ # Get huge pages information.
+ hugepages = DUTSetup.get_hugepages_info(node, hugesize=hugesize)
+
+ # Check if hugepages requested are available on node.
+ if hugepages[u"nr_overcommit_hugepages"]:
+ # If overcommit is used, we need to know how many additional
+ # pages we can allocate.
+ huge_available = hugepages[u"nr_overcommit_hugepages"] - \
+ hugepages[u"surplus_hugepages"]
+ else:
+ # Fall back to free_hugepages, which was used before for detection.
+ huge_available = hugepages[u"free_hugepages"]
+
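+ # mem_size is in MB and hugesize in kB, so the requested page
+ # count is mem_size * 1024 // hugesize.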
+ if ((mem_size * 1024) // hugesize) > huge_available:
+ # If we want to allocate hugepage dynamically.
+ if allocate:
+ huge_needed = ((mem_size * 1024) // hugesize) - huge_available
+ huge_to_allocate = huge_needed + hugepages[u"nr_hugepages"]
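+ # Heuristic: allow several memory map areas per huge page so
+ # mapping the pages cannot hit the vm.max_map_count limit.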
+ max_map_count = huge_to_allocate * 4
+ # Check if a huge pages mount point exists.
+ try:
+ exec_cmd_no_error(node, u"fgrep 'hugetlbfs' /proc/mounts")
+ except RuntimeError:
+ exec_cmd_no_error(node, f"mkdir -p {huge_mnt}", sudo=True)
+ exec_cmd_no_error(
+ node,
+ f"mount -t hugetlbfs -o pagesize={hugesize}k none "
+ f"{huge_mnt}",
+ sudo=True)
+ # Increase the maximum number of memory map areas for the process.
+ exec_cmd_no_error(
+ node,
+ f"echo \"{max_map_count}\" | "
+ f"sudo tee /proc/sys/vm/max_map_count",
+ message=f"Increase map count failed on {node[u'host']}!"
+ )
+ # Increase hugepage count.
+ exec_cmd_no_error(
+ node,
+ f"echo \"{huge_to_allocate}\" | "
+ f"sudo tee /proc/sys/vm/nr_hugepages",
+ message=f"Mount huge pages failed on {node[u'host']}!"
+ )
+ # If we do not want to allocate dynamically, end with an error.
+ else:
+ raise RuntimeError(
+ f"Not enough availablehuge pages: {huge_available}!"
+ )