# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """DUT setup library."""
16 from time import sleep
17 from robot.api import logger
19 from resources.libraries.python.Constants import Constants
20 from resources.libraries.python.ssh import SSH, exec_cmd, exec_cmd_no_error
21 from resources.libraries.python.topology import NodeType, Topology
25 """Contains methods for setting up DUTs."""

    @staticmethod
    def get_service_logs(node, service):
        """Get specific service unit logs from node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        command = u"cat /tmp/*supervisor*.log" \
            if DUTSetup.running_in_container(node) \
            else f"journalctl --no-pager _SYSTEMD_INVOCATION_ID=$(systemctl " \
            f"show -p InvocationID --value {service})"

        message = f"Node {node[u'host']} failed to get logs from unit {service}"

        exec_cmd_no_error(
            node, command, timeout=30, sudo=True, message=message
        )

    @staticmethod
    def get_service_logs_on_all_duts(nodes, service):
        """Get specific service unit logs from all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                DUTSetup.get_service_logs(node, service)
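
    # Usage sketch (illustrative, not part of the library): collect VPP unit
    # logs from every DUT after a failed test step. Constants.VPP_UNIT is the
    # VPP service unit name used elsewhere in this module.
    #
    #     DUTSetup.get_service_logs_on_all_duts(nodes, Constants.VPP_UNIT)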

    @staticmethod
    def restart_service(node, service):
        """Restart the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        command = f"supervisorctl restart {service}" \
            if DUTSetup.running_in_container(node) \
            else f"service {service} restart"
        message = f"Node {node[u'host']} failed to restart service {service}"

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message
        )

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def restart_service_on_all_duts(nodes, service):
        """Restart the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                DUTSetup.restart_service(node, service)

    @staticmethod
    def start_service(node, service):
        """Start up the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        # TODO: Change command to `start` once all parent functions are
        # updated.
        command = f"supervisorctl restart {service}" \
            if DUTSetup.running_in_container(node) \
            else f"service {service} restart"
        message = f"Node {node[u'host']} failed to start service {service}"

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message
        )

        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def start_service_on_all_duts(nodes, service):
        """Start up the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                DUTSetup.start_service(node, service)

    @staticmethod
    def stop_service(node, service):
        """Stop the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        DUTSetup.get_service_logs(node, service)

        command = f"supervisorctl stop {service}" \
            if DUTSetup.running_in_container(node) \
            else f"service {service} stop"
        message = f"Node {node[u'host']} failed to stop service {service}"

        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message
        )

    @staticmethod
    def stop_service_on_all_duts(nodes, service):
        """Stop the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                DUTSetup.stop_service(node, service)

    @staticmethod
    def kill_program(node, program, namespace=None):
        """Kill program on the specified topology node.

        :param node: Topology node.
        :param program: Program name.
        :param namespace: Namespace program is running in.
        :type node: dict
        :type program: str
        :type namespace: str
        """
        host = node[u"host"]
        cmd_timeout = 5
        if namespace in (None, u"default"):
            shell_cmd = u"sh -c"
        else:
            shell_cmd = f"ip netns exec {namespace} sh -c"

        pgrep_cmd = f"{shell_cmd} \'pgrep -c {program}\'"
        _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
                                sudo=True)
        if int(stdout) == 0:
            logger.trace(f"{program} is not running on {host}")
            return
        exec_cmd(node, f"{shell_cmd} \'pkill {program}\'",
                 timeout=cmd_timeout, sudo=True)
        for attempt in range(5):
            _, stdout, _ = exec_cmd(node, pgrep_cmd, timeout=cmd_timeout,
                                    sudo=True)
            if int(stdout) == 0:
                logger.trace(f"Attempt {attempt}: {program} is dead on {host}")
                return
            sleep(1)
        logger.trace(f"SIGKILLing {program} on {host}")
        exec_cmd(node, f"{shell_cmd} \'pkill -9 {program}\'",
                 timeout=cmd_timeout, sudo=True)
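
    # Example (sketch, using a made-up namespace name): terminate a stray
    # testpmd instance. kill_program first sends SIGTERM via pkill, polls
    # pgrep for up to five attempts one second apart, and only then
    # escalates to SIGKILL.
    #
    #     DUTSetup.kill_program(node, u"testpmd", namespace=u"red")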

    @staticmethod
    def verify_program_installed(node, program):
        """Verify that program is installed on the specified topology node.

        :param node: Topology node.
        :param program: Program name.
        :type node: dict
        :type program: str
        """
        cmd = f"command -v {program}"
        exec_cmd_no_error(node, cmd, message=f"{program} is not installed")

    @staticmethod
    def get_pid(node, process):
        """Get PID of running process.

        :param node: DUT node.
        :param process: Process name.
        :type node: dict
        :type process: str
        :returns: PID list, or None if no PID was found after retries.
        :rtype: list
        :raises RuntimeError: If it is not possible to get the PID.
        """
        ssh = SSH()
        ssh.connect(node)

        retval = None
        for i in range(3):
            logger.trace(f"Try {i}: Get {process} PID")
            ret_code, stdout, stderr = ssh.exec_command(f"pidof {process}")

            if int(ret_code):
                raise RuntimeError(
                    f"Not possible to get PID of {process} process on node: "
                    f"{node[u'host']}\n {stdout + stderr}"
                )

            pid_list = stdout.split()
            if len(pid_list) == 1:
                return [int(stdout)]
            if not pid_list:
                logger.debug(f"No {process} PID found on node {node[u'host']}")
                continue
            logger.debug(f"More than one {process} PID found "
                         f"on node {node[u'host']}")
            retval = [int(pid) for pid in pid_list]

        return retval

    @staticmethod
    def get_vpp_pids(nodes):
        """Get PID of running VPP process on all DUTs.

        :param nodes: DUT nodes.
        :type nodes: dict
        :returns: PID lists keyed by DUT host.
        :rtype: dict
        """
        pids = dict()
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                pids[node[u"host"]] = DUTSetup.get_pid(node, u"vpp")
        return pids
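
    # Example (sketch): the returned dict maps each DUT host to the PID list
    # reported by pidof, e.g. {u"10.0.0.1": [1234]}; the host address here is
    # a made-up placeholder.
    #
    #     vpp_pids = DUTSetup.get_vpp_pids(nodes)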

    @staticmethod
    def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
        """Verify if Crypto QAT device virtual functions are initialized on
        all DUTs. If parameter force initialization is set to True, then try
        to initialize or remove VFs on QAT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param force_init: If True then try to initialize to specific value.
        :type node: dict
        :type crypto_type: str
        :type numvfs: int
        :type force_init: bool
        :raises RuntimeError: If QAT VFs are not created and force init is
            set to False.
        """
        pci_addr = Topology.get_cryptodev(node)
        sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)

        if sriov_numvfs != numvfs:
            if force_init:
                # QAT is not initialized and we want to initialize with numvfs
                DUTSetup.crypto_device_init(node, crypto_type, numvfs)
            else:
                raise RuntimeError(
                    f"QAT device failed to create VFs on {node[u'host']}"
                )

    @staticmethod
    def crypto_device_init(node, crypto_type, numvfs):
        """Init Crypto QAT device virtual functions on DUT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :type node: dict
        :type crypto_type: str
        :type numvfs: int
        :raises RuntimeError: If failed to stop VPP or QAT failed to
            initialize.
        """
        if crypto_type == u"HW_DH895xcc":
            kernel_mod = u"qat_dh895xcc"
            kernel_drv = u"dh895xcc"
        elif crypto_type == u"HW_C3xxx":
            kernel_mod = u"qat_c3xxx"
            kernel_drv = u"c3xxx"
        else:
            raise RuntimeError(
                f"Unsupported crypto device type on {node[u'host']}"
            )

        pci_addr = Topology.get_cryptodev(node)

        # QAT device must be re-bound to kernel driver before initialization.
        DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)

        # Stop VPP to prevent deadlock.
        DUTSetup.stop_service(node, Constants.VPP_UNIT)

        current_driver = DUTSetup.get_pci_dev_driver(
            node, pci_addr.replace(u":", r"\:")
        )
        if current_driver is not None:
            DUTSetup.pci_driver_unbind(node, pci_addr)

        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)

        # Initialize QAT VFs.
        if numvfs > 0:
            DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
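
    # Example (sketch, with an illustrative VF count): initialize QAT VFs on
    # a DUT with a C3xxx device. crypto_device_verify with force_init=True
    # runs the same init only when the current VF count differs from the
    # requested one.
    #
    #     DUTSetup.crypto_device_verify(
    #         node, u"HW_C3xxx", numvfs=32, force_init=True
    #     )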

    @staticmethod
    def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
        """Get PCI address of Virtual Function.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI address.
        :param vf_id: Virtual Function number.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :returns: Virtual Function PCI address.
        :rtype: str
        :raises RuntimeError: If failed to get Virtual Function PCI address.
        """
        command = f"sh -c \"basename $(readlink " \
            f"/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id})\""
        message = u"Failed to get virtual function PCI address."

        stdout, _ = exec_cmd_no_error(
            node, command, timeout=30, sudo=True, message=message
        )

        return stdout.strip()

    @staticmethod
    def get_sriov_numvfs(node, pf_pci_addr):
        """Get number of SR-IOV VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :type node: dict
        :type pf_pci_addr: str
        :returns: Number of VFs.
        :rtype: int
        :raises RuntimeError: If PCI device is not SR-IOV capable.
        """
        pci = pf_pci_addr.replace(u":", r"\:")
        command = f"cat /sys/bus/pci/devices/{pci}/sriov_numvfs"
        message = f"PCI device {pf_pci_addr} is not a SR-IOV device."

        for _ in range(3):
            stdout, _ = exec_cmd_no_error(
                node, command, timeout=30, sudo=True, message=message
            )
            try:
                sriov_numvfs = int(stdout)
            except ValueError:
                logger.trace(
                    f"Reading sriov_numvfs info failed on {node[u'host']}"
                )
            else:
                return sriov_numvfs

    @staticmethod
    def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
        """Init or reset SR-IOV virtual functions by setting its number on
        PCI device on DUT. Setting to zero removes all VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
        :type node: dict
        :type pf_pci_addr: str
        :type numvfs: int
        :raises RuntimeError: Failed to create VFs on PCI.
        """
        cmd = f"test -f /sys/bus/pci/devices/{pf_pci_addr}/sriov_numvfs"
        sriov_unsupported, _, _ = exec_cmd(node, cmd)
        # if sriov_numvfs doesn't exist, then sriov_unsupported != 0
        if int(sriov_unsupported):
            if numvfs == 0:
                # sriov is not supported and we want 0 VFs
                # no need to do anything
                return

            raise RuntimeError(
                f"Can't configure {numvfs} VFs on {pf_pci_addr} device "
                f"on {node[u'host']} since it doesn't support SR-IOV."
            )

        pci = pf_pci_addr.replace(u":", r"\:")
        command = f"sh -c \"echo {numvfs} | " \
            f"tee /sys/bus/pci/devices/{pci}/sriov_numvfs\""
        message = f"Failed to create {numvfs} VFs on {pf_pci_addr} device " \
            f"on {node[u'host']}"

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )
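
    # Example (sketch, with a made-up PCI address): re-initialize a PF to
    # eight VFs. The kernel rejects writing a nonzero sriov_numvfs value
    # while VFs already exist, so callers typically reset the count to zero
    # first.
    #
    #     DUTSetup.set_sriov_numvfs(node, u"0000:3b:00.0", numvfs=0)
    #     DUTSetup.set_sriov_numvfs(node, u"0000:3b:00.0", numvfs=8)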

    @staticmethod
    def pci_driver_unbind(node, pci_addr):
        """Unbind PCI device from current driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :raises RuntimeError: If PCI device unbind failed.
        """
        pci = pci_addr.replace(u":", r"\:")
        command = f"sh -c \"echo {pci_addr} | " \
            f"tee /sys/bus/pci/devices/{pci}/driver/unbind\""
        message = f"Failed to unbind PCI device {pci_addr} on {node[u'host']}"

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

    @staticmethod
    def unbind_pci_devices_from_other_driver(node, driver, *pci_addrs):
        """Unbind PCI devices from driver other than input driver on node.

        :param node: DUT node.
        :param driver: Driver to not unbind from. If None or empty string,
            will attempt to unbind from the current driver.
        :param pci_addrs: PCI device addresses.
        :type node: dict
        :type driver: str
        :type pci_addrs: list
        """
        for pci_addr in pci_addrs:
            if not driver or \
                    DUTSetup.get_pci_dev_driver(node, pci_addr) != driver:
                DUTSetup.pci_driver_unbind(node, pci_addr)

    @staticmethod
    def pci_driver_bind(node, pci_addr, driver):
        """Bind PCI device to driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :param driver: Driver to bind.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        message = f"Failed to bind PCI device {pci_addr} to {driver} " \
            f"on host {node[u'host']}"
        pci = pci_addr.replace(u":", r"\:")
        command = f"sh -c \"echo {driver} | " \
            f"tee /sys/bus/pci/devices/{pci}/driver_override\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

        command = f"sh -c \"echo {pci_addr} | " \
            f"tee /sys/bus/pci/drivers/{driver}/bind\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

        command = f"sh -c \"echo | " \
            f"tee /sys/bus/pci/devices/{pci}/driver_override\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )
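
    # Note on the three writes above: driver_override pins the device to the
    # requested driver, the write to the driver's bind file performs the
    # actual bind, and the final empty write clears the override so later
    # rebinds are not restricted to the same driver. Example (sketch, with a
    # made-up PCI address):
    #
    #     DUTSetup.pci_driver_bind(node, u"0000:3b:00.0", u"vfio-pci")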

    @staticmethod
    def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
        """Unbind Virtual Function from driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :raises RuntimeError: If Virtual Function unbind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        pf_pci = pf_pci_addr.replace(u":", r"\:")
        vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"

        command = f"sh -c \"echo {vf_pci_addr} | tee {vf_path}/driver/unbind\""
        message = f"Failed to unbind VF {vf_pci_addr} on {node[u'host']}"

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

    @staticmethod
    def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
        """Bind Virtual Function to driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :param driver: Driver to bind.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        pf_pci = pf_pci_addr.replace(u":", r"\:")
        vf_path = f"/sys/bus/pci/devices/{pf_pci}/virtfn{vf_id}"

        message = f"Failed to bind VF {vf_pci_addr} to {driver} " \
            f"on {node[u'host']}"
        command = f"sh -c \"echo {driver} | tee {vf_path}/driver_override\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

        command = f"sh -c \"echo {vf_pci_addr} | " \
            f"tee /sys/bus/pci/drivers/{driver}/bind\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

        command = f"sh -c \"echo | tee {vf_path}/driver_override\""

        exec_cmd_no_error(
            node, command, timeout=120, sudo=True, message=message
        )

    @staticmethod
    def get_pci_dev_driver(node, pci_addr):
        """Get current PCI device driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :returns: Driver or None.
        :rtype: str or None
        :raises RuntimeError: If it is not possible to get the interface
            driver information from the node.
        """
        driver_path = f"/sys/bus/pci/devices/{pci_addr}/driver"
        cmd = f"test -d {driver_path}"
        ret_code, ret_val, _ = exec_cmd(node, cmd)
        if int(ret_code):
            # the directory doesn't exist which means the device is not bound
            # to any driver
            return None
        cmd = f"basename $(readlink -f {driver_path})"
        ret_val, _ = exec_cmd_no_error(node, cmd)
        return ret_val.strip()
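
    # Example (sketch, with a made-up PCI address): rebind a device to
    # vfio-pci only when it is currently bound to something else.
    #
    #     if DUTSetup.get_pci_dev_driver(node, u"0000:3b:00.0") != u"vfio-pci":
    #         DUTSetup.pci_driver_bind(node, u"0000:3b:00.0", u"vfio-pci")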

    @staticmethod
    def verify_kernel_module(node, module, force_load=False):
        """Verify if kernel module is loaded on node. If parameter force
        load is set to True, then try to load the module.

        :param node: Node.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type node: dict
        :type module: str
        :type force_load: bool
        :raises RuntimeError: If module is not loaded or failed to load.
        """
        command = f"grep -w {module} /proc/modules"
        message = f"Kernel module {module} is not loaded " \
            f"on host {node[u'host']}"

        try:
            exec_cmd_no_error(
                node, command, timeout=30, sudo=False, message=message
            )
        except RuntimeError:
            if force_load:
                # Module is not loaded and we want to load it
                DUTSetup.load_kernel_module(node, module)
            else:
                raise

    @staticmethod
    def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
        """Verify if kernel module is loaded on all DUTs. If parameter force
        load is set to True, then try to load the module.

        :param nodes: DUT nodes.
        :param module: Module to verify.
        :param force_load: If True then try to load module.
        :type nodes: dict
        :type module: str
        :type force_load: bool
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                DUTSetup.verify_kernel_module(node, module, force_load)

    @staticmethod
    def verify_uio_driver_on_all_duts(nodes):
        """Verify if uio driver kernel module is loaded on all DUTs. If the
        module is not present it will try to load it.

        :param nodes: DUT nodes.
        :type nodes: dict
        """
        for node in nodes.values():
            if node[u"type"] == NodeType.DUT:
                uio_driver = Topology.get_uio_driver(node)
                DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
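
    # Example (sketch): make sure vfio-pci is present on every DUT before
    # binding data-plane interfaces; force_load=True falls back to modprobe
    # when the grep over /proc/modules finds nothing.
    #
    #     DUTSetup.verify_kernel_module_on_all_duts(
    #         nodes, u"vfio_pci", force_load=True
    #     )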

    @staticmethod
    def load_kernel_module(node, module):
        """Load kernel module on node.

        :param node: DUT node.
        :param module: Module to load.
        :type node: dict
        :type module: str
        :raises RuntimeError: If loading failed.
        """
        command = f"modprobe {module}"
        message = f"Failed to load {module} on host {node[u'host']}"

        exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)

    @staticmethod
    def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
        """Install VPP on all DUT nodes. Start the VPP service in case
        systemd is not available or does not support autostart.

        :param nodes: Nodes in the topology.
        :param vpp_pkg_dir: Path to directory where VPP packages are stored.
        :type nodes: dict
        :type vpp_pkg_dir: str
        :raises RuntimeError: If failed to remove or install VPP.
        """
        for node in nodes.values():
            message = f"Failed to install VPP on host {node[u'host']}!"
            if node[u"type"] == NodeType.DUT:
                command = u"ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true"
                exec_cmd_no_error(node, command, sudo=True)

                command = u". /etc/lsb-release; echo \"${DISTRIB_ID}\""
                stdout, _ = exec_cmd_no_error(node, command)

                if stdout.strip() == u"Ubuntu":
                    exec_cmd_no_error(
                        node, u"apt-get purge -y '*vpp*' || true",
                        timeout=120, sudo=True
                    )
                    # workaround to avoid installation of vpp-api-python
                    exec_cmd_no_error(
                        node, f"rm -f {vpp_pkg_dir}vpp-api-python.deb",
                        timeout=120, sudo=True
                    )
                    exec_cmd_no_error(
                        node, f"dpkg -i --force-all {vpp_pkg_dir}*.deb",
                        timeout=120, sudo=True, message=message
                    )
                    exec_cmd_no_error(node, u"dpkg -l | grep vpp", sudo=True)
                    if DUTSetup.running_in_container(node):
                        DUTSetup.restart_service(node, Constants.VPP_UNIT)
                else:
                    exec_cmd_no_error(
                        node, u"yum -y remove '*vpp*' || true",
                        timeout=120, sudo=True
                    )
                    # workaround to avoid installation of vpp-api-python
                    exec_cmd_no_error(
                        node, f"rm -f {vpp_pkg_dir}vpp-api-python.rpm",
                        timeout=120, sudo=True
                    )
                    exec_cmd_no_error(
                        node, f"rpm -ivh {vpp_pkg_dir}*.rpm",
                        timeout=120, sudo=True, message=message
                    )
                    exec_cmd_no_error(node, u"rpm -qai '*vpp*'", sudo=True)
                    DUTSetup.restart_service(node, Constants.VPP_UNIT)
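
    # Example (sketch, with a made-up path): install freshly built packages.
    # The trailing slash matters, since vpp_pkg_dir is concatenated directly
    # with the package glob.
    #
    #     DUTSetup.install_vpp_on_all_duts(nodes, u"/tmp/vpp_build/")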

    @staticmethod
    def running_in_container(node):
        """This method tests if topology node is running inside container.

        :param node: Topology node.
        :type node: dict
        :returns: True if running in docker container, False if not or if
            detection failed.
        :rtype: bool
        """
        command = u"fgrep docker /proc/1/cgroup"
        message = u"Failed to get cgroup settings."
        try:
            exec_cmd_no_error(
                node, command, timeout=30, sudo=False, message=message
            )
        except RuntimeError:
            return False
        return True

    @staticmethod
    def get_docker_mergeddir(node, uuid):
        """Get Docker overlay for MergedDir diff.

        :param node: DUT node.
        :param uuid: Docker UUID.
        :type node: dict
        :type uuid: str
        :returns: Docker container MergedDir.
        :rtype: str
        :raises RuntimeError: If getting output failed.
        """
        command = f"docker inspect " \
            f"--format='{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}"
        message = f"Failed to get directory of {uuid} on host {node[u'host']}"

        stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
        return stdout.strip()

    @staticmethod
    def get_hugepages_info(node, hugesize=None):
        """Get huge page information from the system.

        :param node: Node in the topology.
        :param hugesize: Size of hugepages in kB. Default system huge size if
            None.
        :type node: dict
        :type hugesize: int
        :returns: Huge page counters for the given size.
        :rtype: dict
        :raises RuntimeError: If reading failed.
        """
        if not hugesize:
            hugesize = "$(grep Hugepagesize /proc/meminfo | awk '{ print $2 }')"
        command = f"cat /sys/kernel/mm/hugepages/hugepages-{hugesize}kB/*"
        stdout, _ = exec_cmd_no_error(node, command)
        try:
            line = stdout.splitlines()
            return {
                "free_hugepages": int(line[0]),
                "nr_hugepages": int(line[1]),
                "nr_hugepages_mempolicy": int(line[2]),
                "nr_overcommit_hugepages": int(line[3]),
                "resv_hugepages": int(line[4]),
                "surplus_hugepages": int(line[5])
            }
        except ValueError:
            logger.trace(u"Reading huge pages information failed!")
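
    # Note: the parsing above relies on the shell glob expanding the per-size
    # sysfs entries in alphabetical order: free_hugepages, nr_hugepages,
    # nr_hugepages_mempolicy, nr_overcommit_hugepages, resv_hugepages,
    # surplus_hugepages. Example (sketch) reading 1 GB hugepage counters:
    #
    #     info = DUTSetup.get_hugepages_info(node, hugesize=1048576)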

    @staticmethod
    def check_huge_page(
            node, huge_mnt, mem_size, hugesize=2048, allocate=False):
        """Check if there are enough HugePages in system. If allocate is set
        to true, try to allocate more HugePages.

        :param node: Node in the topology.
        :param huge_mnt: HugePage mount point.
        :param mem_size: Requested memory in MB.
        :param hugesize: HugePage size in KB.
        :param allocate: Whether to allocate more memory if not enough.
        :type node: dict
        :type huge_mnt: str
        :type mem_size: int
        :type hugesize: int
        :type allocate: bool
        :raises RuntimeError: Mounting hugetlbfs failed or not enough
            HugePages or increasing map count failed.
        """
        # Get huge pages information.
        hugepages = DUTSetup.get_hugepages_info(node, hugesize=hugesize)

        # Check if hugepages requested are available on node.
        if hugepages[u"nr_overcommit_hugepages"]:
            # If overcommit is used, we need to know how many additional pages
            # can still be allocated.
            huge_available = hugepages[u"nr_overcommit_hugepages"] - \
                hugepages[u"surplus_hugepages"]
        else:
            # Falling back to free_hugepages, which was used before to detect.
            huge_available = hugepages[u"free_hugepages"]

        if ((mem_size * 1024) // hugesize) > huge_available:
            # If we want to allocate hugepages dynamically.
            if allocate:
                huge_needed = ((mem_size * 1024) // hugesize) - huge_available
                huge_to_allocate = huge_needed + hugepages[u"nr_hugepages"]
                max_map_count = huge_to_allocate * 4
                # Check if huge pages mount point exists.
                try:
                    exec_cmd_no_error(node, u"fgrep 'hugetlbfs' /proc/mounts")
                except RuntimeError:
                    exec_cmd_no_error(node, f"mkdir -p {huge_mnt}", sudo=True)
                    exec_cmd_no_error(
                        node,
                        f"mount -t hugetlbfs -o pagesize={hugesize}k none "
                        f"{huge_mnt}",
                        sudo=True
                    )
                # Increase maximum number of memory map areas for process.
                exec_cmd_no_error(
                    node,
                    f"echo \"{max_map_count}\" | "
                    f"sudo tee /proc/sys/vm/max_map_count",
                    message=f"Increase map count failed on {node[u'host']}!"
                )
                # Increase hugepage count.
                exec_cmd_no_error(
                    node,
                    f"echo \"{huge_to_allocate}\" | "
                    f"sudo tee /proc/sys/vm/nr_hugepages",
                    message=f"Increasing huge page count failed "
                    f"on {node[u'host']}!"
                )
            # If we do not want to allocate dynamically, end with an error.
            else:
                raise RuntimeError(
                    f"Not enough available huge pages: {huge_available}!"
                )
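
    # Worked example (illustrative numbers, made-up mount point): requesting
    # mem_size=4096 MB of 2048 kB pages needs (4096 * 1024) // 2048 = 2048
    # pages. With only 512 available and allocate=True, huge_needed is 1536,
    # nr_hugepages is raised by that amount on top of the current count, and
    # max_map_count is set to four times the new total.
    #
    #     DUTSetup.check_huge_page(
    #         node, u"/mnt/huge", 4096, hugesize=2048, allocate=True
    #     )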