1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from string import Template
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.ssh import SSH
25 from resources.libraries.python.topology import Topology, SocketType
26 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
# Public API of this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Path to the supervisord configuration file used inside containers.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            # Engine name doubles as the class name in this module.
            self.engine = globals()[engine]()
        except KeyError as err:
            # Chain the lookup failure so the original cause is preserved.
            raise NotImplementedError(
                f"{engine} is not implemented."
            ) from err
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError as err:
            raise RuntimeError(
                f"Failed to get container with name: {name}"
            ) from err

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create a fresh Container object on the engine.
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service.
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        # Containers per DUT.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # NOTE(review): mid/sid derivation reconstructed from a partial
            # listing; memif ids repeat per DUT, socket ids are paired.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            # Guest-side path of the first mount (host_dir:guest_dir).
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # TG-facing ends use the real TG MACs; inner ends use synthetic MACs
        # derived from the memif ids.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        # NOTE(review): role1 reconstructed from a partial listing — the first
        # memif of each pipeline stage acts as master; confirm against the
        # pipeline exec template.
        role1 = u"master"
        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
            else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force=True):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        # Default added for signature consistency with LXC/Docker overrides.
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # LXC images are bare; Docker images are expected to ship supervisor.
        if isinstance(self, LXC):
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
        config = \
            u"[unix_http_server]\n" \
            u"file = /tmp/supervisor.sock\n\n" \
            u"[rpcinterface:supervisor]\n" \
            u"supervisor.rpcinterface_factory = " \
            u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
            u"[supervisorctl]\n" \
            u"serverurl = unix:///tmp/supervisor.sock\n\n" \
            u"[supervisord]\n" \
            u"pidfile = /tmp/supervisord.pid\n" \
            u"identifier = supervisor\n" \
            u"directory = /tmp\n" \
            u"logfile = /tmp/supervisord.log\n" \
            u"loglevel = debug\n" \
            u"nodaemon = false\n\n"
        self.execute(
            f'echo "{config}" > {SUPERVISOR_CONF} && '
            f'supervisord -c {SUPERVISOR_CONF}'
        )

    def start_vpp(self):
        """Start VPP inside a container."""
        config = \
            u"[program:vpp]\n" \
            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
            u"autostart = false\n" \
            u"autorestart = false\n" \
            u"redirect_stderr = true\n" \
            u"priority = 1"
        self.execute(
            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
        )
        self.execute(u"supervisorctl start vpp")

        # Register the container's PAPI and STATS sockets with the topology
        # so test libraries can reach this VPP instance.
        # pylint: disable=import-outside-toplevel
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"stats.sock"
        )

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"supervisorctl restart vpp")
        self.execute(u"cat /tmp/supervisord.log")

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"

        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        # Explicit encoding avoids locale-dependent template decoding.
        with open(template, "r", encoding="utf-8") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs:
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # Non-zero exit means lxc-info could not find the container.
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Empty stdout means no container with that name exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)
class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            # Unset attributes read as None instead of raising.
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                # Open an SSH session to the node as a side effect.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value