1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library to manipulate Containers."""
16 from collections import OrderedDict, Counter
18 from string import Template
20 from robot.libraries.BuiltIn import BuiltIn
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.ssh import SSH
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
29 u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
32 SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            # Engine name must match a class defined in this module.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])
        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )
        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control
            # VPP as a service inside the container.
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        # Number of containers per DUT.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # memif IDs and socket IDs are derived from the container's
            # ordinal position within its DUT.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            # Guest-side directory of the first mount (host:guest pair).
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First chain link faces the TG; otherwise the neighbour memif MAC.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
            else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        # Active Container instance engine methods operate on.
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            # LXC base image lacks supervisor; Docker image has it baked in.
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
        config = \
            u"[unix_http_server]\n" \
            u"file = /tmp/supervisor.sock\n\n" \
            u"[rpcinterface:supervisor]\n" \
            u"supervisor.rpcinterface_factory = " \
            u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
            u"[supervisorctl]\n" \
            u"serverurl = unix:///tmp/supervisor.sock\n\n" \
            u"[supervisord]\n" \
            u"pidfile = /tmp/supervisord.pid\n" \
            u"identifier = supervisor\n" \
            u"directory = /tmp\n" \
            u"logfile = /tmp/supervisord.log\n" \
            u"loglevel = debug\n" \
            u"nodaemon = false\n\n"
        self.execute(
            f'echo "{config}" > {SUPERVISOR_CONF} && '
            f'supervisord -c {SUPERVISOR_CONF}'
        )

    def start_vpp(self):
        """Start VPP inside a container."""
        config = \
            u"[program:vpp]\n" \
            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
            u"autostart = false\n" \
            u"autorestart = false\n" \
            u"redirect_stderr = true\n" \
            u"priority = 1"
        self.execute(
            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
        )
        self.execute(u"supervisorctl start vpp")

        # Register the container's VPP PAPI/stats sockets in the topology so
        # other keywords can talk to this VPP instance.
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"stats.sock"
        )

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"supervisorctl restart vpp")
        self.execute(u"cat /tmp/supervisord.log")

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"

        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, "r") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # lxc-info exits non-zero when the container does not exist.
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means the container exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means the container is running.
        return bool(stdout)
1027 """Container class."""
1029 def __getattr__(self, attr):
1030 """Get attribute custom implementation.
1032 :param attr: Attribute to get.
1034 :returns: Attribute value or None.
1038 return self.__dict__[attr]
1042 def __setattr__(self, attr, value):
1043 """Set attribute custom implementation.
1045 :param attr: Attribute to set.
1046 :param value: Value to set.
1051 # Check if attribute exists
1054 # Creating new attribute
1056 self.__dict__[u"ssh"] = SSH()
1057 self.__dict__[u"ssh"].connect(value)
1058 self.__dict__[attr] = value
1060 # Updating attribute base of type
1061 if isinstance(self.__dict__[attr], list):
1062 self.__dict__[attr].append(value)
1064 self.__dict__[attr] = value