Python3: resources and libraries
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
16
17 """Library to manipulate Containers."""
18
19 from collections import OrderedDict, Counter
20 from io import open
21 from string import Template
22
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.ssh import SSH
25 from resources.libraries.python.topology import Topology, SocketType
26 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
27
28
# Public names exported by this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Location of the supervisord configuration file inside a container.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
34
35
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # The engine class is looked up by name among classes defined
            # in this module (e.g. LXC, Docker).
            self.engine = globals()[engine]()
        except KeyError:
            # Suppress the KeyError context; the NotImplementedError is
            # the whole story for the caller.
            raise NotImplementedError(
                f"{engine} is not implemented."
            ) from None
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            # Suppress the KeyError context for a cleaner traceback.
            raise RuntimeError(
                f"Failed to get container with name: {name}"
            ) from None

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers.values():
            self.engine.container = container
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers.values():
            self.engine.container = container
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                self.containers[container].node[u"host"]
                for container in self.containers
            )
        )
        # Number of containers per DUT.
        mod = len(self.containers) // dut_cnt

        # Dispatch table replaces an if/elif chain; fail fast on an
        # unsupported topology before any container is touched.
        configure = {
            u"chain": self._configure_vpp_chain_l2xc,
            u"cross_horiz": self._configure_vpp_cross_horiz,
            u"chain_functional": self._configure_vpp_chain_functional,
            u"chain_ip4": self._configure_vpp_chain_ip4,
            u"pipeline_ip4": self._configure_vpp_pipeline_ip4,
        }.get(chain_topology)
        if configure is None:
            raise RuntimeError(
                f"Container topology {chain_topology} not implemented"
            )

        for i, container in enumerate(self.containers.values()):
            # memif IDs and socket IDs are derived from the container's
            # position within its DUT.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = container
            # Guest-side path of the first mount (host:guest pairs).
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            configure(
                mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                guest_dir=guest_dir, **kwargs
            )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # NOTE(review): assumes the container name contains DUT1 or DUT2;
        # otherwise if_pci/if_name below would be unbound.
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last container in the chain faces the TG; others face the
        # neighbouring container's synthetic MAC.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        # Position of this container within the pipeline on its DUT.
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
            else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        # Edge containers use per-container sockets; interior containers
        # share memif-pipe sockets with their neighbours.
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.destroy()
352
353
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # Only LXC containers need supervisor installed at runtime;
        # Docker images are expected to ship with it (see subclass usage).
        if isinstance(self, LXC):
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
            config = \
                u"[unix_http_server]\n" \
                u"file  = /tmp/supervisor.sock\n\n" \
                u"[rpcinterface:supervisor]\n" \
                u"supervisor.rpcinterface_factory = " \
                u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
                u"[supervisorctl]\n" \
                u"serverurl = unix:///tmp/supervisor.sock\n\n" \
                u"[supervisord]\n" \
                u"pidfile = /tmp/supervisord.pid\n" \
                u"identifier = supervisor\n" \
                u"directory = /tmp\n" \
                u"logfile = /tmp/supervisord.log\n" \
                u"loglevel = debug\n" \
                u"nodaemon = false\n\n"
            self.execute(
                f'echo "{config}" > {SUPERVISOR_CONF} && '
                f'supervisord -c {SUPERVISOR_CONF}'
            )

    def start_vpp(self):
        """Start VPP inside a container."""

        # Register VPP as a supervisor program and start it.
        config = \
            u"[program:vpp]\n" \
            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
            u"autostart = false\n" \
            u"autorestart = false\n" \
            u"redirect_stderr = true\n" \
            u"priority = 1"
        self.execute(
            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
        )
        self.execute(u"supervisorctl start vpp")

        # Imported locally to avoid a hard dependency on Robot at
        # module-import time.
        # pylint: disable=import-outside-toplevel
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        # Register PAPI and STATS sockets so other keywords can reach
        # the VPP instance running inside this container.
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"stats.sock"
        )

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"supervisorctl restart vpp")
        # Dump supervisor log to aid debugging of restart issues.
        self.execute(u"cat /tmp/supervisord.log")

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def _apply_vpp_startup_config(self, vpp_config):
        """Write a generated VPP startup configuration into the container.

        Shared helper for all create_vpp_startup_config* variants.

        :param vpp_config: VPP startup configuration to apply.
        :type vpp_config: VppConfigGenerator
        """
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"

        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, "r") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # Each entry: (command to run, error message if it fails).
        commands = (
            (u"cgset -r cpuset.cpu_exclusive=0 /",
             u"Failed to apply cgroup settings."),
            (u"cgset -r cpuset.mem_exclusive=0 /",
             u"Failed to apply cgroup settings."),
            (f"cgcreate -g cpuset:/{name}",
             u"Failed to copy cgroup settings from root."),
            (f"cgset -r cpuset.cpu_exclusive=0 /{name}",
             u"Failed to apply cgroup settings."),
            (f"cgset -r cpuset.mem_exclusive=0 /{name}",
             u"Failed to apply cgroup settings."),
        )
        for command, error_message in commands:
            ret, _, _ = self.container.ssh.exec_command_sudo(command)
            if int(ret) != 0:
                raise RuntimeError(error_message)
617
618
619 class LXC(ContainerEngine):
620     """LXC implementation."""
621
622     # Implicit constructor is inherited.
623
624     def acquire(self, force=True):
625         """Acquire a privileged system object where configuration is stored.
626
627         :param force: If a container exists, destroy it and create a new
628             container.
629         :type force: bool
630         :raises RuntimeError: If creating the container or writing the container
631             config fails.
632         """
633         if self.is_container_present():
634             if force:
635                 self.destroy()
636             else:
637                 return
638
639         target_arch = u"arm64" \
640             if Topology.get_node_arch(self.container.node) == u"aarch64" \
641             else u"amd64"
642
643         image = self.container.image if self.container.image \
644             else f"-d ubuntu -r bionic -a {target_arch}"
645
646         cmd = f"lxc-create -t download --name {self.container.name} " \
647             f"-- {image} --no-validate"
648
649         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
650         if int(ret) != 0:
651             raise RuntimeError(u"Failed to create container.")
652
653         self._configure_cgroup(u"lxc")
654
655     def create(self):
656         """Create/deploy an application inside a container on system.
657
658         :raises RuntimeError: If creating the container fails.
659         """
660         if self.container.mnt:
661             # LXC fix for tmpfs
662             # https://github.com/lxc/lxc/issues/434
663             mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
664             ret, _, _ = self.container.ssh.exec_command_sudo(
665                 f"sh -c \"echo '{mnt_e}' >> "
666                 f"/var/lib/lxc/{self.container.name}/config\""
667             )
668             if int(ret) != 0:
669                 raise RuntimeError(
670                     f"Failed to write {self.container.name} config."
671                 )
672
673             for mount in self.container.mnt:
674                 host_dir, guest_dir = mount.split(u":")
675                 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
676                     else u"bind,create=file"
677                 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
678                     f"none {options} 0 0"
679                 self.container.ssh.exec_command_sudo(
680                     f"sh -c \"mkdir -p {host_dir}\""
681                 )
682                 ret, _, _ = self.container.ssh.exec_command_sudo(
683                     f"sh -c \"echo '{entry}' "
684                     f">> /var/lib/lxc/{self.container.name}/config\""
685                 )
686                 if int(ret) != 0:
687                     raise RuntimeError(
688                         f"Failed to write {self.container.name} config."
689                     )
690
691         cpuset_cpus = u",".join(
692             f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
693             if self.container.cpuset_cpus else u""
694
695         ret, _, _ = self.container.ssh.exec_command_sudo(
696             f"lxc-start --name {self.container.name} --daemon"
697         )
698         if int(ret) != 0:
699             raise RuntimeError(
700                 f"Failed to start container {self.container.name}."
701             )
702         self._lxc_wait(u"RUNNING")
703
704         # Workaround for LXC to be able to allocate all cpus including isolated.
705         ret, _, _ = self.container.ssh.exec_command_sudo(
706             u"cgset --copy-from / lxc/"
707         )
708         if int(ret) != 0:
709             raise RuntimeError(u"Failed to copy cgroup to LXC")
710
711         ret, _, _ = self.container.ssh.exec_command_sudo(
712             f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
713         )
714         if int(ret) != 0:
715             raise RuntimeError(
716                 f"Failed to set cpuset.cpus to container {self.container.name}."
717             )
718
719     def execute(self, command):
720         """Start a process inside a running container.
721
722         Runs the specified command inside the container specified by name. The
723         container has to be running already.
724
725         :param command: Command to run inside container.
726         :type command: str
727         :raises RuntimeError: If running the command failed.
728         """
729         env = u"--keep-env " + u" ".join(
730             f"--set-var {env!s}" for env in self.container.env) \
731             if self.container.env else u""
732
733         cmd = f"lxc-attach {env} --name {self.container.name} " \
734             f"-- /bin/sh -c '{command}; exit $?'"
735
736         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
737         if int(ret) != 0:
738             raise RuntimeError(
739                 f"Failed to run command inside container {self.container.name}."
740             )
741
742     def stop(self):
743         """Stop a container.
744
745         :raises RuntimeError: If stopping the container failed.
746         """
747         cmd = f"lxc-stop --name {self.container.name}"
748
749         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
750         if int(ret) != 0:
751             raise RuntimeError(
752                 f"Failed to stop container {self.container.name}."
753             )
754         self._lxc_wait(u"STOPPED|FROZEN")
755
756     def destroy(self):
757         """Destroy a container.
758
759         :raises RuntimeError: If destroying container failed.
760         """
761         cmd = f"lxc-destroy --force --name {self.container.name}"
762
763         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
764         if int(ret) != 0:
765             raise RuntimeError(
766                 f"Failed to destroy container {self.container.name}."
767             )
768
769     def info(self):
770         """Query and shows information about a container.
771
772         :raises RuntimeError: If getting info about a container failed.
773         """
774         cmd = f"lxc-info --name {self.container.name}"
775
776         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
777         if int(ret) != 0:
778             raise RuntimeError(
779                 f"Failed to get info about container {self.container.name}."
780             )
781
782     def system_info(self):
783         """Check the current kernel for LXC support.
784
785         :raises RuntimeError: If checking LXC support failed.
786         """
787         cmd = u"lxc-checkconfig"
788
789         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
790         if int(ret) != 0:
791             raise RuntimeError(u"Failed to check LXC support.")
792
793     def is_container_running(self):
794         """Check if container is running on node.
795
796         :returns: True if container is running.
797         :rtype: bool
798         :raises RuntimeError: If getting info about a container failed.
799         """
800         cmd = f"lxc-info --no-humanize --state --name {self.container.name}"
801
802         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
803         if int(ret) != 0:
804             raise RuntimeError(
805                 f"Failed to get info about container {self.container.name}."
806             )
807         return u"RUNNING" in stdout
808
809     def is_container_present(self):
810         """Check if container is existing on node.
811
812         :returns: True if container is present.
813         :rtype: bool
814         :raises RuntimeError: If getting info about a container failed.
815         """
816         cmd = f"lxc-info --no-humanize --name {self.container.name}"
817
818         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
819         return not ret
820
821     def _lxc_wait(self, state):
822         """Wait for a specific container state.
823
824         :param state: Specify the container state(s) to wait for.
825         :type state: str
826         :raises RuntimeError: If waiting for state of a container failed.
827         """
828         cmd = f"lxc-wait --name {self.container.name} --state '{state}'"
829
830         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
831         if int(ret) != 0:
832             raise RuntimeError(
833                 f"Failed to wait for state '{state}' "
834                 f"of container {self.container.name}."
835             )
836
837
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container image failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            # Pick the default SUT image matching the node architecture.
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            # Message reflects the failing operation (pull, not create).
            raise RuntimeError(
                f"Failed to pull image {self.container.image} "
                f"for container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # --all includes stopped containers; non-empty stdout means present.
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # Without --all only running containers are listed.
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)
1013
1014
class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Called only when normal lookup fails, so a missing attribute
        yields None instead of AttributeError.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        return self.__dict__.get(attr)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        New attributes are created directly; existing list attributes are
        appended to, any other existing attribute is overwritten. Setting
        the "node" attribute also opens an SSH connection to that node.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        if attr in self.__dict__:
            # Updating attribute based on its current type.
            current = self.__dict__[attr]
            if isinstance(current, list):
                current.append(value)
            else:
                self.__dict__[attr] = value
        else:
            # Creating new attribute.
            if attr == u"node":
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value