Deal with some "pylint: disable=" comments
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library to manipulate Containers."""
15
16 from collections import OrderedDict, Counter
17 from io import open
18 from string import Template
19
20 from robot.libraries.BuiltIn import BuiltIn
21
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.ssh import SSH
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
26
27
# Public API of this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Location of the supervisord configuration file inside a container.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
33
34
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # The engine class is looked up by name among the classes
            # defined in this module (LXC, Docker, ...).
            self.engine = globals()[engine]()
        except KeyError as err:
            # Chain the lookup failure for easier debugging.
            raise NotImplementedError(f"{engine} is not implemented.") from err
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError as err:
            raise RuntimeError(
                f"Failed to get container with name: {name}"
            ) from err

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain_topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                self.containers[container].node[u"host"]
                for container in self.containers
            )
        )
        mod = len(self.containers) // dut_cnt

        # Dispatch table mapping topology name to its configuration method.
        configure_methods = {
            u"chain": self._configure_vpp_chain_l2xc,
            u"cross_horiz": self._configure_vpp_cross_horiz,
            u"chain_functional": self._configure_vpp_chain_functional,
            u"chain_ip4": self._configure_vpp_chain_ip4,
            u"pipeline_ip4": self._configure_vpp_pipeline_ip4
        }

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            try:
                configure = configure_methods[chain_topology]
            except KeyError as err:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                ) from err
            configure(
                mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                guest_dir=guest_dir, **kwargs
            )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        Assumes the container name contains either "DUT1" or "DUT2";
        otherwise no physical interface is resolved.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last chain members face the TG; the rest face a neighbour.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
            else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
351
352
class ContainerEngine:
    """Abstract class for container engine.

    Subclasses implement the container life-cycle operations; the shared
    VPP configuration helpers live here.
    """

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force=True):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile).

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def stop(self):
        """Stop container.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def info(self):
        """Info about container.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def system_info(self):
        """System info.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
            config = \
                u"[unix_http_server]\n" \
                u"file  = /tmp/supervisor.sock\n\n" \
                u"[rpcinterface:supervisor]\n" \
                u"supervisor.rpcinterface_factory = " \
                u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
                u"[supervisorctl]\n" \
                u"serverurl = unix:///tmp/supervisor.sock\n\n" \
                u"[supervisord]\n" \
                u"pidfile = /tmp/supervisord.pid\n" \
                u"identifier = supervisor\n" \
                u"directory = /tmp\n" \
                u"logfile = /tmp/supervisord.log\n" \
                u"loglevel = debug\n" \
                u"nodaemon = false\n\n"
            self.execute(
                f'echo "{config}" > {SUPERVISOR_CONF} && '
                f'supervisord -c {SUPERVISOR_CONF}'
            )

    def start_vpp(self):
        """Start VPP inside a container."""

        config = \
            u"[program:vpp]\n" \
            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
            u"autostart = false\n" \
            u"autorestart = false\n" \
            u"redirect_stderr = true\n" \
            u"priority = 1"
        self.execute(
            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
        )
        self.execute(u"supervisorctl start vpp")

        # Register PAPI and STATS sockets of this VPP instance in topology.
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"{self.container.root}/tmp/vpp_sockets/{self.container.name}/"
            f"stats.sock"
        )

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"supervisorctl restart vpp")
        self.execute(u"cat /tmp/supervisord.log")

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def _apply_vpp_startup_config(self, vpp_config):
        """Write a startup configuration to /etc/vpp/ inside the container.

        :param vpp_config: VPP startup configuration to apply.
        :type vpp_config: VppConfigGenerator
        """
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"

        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        # Explicit encoding so the read does not depend on the locale.
        with open(template, "r", encoding="utf-8") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present.

        :raises NotImplementedError: Unless overridden by a subclass.
        """
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # Relax exclusivity on the root cgroup first.
        for command in (
                u"cgset -r cpuset.cpu_exclusive=0 /",
                u"cgset -r cpuset.mem_exclusive=0 /"
        ):
            ret, _, _ = self.container.ssh.exec_command_sudo(command)
            if int(ret) != 0:
                raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        # Then relax exclusivity on the newly created engine cgroup.
        for command in (
                f"cgset -r cpuset.cpu_exclusive=0 /{name}",
                f"cgset -r cpuset.mem_exclusive=0 /{name}"
        ):
            ret, _, _ = self.container.ssh.exec_command_sudo(command)
            if int(ret) != 0:
                raise RuntimeError(u"Failed to apply cgroup settings.")
614
615
616 class LXC(ContainerEngine):
617     """LXC implementation."""
618
619     # Implicit constructor is inherited.
620
621     def acquire(self, force=True):
622         """Acquire a privileged system object where configuration is stored.
623
624         :param force: If a container exists, destroy it and create a new
625             container.
626         :type force: bool
627         :raises RuntimeError: If creating the container or writing the container
628             config fails.
629         """
630         if self.is_container_present():
631             if force:
632                 self.destroy()
633             else:
634                 return
635
636         target_arch = u"arm64" \
637             if Topology.get_node_arch(self.container.node) == u"aarch64" \
638             else u"amd64"
639
640         image = self.container.image if self.container.image \
641             else f"-d ubuntu -r bionic -a {target_arch}"
642
643         cmd = f"lxc-create -t download --name {self.container.name} " \
644             f"-- {image} --no-validate"
645
646         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
647         if int(ret) != 0:
648             raise RuntimeError(u"Failed to create container.")
649
650         self._configure_cgroup(u"lxc")
651
    def build(self):
        """Build container (compile).

        Not used by the LXC engine; containers are downloaded pre-built
        by ``lxc-create -t download`` in :meth:`acquire`.

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError
658
659     def create(self):
660         """Create/deploy an application inside a container on system.
661
662         :raises RuntimeError: If creating the container fails.
663         """
664         if self.container.mnt:
665             # LXC fix for tmpfs
666             # https://github.com/lxc/lxc/issues/434
667             mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
668             ret, _, _ = self.container.ssh.exec_command_sudo(
669                 f"sh -c \"echo '{mnt_e}' >> "
670                 f"/var/lib/lxc/{self.container.name}/config\""
671             )
672             if int(ret) != 0:
673                 raise RuntimeError(
674                     f"Failed to write {self.container.name} config."
675                 )
676
677             for mount in self.container.mnt:
678                 host_dir, guest_dir = mount.split(u":")
679                 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
680                     else u"bind,create=file"
681                 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
682                     f"none {options} 0 0"
683                 self.container.ssh.exec_command_sudo(
684                     f"sh -c \"mkdir -p {host_dir}\""
685                 )
686                 ret, _, _ = self.container.ssh.exec_command_sudo(
687                     f"sh -c \"echo '{entry}' "
688                     f">> /var/lib/lxc/{self.container.name}/config\""
689                 )
690                 if int(ret) != 0:
691                     raise RuntimeError(
692                         f"Failed to write {self.container.name} config."
693                     )
694
695         cpuset_cpus = u",".join(
696             f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
697             if self.container.cpuset_cpus else u""
698
699         ret, _, _ = self.container.ssh.exec_command_sudo(
700             f"lxc-start --name {self.container.name} --daemon"
701         )
702         if int(ret) != 0:
703             raise RuntimeError(
704                 f"Failed to start container {self.container.name}."
705             )
706         self._lxc_wait(u"RUNNING")
707
708         # Workaround for LXC to be able to allocate all cpus including isolated.
709         ret, _, _ = self.container.ssh.exec_command_sudo(
710             u"cgset --copy-from / lxc/"
711         )
712         if int(ret) != 0:
713             raise RuntimeError(u"Failed to copy cgroup to LXC")
714
715         ret, _, _ = self.container.ssh.exec_command_sudo(
716             f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
717         )
718         if int(ret) != 0:
719             raise RuntimeError(
720                 f"Failed to set cpuset.cpus to container {self.container.name}."
721             )
722
723     def execute(self, command):
724         """Start a process inside a running container.
725
726         Runs the specified command inside the container specified by name. The
727         container has to be running already.
728
729         :param command: Command to run inside container.
730         :type command: str
731         :raises RuntimeError: If running the command failed.
732         """
733         env = u"--keep-env " + u" ".join(
734             f"--set-var {env!s}" for env in self.container.env) \
735             if self.container.env else u""
736
737         cmd = f"lxc-attach {env} --name {self.container.name} " \
738             f"-- /bin/sh -c '{command}; exit $?'"
739
740         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
741         if int(ret) != 0:
742             raise RuntimeError(
743                 f"Failed to run command inside container {self.container.name}."
744             )
745
746     def stop(self):
747         """Stop a container.
748
749         :raises RuntimeError: If stopping the container failed.
750         """
751         cmd = f"lxc-stop --name {self.container.name}"
752
753         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
754         if int(ret) != 0:
755             raise RuntimeError(
756                 f"Failed to stop container {self.container.name}."
757             )
758         self._lxc_wait(u"STOPPED|FROZEN")
759
760     def destroy(self):
761         """Destroy a container.
762
763         :raises RuntimeError: If destroying container failed.
764         """
765         cmd = f"lxc-destroy --force --name {self.container.name}"
766
767         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
768         if int(ret) != 0:
769             raise RuntimeError(
770                 f"Failed to destroy container {self.container.name}."
771             )
772
773     def info(self):
774         """Query and shows information about a container.
775
776         :raises RuntimeError: If getting info about a container failed.
777         """
778         cmd = f"lxc-info --name {self.container.name}"
779
780         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
781         if int(ret) != 0:
782             raise RuntimeError(
783                 f"Failed to get info about container {self.container.name}."
784             )
785
786     def system_info(self):
787         """Check the current kernel for LXC support.
788
789         :raises RuntimeError: If checking LXC support failed.
790         """
791         cmd = u"lxc-checkconfig"
792
793         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
794         if int(ret) != 0:
795             raise RuntimeError(u"Failed to check LXC support.")
796
797     def is_container_running(self):
798         """Check if container is running on node.
799
800         :returns: True if container is running.
801         :rtype: bool
802         :raises RuntimeError: If getting info about a container failed.
803         """
804         cmd = f"lxc-info --no-humanize --state --name {self.container.name}"
805
806         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
807         if int(ret) != 0:
808             raise RuntimeError(
809                 f"Failed to get info about container {self.container.name}."
810             )
811         return u"RUNNING" in stdout
812
813     def is_container_present(self):
814         """Check if container is existing on node.
815
816         :returns: True if container is present.
817         :rtype: bool
818         :raises RuntimeError: If getting info about a container failed.
819         """
820         cmd = f"lxc-info --no-humanize --name {self.container.name}"
821
822         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
823         return not ret
824
825     def _lxc_wait(self, state):
826         """Wait for a specific container state.
827
828         :param state: Specify the container state(s) to wait for.
829         :type state: str
830         :raises RuntimeError: If waiting for state of a container failed.
831         """
832         cmd = f"lxc-wait --name {self.container.name} --state '{state}'"
833
834         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
835         if int(ret) != 0:
836             raise RuntimeError(
837                 f"Failed to wait for state '{state}' "
838                 f"of container {self.container.name}."
839             )
840
841
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            # No image specified; default to the SUT image matching the
            # architecture of the node.
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            # Report the pull failure accurately (previous message claimed
            # a container-create failure).
            raise RuntimeError(
                f"Failed to pull image {self.container.image} "
                f"for container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        Assembles a ``docker run`` command line from the optional container
        attributes (CPU pinning, environment, published ports, volumes) and
        executes it on the node.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        # Temporary workaround: NUMA memory pinning (--cpuset-mems) is
        # intentionally disabled due to a bug in memif.
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # --all also lists containers which exist but are not running.
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means at least one matching container ID.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means a matching running container ID.
        return bool(stdout)
1024
1025
class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Invoked only when normal attribute lookup fails, so any attribute
        never assigned reads as None instead of raising AttributeError.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        return self.__dict__.get(attr, None)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        First assignment creates the attribute; assigning u"node" also
        opens an SSH connection to that node. Subsequent assignments append
        to the stored value when it is a list, otherwise they overwrite it.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        if attr not in self.__dict__:
            # Creating new attribute.
            if attr == u"node":
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        elif isinstance(self.__dict__[attr], list):
            # Existing list attributes accumulate values.
            self.__dict__[attr].append(value)
        else:
            self.__dict__[attr] = value