FIX: LXC Container directory initialization
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
16
17 """Library to manipulate Containers."""
18
19 from string import Template
20 from collections import OrderedDict, Counter
21
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
26
27
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
29
30 SUPERVISOR_CONF = '/etc/supervisor/supervisord.conf'
31
32
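# Typical lifecycle driven by this library (hypothetical usage sketch; in CSIT
# these calls are normally issued from Robot Framework keywords, and the node
# and mount values below are illustrative only):
#
#   manager = ContainerManager(engine='LXC')
#   manager.construct_containers(
#       name='DUT1_CNF', count=2, node=nodes['DUT1'],
#       mnt=['/tmp/dut1_cnf1:/mnt/host/'], cpuset_cpus=[2, 3, 4])
#   manager.acquire_all_containers()
#   manager.create_all_containers()
#   manager.start_vpp_in_all_containers()
#   manager.configure_vpp_in_all_containers(chain_topology='chain')
#   ...
#   manager.stop_all_containers()
#   manager.destroy_all_containers()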
33 class ContainerManager(object):
34     """Container lifecycle management class."""
35
36     def __init__(self, engine):
37         """Initialize Container Manager class.
38
39         :param engine: Container technology used (LXC/Docker/...).
40         :type engine: str
41         :raises NotImplementedError: If container technology is not implemented.
42         """
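        # The engine name must match a class defined in this module,
        # e.g. 'LXC' or 'Docker'.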
43         try:
44             self.engine = globals()[engine]()
45         except KeyError:
46             raise NotImplementedError('{engine} is not implemented.'.
47                                       format(engine=engine))
48         self.containers = OrderedDict()
49
50     def get_container_by_name(self, name):
51         """Get container instance.
52
53         :param name: Container name.
54         :type name: str
55         :returns: Container instance.
56         :rtype: Container
57         :raises RuntimeError: If failed to get container with name.
58         """
59         try:
60             return self.containers[name]
61         except KeyError:
62             raise RuntimeError('Failed to get container with name: {name}'.
63                                format(name=name))
64
65     def construct_container(self, **kwargs):
66         """Construct container object on node with specified parameters.
67
68         :param kwargs: Key-value pairs used to construct container.
69         :type kwargs: dict
70         """
71         # Create base class
72         self.engine.initialize()
73         # Set parameters
74         for key in kwargs:
75             setattr(self.engine.container, key, kwargs[key])
76
77         # Set additional environmental variables
78         setattr(self.engine.container, 'env',
79                 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
80
81         # Store container instance
82         self.containers[kwargs['name']] = self.engine.container
83
84     def construct_containers(self, **kwargs):
85         """Construct 1..N container(s) on node with specified name.
86
87         An ordinal number is automatically appended to the container name as
88         a suffix.
89
90         :param kwargs: Named parameters.
91         :type kwargs: dict
92         """
93         name = kwargs['name']
94         for i in range(kwargs['count']):
95             # Name will contain ordinal suffix
96             kwargs['name'] = ''.join([name, str(i+1)])
97             # Create container
98             self.construct_container(i=i, **kwargs)
99
100     def acquire_all_containers(self):
101         """Acquire all containers."""
102         for container in self.containers:
103             self.engine.container = self.containers[container]
104             self.engine.acquire()
105
106     def build_all_containers(self):
107         """Build all containers."""
108         for container in self.containers:
109             self.engine.container = self.containers[container]
110             self.engine.build()
111
112     def create_all_containers(self):
113         """Create all containers."""
114         for container in self.containers:
115             self.engine.container = self.containers[container]
116             self.engine.create()
117
118     def execute_on_container(self, name, command):
119         """Execute command on container with name.
120
121         :param name: Container name.
122         :param command: Command to execute.
123         :type name: str
124         :type command: str
125         """
126         self.engine.container = self.get_container_by_name(name)
127         self.engine.execute(command)
128
129     def execute_on_all_containers(self, command):
130         """Execute command on all containers.
131
132         :param command: Command to execute.
133         :type command: str
134         """
135         for container in self.containers:
136             self.engine.container = self.containers[container]
137             self.engine.execute(command)
138
139     def start_vpp_in_all_containers(self):
140         """Start VPP in all containers."""
141         for container in self.containers:
142             self.engine.container = self.containers[container]
143             # Install the supervisor client/server system to control VPP
144             # as a service.
145             self.engine.install_supervisor()
146             self.engine.start_vpp()
147
148     def restart_vpp_in_all_containers(self):
149         """Restart VPP in all containers."""
150         for container in self.containers:
151             self.engine.container = self.containers[container]
152             self.engine.restart_vpp()
153
154     def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
155         """Configure VPP in all containers.
156
157         :param chain_topology: Topology used for chaining containers: chain,
158             chain_functional, chain_ip4, pipeline_ip4 or cross_horiz. Chain
159             topologies use one memif pair per container; cross_horiz uses one
160             memif and one physical interface (only a single container).
161         :param kwargs: Named parameters.
162         :type chain_topology: str
163         :type kwargs: dict
164         """
165         # Count number of DUTs based on node's host information
166         dut_cnt = len(Counter([self.containers[container].node['host']
167                                for container in self.containers]))
168         mod = len(self.containers) // dut_cnt
169
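        # mid* are memif interface IDs and sid* are memif socket IDs; each
        # container gets one memif pair, hence two socket IDs per container,
        # with numbering restarting on every DUT.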
170         for i, container in enumerate(self.containers):
171             mid1 = i % mod + 1
172             mid2 = i % mod + 1
173             sid1 = i % mod * 2 + 1
174             sid2 = i % mod * 2 + 2
175             self.engine.container = self.containers[container]
176             guest_dir = self.engine.container.mnt[0].split(':')[1]
177
178             if chain_topology == 'chain':
179                 self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
180                                                sid1=sid1, sid2=sid2,
181                                                guest_dir=guest_dir,
182                                                **kwargs)
183             elif chain_topology == 'cross_horiz':
184                 self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
185                                                 sid1=sid1, sid2=sid2,
186                                                 guest_dir=guest_dir,
187                                                 **kwargs)
188             elif chain_topology == 'chain_functional':
189                 self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
190                                                      sid1=sid1, sid2=sid2,
191                                                      guest_dir=guest_dir,
192                                                      **kwargs)
193             elif chain_topology == 'chain_ip4':
194                 self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
195                                               sid1=sid1, sid2=sid2,
196                                               guest_dir=guest_dir,
197                                               **kwargs)
198             elif chain_topology == 'pipeline_ip4':
199                 self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
200                                                  sid1=sid1, sid2=sid2,
201                                                  guest_dir=guest_dir,
202                                                  **kwargs)
203             else:
204                 raise RuntimeError('Container topology {name} not implemented'.
205                                    format(name=chain_topology))
206
207     def _configure_vpp_chain_l2xc(self, **kwargs):
208         """Configure VPP in chain topology with l2xc.
209
210         :param kwargs: Named parameters.
211         :type kwargs: dict
212         """
213         self.engine.create_vpp_startup_config()
214         self.engine.create_vpp_exec_config(
215             'memif_create_chain_l2xc.exec',
216             mid1=kwargs['mid1'], mid2=kwargs['mid2'],
217             sid1=kwargs['sid1'], sid2=kwargs['sid2'],
218             socket1='{guest_dir}/memif-{c.name}-{sid1}'.
219             format(c=self.engine.container, **kwargs),
220             socket2='{guest_dir}/memif-{c.name}-{sid2}'.
221             format(c=self.engine.container, **kwargs))
222
223     def _configure_vpp_cross_horiz(self, **kwargs):
224         """Configure VPP in cross horizontal topology (single memif).
225
226         :param kwargs: Named parameters.
227         :type kwargs: dict
228         """
229         if 'DUT1' in self.engine.container.name:
230             if_pci = Topology.get_interface_pci_addr(
231                 self.engine.container.node, kwargs['dut1_if'])
232             if_name = Topology.get_interface_name(
233                 self.engine.container.node, kwargs['dut1_if'])
234         if 'DUT2' in self.engine.container.name:
235             if_pci = Topology.get_interface_pci_addr(
236                 self.engine.container.node, kwargs['dut2_if'])
237             if_name = Topology.get_interface_name(
238                 self.engine.container.node, kwargs['dut2_if'])
239         self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
240         self.engine.create_vpp_exec_config(
241             'memif_create_cross_horizon.exec',
242             mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
243             socket1='{guest_dir}/memif-{c.name}-{sid1}'.
244             format(c=self.engine.container, **kwargs))
245
246     def _configure_vpp_chain_functional(self, **kwargs):
247         """Configure VPP in chain topology with l2xc (functional).
248
249         :param kwargs: Named parameters.
250         :type kwargs: dict
251         """
252         self.engine.create_vpp_startup_config_func_dev()
253         self.engine.create_vpp_exec_config(
254             'memif_create_chain_functional.exec',
255             mid1=kwargs['mid1'], mid2=kwargs['mid2'],
256             sid1=kwargs['sid1'], sid2=kwargs['sid2'],
257             socket1='{guest_dir}/memif-{c.name}-{sid1}'.
258             format(c=self.engine.container, **kwargs),
259             socket2='{guest_dir}/memif-{c.name}-{sid2}'.
260             format(c=self.engine.container, **kwargs),
261             rx_mode='interrupt')
262
263     def _configure_vpp_chain_ip4(self, **kwargs):
264         """Configure VPP in chain topology with ip4.
265
266         :param kwargs: Named parameters.
267         :type kwargs: dict
268         """
269         self.engine.create_vpp_startup_config()
270
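        # The first container in a chain uses the TG if1 MAC as its peer on the
        # first memif and the last one uses the TG if2 MAC on the second memif;
        # otherwise synthetic MACs of the neighbouring containers are used.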
271         vif1_mac = kwargs['tg_if1_mac'] \
272             if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
273             else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
274         vif2_mac = kwargs['tg_if2_mac'] \
275             if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
276             else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
277         self.engine.create_vpp_exec_config(
278             'memif_create_chain_ip4.exec',
279             mid1=kwargs['mid1'], mid2=kwargs['mid2'],
280             sid1=kwargs['sid1'], sid2=kwargs['sid2'],
281             socket1='{guest_dir}/memif-{c.name}-{sid1}'.
282             format(c=self.engine.container, **kwargs),
283             socket2='{guest_dir}/memif-{c.name}-{sid2}'.
284             format(c=self.engine.container, **kwargs),
285             mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
286             mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
287             vif1_mac=vif1_mac, vif2_mac=vif2_mac)
288
289     def _configure_vpp_pipeline_ip4(self, **kwargs):
290         """Configure VPP in pipeline topology with ip4.
291
292         :param kwargs: Named parameters.
293         :type kwargs: dict
294         """
295         self.engine.create_vpp_startup_config()
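        # node is the 1-based position of this container within the pipeline
        # on its DUT; kwargs['nodes'] is the pipeline length.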
296         node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
297         mid1 = kwargs['mid1']
298         mid2 = kwargs['mid2']
299         role1 = 'master'
300         role2 = 'master' \
301             if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
302             else 'slave'
303         kwargs['mid2'] = kwargs['mid2'] \
304             if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
305             else kwargs['mid2'] + 1
306         vif1_mac = kwargs['tg_if1_mac'] \
307             if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
308             else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
309         vif2_mac = kwargs['tg_if2_mac'] \
310             if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
311             else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
312         socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
313             format(c=self.engine.container, **kwargs) \
314             if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
315             format(c=self.engine.container, **kwargs)
316         socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
317             format(c=self.engine.container, **kwargs) \
318             if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
319             else '{guest_dir}/memif-pipe-{mid2}'.\
320             format(c=self.engine.container, **kwargs)
321
322         self.engine.create_vpp_exec_config(
323             'memif_create_pipeline_ip4.exec',
324             mid1=kwargs['mid1'], mid2=kwargs['mid2'],
325             sid1=kwargs['sid1'], sid2=kwargs['sid2'],
326             socket1=socket1, socket2=socket2, role1=role1, role2=role2,
327             mac1='52:54:00:00:{0:02X}:01'.format(mid1),
328             mac2='52:54:00:00:{0:02X}:02'.format(mid2),
329             vif1_mac=vif1_mac, vif2_mac=vif2_mac)
330
331     def stop_all_containers(self):
332         """Stop all containers."""
333         for container in self.containers:
334             self.engine.container = self.containers[container]
335             self.engine.stop()
336
337     def destroy_all_containers(self):
338         """Destroy all containers."""
339         for container in self.containers:
340             self.engine.container = self.containers[container]
341             self.engine.destroy()
342
343
344 class ContainerEngine(object):
345     """Abstract class for container engine."""
346
347     def __init__(self):
348         """Init ContainerEngine object."""
349         self.container = None
350
351     def initialize(self):
352         """Initialize container object."""
353         self.container = Container()
354
355     def acquire(self, force):
356         """Acquire/download container.
357
358         :param force: If a container exists, destroy it and create a new one.
359         :type force: bool
360         """
361         raise NotImplementedError
362
363     def build(self):
364         """Build container (compile)."""
365         raise NotImplementedError
366
367     def create(self):
368         """Create/deploy container."""
369         raise NotImplementedError
370
371     def execute(self, command):
372         """Execute process inside container.
373
374         :param command: Command to run inside container.
375         :type command: str
376         """
377         raise NotImplementedError
378
379     def stop(self):
380         """Stop container."""
381         raise NotImplementedError
382
383     def destroy(self):
384         """Destroy/remove container."""
385         raise NotImplementedError
386
387     def info(self):
388         """Info about container."""
389         raise NotImplementedError
390
391     def system_info(self):
392         """System info."""
393         raise NotImplementedError
394
395     def install_supervisor(self):
396         """Install supervisord inside a container."""
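        # Only LXC containers need supervisor installed here; the Docker SUT
        # images are assumed to ship with it preinstalled.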
397         if isinstance(self, LXC):
398             self.execute('sleep 3; apt-get update')
399             self.execute('apt-get install -y supervisor')
400             self.execute('echo "{config}" > {config_file} && '
401                          'supervisord -c {config_file}'.
402                          format(
403                              config='[unix_http_server]\n'
404                              'file  = /tmp/supervisor.sock\n\n'
405                              '[rpcinterface:supervisor]\n'
406                              'supervisor.rpcinterface_factory = supervisor.'
407                              'rpcinterface:make_main_rpcinterface\n\n'
408                              '[supervisorctl]\n'
409                              'serverurl = unix:///tmp/supervisor.sock\n\n'
410                              '[supervisord]\n'
411                              'pidfile = /tmp/supervisord.pid\n'
412                              'identifier = supervisor\n'
413                              'directory = /tmp\n'
414                              'logfile = /tmp/supervisord.log\n'
415                              'loglevel = debug\n'
416                              'nodaemon = false\n\n',
417                              config_file=SUPERVISOR_CONF))
418
419     def start_vpp(self):
420         """Start VPP inside a container."""
421         self.execute('echo "{config}" >> {config_file} && '
422                      'supervisorctl reload'.
423                      format(
424                          config='[program:vpp]\n'
425                          'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n'
426                          'autostart = false\n'
427                          'autorestart = false\n'
428                          'redirect_stderr = true\n'
429                          'priority = 1',
430                          config_file=SUPERVISOR_CONF))
431         self.execute('supervisorctl start vpp')
432
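        # Register the in-container PAPI and STATS sockets with the Topology
        # instance so tests can reach VPP running inside this container.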
433         from robot.libraries.BuiltIn import BuiltIn
434         topo_instance = BuiltIn().get_library_instance(
435             'resources.libraries.python.topology.Topology')
436         topo_instance.add_new_socket(
437             self.container.node,
438             SocketType.PAPI,
439             self.container.name,
440             '{root}/tmp/vpp_sockets/{name}/api.sock'.
441             format(root=self.container.root, name=self.container.name))
442         topo_instance.add_new_socket(
443             self.container.node,
444             SocketType.STATS,
445             self.container.name,
446             '{root}/tmp/vpp_sockets/{name}/stats.sock'.
447             format(root=self.container.root, name=self.container.name))
448
449     def restart_vpp(self):
450         """Restart VPP service inside a container."""
451         self.execute('supervisorctl restart vpp')
452         self.execute('cat /tmp/supervisord.log')
453
454     def create_base_vpp_startup_config(self):
455         """Create base startup configuration of VPP on container.
456
457         :returns: Base VPP startup configuration.
458         :rtype: VppConfigGenerator
459         """
460         cpuset_cpus = self.container.cpuset_cpus
461
462         # Create config instance
463         vpp_config = VppConfigGenerator()
464         vpp_config.set_node(self.container.node)
465         vpp_config.add_unix_cli_listen()
466         vpp_config.add_unix_nodaemon()
467         vpp_config.add_unix_exec('/tmp/running.exec')
468         vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
469         vpp_config.add_statseg_per_node_counters(value='on')
470         # Pop the first core from the list to be the main core.
471         vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
472         # If more cores remain in the list, they will be used as workers.
473         if cpuset_cpus:
474             corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
475             vpp_config.add_cpu_corelist_workers(corelist_workers)
476
477         return vpp_config
478
479     def create_vpp_startup_config(self):
480         """Create startup configuration of VPP without DPDK on container.
481         """
482         vpp_config = self.create_base_vpp_startup_config()
483         vpp_config.add_plugin('disable', 'dpdk_plugin.so')
484
485         # Apply configuration
486         self.execute('mkdir -p /etc/vpp/')
487         self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
488                      .format(config=vpp_config.get_config_str()))
489
490     def create_vpp_startup_config_dpdk_dev(self, *devices):
491         """Create startup configuration of VPP with DPDK on container.
492
493         :param devices: List of PCI devices to add.
494         :type devices: list
495         """
496         vpp_config = self.create_base_vpp_startup_config()
497         vpp_config.add_dpdk_dev(*devices)
498         vpp_config.add_dpdk_no_tx_checksum_offload()
499         vpp_config.add_dpdk_log_level('debug')
500         vpp_config.add_plugin('disable', 'default')
501         vpp_config.add_plugin('enable', 'dpdk_plugin.so')
502         vpp_config.add_plugin('enable', 'memif_plugin.so')
503
504         # Apply configuration
505         self.execute('mkdir -p /etc/vpp/')
506         self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
507                      .format(config=vpp_config.get_config_str()))
508
509     def create_vpp_startup_config_func_dev(self):
510         """Create startup configuration of VPP on container for functional
511         vpp_device tests.
512         """
513         # Create config instance
514         vpp_config = VppConfigGenerator()
515         vpp_config.set_node(self.container.node)
516         vpp_config.add_unix_cli_listen()
517         vpp_config.add_unix_nodaemon()
518         vpp_config.add_unix_exec('/tmp/running.exec')
519         vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
520         vpp_config.add_statseg_per_node_counters(value='on')
521         vpp_config.add_plugin('disable', 'dpdk_plugin.so')
522
523         # Apply configuration
524         self.execute('mkdir -p /etc/vpp/')
525         self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
526                      .format(config=vpp_config.get_config_str()))
527
528     def create_vpp_exec_config(self, template_file, **kwargs):
529         """Create VPP exec configuration on container.
530
531         :param template_file: File name of a template script.
532         :param kwargs: Parameters for script.
533         :type template_file: str
534         :type kwargs: dict
535         """
536         running = '/tmp/running.exec'
537
538         template = '{res}/{tpl}'.format(
539             res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)
540
541         with open(template, 'r') as src_file:
542             src = Template(src_file.read())
543             self.execute('echo "{out}" > {running}'.format(
544                 out=src.safe_substitute(**kwargs), running=running))
545
546     def is_container_running(self):
547         """Check if container is running."""
548         raise NotImplementedError
549
550     def is_container_present(self):
551         """Check if container is present."""
552         raise NotImplementedError
553
554     def _configure_cgroup(self, name):
555         """Configure the control group associated with a container.
556
557         By default the cpuset cgroup uses exclusive CPU/MEM. When a
558         Docker/LXC container is initialized, a new cgroup /docker or /lxc is
559         created under the cpuset parent tree. This newly created cgroup
560         inherits the parent's cpu/mem exclusive settings, which therefore
561         cannot be overridden from within the /docker or /lxc cgroup. This
562         function adjusts the cgroups to allow both engines to coexist.
563
564         :param name: Name of cgroup.
565         :type name: str
566         :raises RuntimeError: If applying cgroup settings via cgset failed.
567         """
568         ret, _, _ = self.container.ssh.exec_command_sudo(
569             'cgset -r cpuset.cpu_exclusive=0 /')
570         if int(ret) != 0:
571             raise RuntimeError('Failed to apply cgroup settings.')
572
573         ret, _, _ = self.container.ssh.exec_command_sudo(
574             'cgset -r cpuset.mem_exclusive=0 /')
575         if int(ret) != 0:
576             raise RuntimeError('Failed to apply cgroup settings.')
577
578         ret, _, _ = self.container.ssh.exec_command_sudo(
579             'cgcreate -g cpuset:/{name}'.format(name=name))
580         if int(ret) != 0:
581             raise RuntimeError('Failed to copy cgroup settings from root.')
582
583         ret, _, _ = self.container.ssh.exec_command_sudo(
584             'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
585         if int(ret) != 0:
586             raise RuntimeError('Failed to apply cgroup settings.')
587
588         ret, _, _ = self.container.ssh.exec_command_sudo(
589             'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
590         if int(ret) != 0:
591             raise RuntimeError('Failed to apply cgroup settings.')
592
593
594 class LXC(ContainerEngine):
595     """LXC implementation."""
596
597     # Implicit constructor is inherited.
598
599     def acquire(self, force=True):
600         """Acquire a privileged system object where configuration is stored.
601
602         :param force: If a container exists, destroy it and create a new
603             container.
604         :type force: bool
605         :raises RuntimeError: If creating the container or writing the container
606             config fails.
607         """
608         if self.is_container_present():
609             if force:
610                 self.destroy()
611             else:
612                 return
613
614         target_arch = 'arm64' \
615             if Topology.get_node_arch(self.container.node) == 'aarch64' \
616             else 'amd64'
617
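        # Default to the LXC 'download' template image: Ubuntu 18.04 (bionic)
        # for the detected architecture.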
618         image = self.container.image if self.container.image else\
619             "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
620
621         cmd = 'lxc-create -t download --name {c.name} -- {image} '\
622             '--no-validate'.format(c=self.container, image=image)
623
624         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
625         if int(ret) != 0:
626             raise RuntimeError('Failed to create container.')
627
628         self._configure_cgroup('lxc')
629
630     def create(self):
631         """Create/deploy an application inside a container on system.
632
633         :raises RuntimeError: If creating the container fails.
634         """
635         if self.container.mnt:
636             # LXC fix for tmpfs
637             # https://github.com/lxc/lxc/issues/434
638             ret, _, _ = self.container.ssh.exec_command_sudo(
639                 "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
640                 format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
641                        c=self.container))
642             if int(ret) != 0:
643                 raise RuntimeError('Failed to write {c.name} config.'.
644                                    format(c=self.container))
645
646             for mount in self.container.mnt:
647                 host_dir, guest_dir = mount.split(':')
648                 options = 'bind,create=dir' \
649                     if guest_dir.endswith('/') else 'bind,create=file'
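                # lxc.mount.entry target paths are relative to the container
                # rootfs, hence the leading '/' is stripped from guest_dir.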
650                 entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
651                     '{options} 0 0'.format(
652                         host_dir=host_dir, guest_dir=guest_dir[1:],
653                         options=options)
654                 self.container.ssh.exec_command_sudo(
655                     "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir))
656                 ret, _, _ = self.container.ssh.exec_command_sudo(
657                     "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
658                     format(e=entry, c=self.container))
659                 if int(ret) != 0:
660                     raise RuntimeError('Failed to write {c.name} config.'
661                                        .format(c=self.container))
662
663         cpuset_cpus = '{0}'.format(
664             ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
665             if self.container.cpuset_cpus else ''
666
667         ret, _, _ = self.container.ssh.exec_command_sudo(
668             'lxc-start --name {c.name} --daemon'.format(c=self.container))
669         if int(ret) != 0:
670             raise RuntimeError('Failed to start container {c.name}.'.
671                                format(c=self.container))
672         self._lxc_wait('RUNNING')
673
674         # Workaround for LXC to be able to allocate all CPUs, including isolated ones.
675         ret, _, _ = self.container.ssh.exec_command_sudo(
676             'cgset --copy-from / lxc/')
677         if int(ret) != 0:
678             raise RuntimeError('Failed to copy cgroup to LXC')
679
680         ret, _, _ = self.container.ssh.exec_command_sudo(
681             'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
682             format(c=self.container, cpus=cpuset_cpus))
683         if int(ret) != 0:
684             raise RuntimeError('Failed to set cpuset.cpus to container '
685                                '{c.name}.'.format(c=self.container))
686
687     def execute(self, command):
688         """Start a process inside a running container.
689
690         Runs the specified command inside the container specified by name. The
691         container has to be running already.
692
693         :param command: Command to run inside container.
694         :type command: str
695         :raises RuntimeError: If running the command failed.
696         """
697         env = '--keep-env {0}'.format(
698             ' '.join('--set-var %s' % env for env in self.container.env))\
699             if self.container.env else ''
700
701         cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
702             "exit $?'".format(env=env, c=self.container, command=command)
703
704         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
705         if int(ret) != 0:
706             raise RuntimeError('Failed to run command inside container '
707                                '{c.name}.'.format(c=self.container))
708
709     def stop(self):
710         """Stop a container.
711
712         :raises RuntimeError: If stopping the container failed.
713         """
714         cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
715
716         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
717         if int(ret) != 0:
718             raise RuntimeError('Failed to stop container {c.name}.'
719                                .format(c=self.container))
720         self._lxc_wait('STOPPED|FROZEN')
721
722     def destroy(self):
723         """Destroy a container.
724
725         :raises RuntimeError: If destroying container failed.
726         """
727         cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
728
729         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
730         if int(ret) != 0:
731             raise RuntimeError('Failed to destroy container {c.name}.'
732                                .format(c=self.container))
733
734     def info(self):
735         """Query and show information about a container.
736
737         :raises RuntimeError: If getting info about a container failed.
738         """
739         cmd = 'lxc-info --name {c.name}'.format(c=self.container)
740
741         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
742         if int(ret) != 0:
743             raise RuntimeError('Failed to get info about container {c.name}.'
744                                .format(c=self.container))
745
746     def system_info(self):
747         """Check the current kernel for LXC support.
748
749         :raises RuntimeError: If checking LXC support failed.
750         """
751         cmd = 'lxc-checkconfig'
752
753         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
754         if int(ret) != 0:
755             raise RuntimeError('Failed to check LXC support.')
756
757     def is_container_running(self):
758         """Check if container is running on node.
759
760         :returns: True if container is running.
761         :rtype: bool
762         :raises RuntimeError: If getting info about a container failed.
763         """
764         cmd = 'lxc-info --no-humanize --state --name {c.name}'\
765             .format(c=self.container)
766
767         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
768         if int(ret) != 0:
769             raise RuntimeError('Failed to get info about container {c.name}.'
770                                .format(c=self.container))
771         return 'RUNNING' in stdout
772
773     def is_container_present(self):
774         """Check if container exists on node.
775
776         :returns: True if container is present.
777         :rtype: bool
778         :raises RuntimeError: If getting info about a container failed.
779         """
780         cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
781
782         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
783         return not int(ret)
784
785     def _lxc_wait(self, state):
786         """Wait for a specific container state.
787
788         :param state: Specify the container state(s) to wait for.
789         :type state: str
790         :raises RuntimeError: If waiting for state of a container failed.
791         """
792         cmd = 'lxc-wait --name {c.name} --state "{s}"'\
793             .format(c=self.container, s=state)
794
795         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
796         if int(ret) != 0:
797             raise RuntimeError('Failed to wait for state "{s}" of container '
798                                '{c.name}.'.format(s=state, c=self.container))
799
800
801 class Docker(ContainerEngine):
802     """Docker implementation."""
803
804     # Implicit constructor is inherited.
805
806     def acquire(self, force=True):
807         """Pull an image or a repository from a registry.
808
809         :param force: Destroy a container if it exists.
810         :type force: bool
811         :raises RuntimeError: If pulling a container failed.
812         """
813         if self.is_container_present():
814             if force:
815                 self.destroy()
816             else:
817                 return
818
819         if not self.container.image:
820             img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
821                 if Topology.get_node_arch(self.container.node) == 'aarch64' \
822                 else Constants.DOCKER_SUT_IMAGE_UBUNTU
823             setattr(self.container, 'image', img)
824
825         cmd = 'docker pull {image}'.format(image=self.container.image)
826
827         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
828         if int(ret) != 0:
829             raise RuntimeError('Failed to create container {c.name}.'
830                                .format(c=self.container))
831
832         if self.container.cpuset_cpus:
833             self._configure_cgroup('docker')
834
835     def create(self):
836         """Create/deploy container.
837
838         :raises RuntimeError: If creating a container failed.
839         """
840         cpuset_cpus = '--cpuset-cpus={0}'.format(
841             ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
842             if self.container.cpuset_cpus else ''
843
844         cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
845             if self.container.cpuset_mems is not None else ''
846         # Temporary workaround - disabling due to bug in memif
847         cpuset_mems = ''
848
849         env = '{0}'.format(
850             ' '.join('--env %s' % env for env in self.container.env))\
851             if self.container.env else ''
852
853         command = '{0}'.format(self.container.command)\
854             if self.container.command else ''
855
856         publish = '{0}'.format(
857             ' '.join('--publish %s' % var for var in self.container.publish))\
858             if self.container.publish else ''
859
860         volume = '{0}'.format(
861             ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
862             if self.container.mnt else ''
863
864         cmd = 'docker run '\
865             '--privileged --detach --interactive --tty --rm '\
866             '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
867             '{env} {volume} --name {container.name} {container.image} '\
868             '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
869                                container=self.container, command=command,
870                                env=env, publish=publish, volume=volume)
871
872         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
873         if int(ret) != 0:
874             raise RuntimeError('Failed to create container {c.name}'
875                                .format(c=self.container))
876
877         self.info()
878
879     def execute(self, command):
880         """Start a process inside a running container.
881
882         Runs the specified command inside the container specified by name. The
883         container has to be running already.
884
885         :param command: Command to run inside container.
886         :type command: str
887         :raises RuntimeError: If running the command in a container failed.
888         """
889         cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
890             "exit $?'".format(c=self.container, command=command)
891
892         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
893         if int(ret) != 0:
894             raise RuntimeError('Failed to execute command in container '
895                                '{c.name}.'.format(c=self.container))
896
897     def stop(self):
898         """Stop running container.
899
900         :raises RuntimeError: If stopping a container failed.
901         """
902         cmd = 'docker stop {c.name}'.format(c=self.container)
903
904         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
905         if int(ret) != 0:
906             raise RuntimeError('Failed to stop container {c.name}.'
907                                .format(c=self.container))
908
909     def destroy(self):
910         """Remove a container.
911
912         :raises RuntimeError: If removing a container failed.
913         """
914         cmd = 'docker rm --force {c.name}'.format(c=self.container)
915
916         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
917         if int(ret) != 0:
918             raise RuntimeError('Failed to destroy container {c.name}.'
919                                .format(c=self.container))
920
921     def info(self):
922         """Return low-level information on Docker objects.
923
924         :raises RuntimeError: If getting info about a container failed.
925         """
926         cmd = 'docker inspect {c.name}'.format(c=self.container)
927
928         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
929         if int(ret) != 0:
930             raise RuntimeError('Failed to get info about container {c.name}.'
931                                .format(c=self.container))
932
933     def system_info(self):
934         """Display Docker system-wide information.
935
936         :raises RuntimeError: If displaying system information failed.
937         """
938         cmd = 'docker system info'
939
940         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
941         if int(ret) != 0:
942             raise RuntimeError('Failed to get system info.')
943
944     def is_container_present(self):
945         """Check if container is present on node.
946
947         :returns: True if container is present.
948         :rtype: bool
949         :raises RuntimeError: If getting info about a container failed.
950         """
951         cmd = 'docker ps --all --quiet --filter name={c.name}'\
952             .format(c=self.container)
953
954         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
955         if int(ret) != 0:
956             raise RuntimeError('Failed to get info about container {c.name}.'
957                                .format(c=self.container))
958         return bool(stdout)
959
960     def is_container_running(self):
961         """Check if container is running on node.
962
963         :returns: True if container is running.
964         :rtype: bool
965         :raises RuntimeError: If getting info about a container failed.
966         """
967         cmd = 'docker ps --quiet --filter name={c.name}'\
968             .format(c=self.container)
969
970         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
971         if int(ret) != 0:
972             raise RuntimeError('Failed to get info about container {c.name}.'
973                                .format(c=self.container))
974         return bool(stdout)
975
976
977 class Container(object):
978     """Container class."""
979
980     def __init__(self):
981         """Initialize Container object."""
982         pass
983
984     def __getattr__(self, attr):
985         """Get attribute (custom implementation).
986
987         :param attr: Attribute to get.
988         :type attr: str
989         :returns: Attribute value or None.
990         :rtype: any
991         """
992         try:
993             return self.__dict__[attr]
994         except KeyError:
995             return None
996
997     def __setattr__(self, attr, value):
998         """Set attribute (custom implementation).
999
1000         :param attr: Attribute to set.
1001         :param value: Value to set.
1002         :type attr: str
1003         :type value: any
1004         """
1005         try:
1006             # Check if attribute exists
1007             self.__dict__[attr]
1008         except KeyError:
1009             # Creating new attribute
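            # The first assignment of 'node' also opens the SSH connection
            # that the container engines use for all remote commands.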
1010             if attr == 'node':
1011                 self.__dict__['ssh'] = SSH()
1012                 self.__dict__['ssh'].connect(value)
1013             self.__dict__[attr] = value
1014         else:
1015             # Update the attribute based on its type.
1016             if isinstance(self.__dict__[attr], list):
1017                 self.__dict__[attr].append(value)
1018             else:
1019                 self.__dict__[attr] = value