FIX: Paths for new Docker images
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
16
17 """Library to manipulate Containers."""
18
19 from string import Template
20 from collections import OrderedDict, Counter
21
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
26
27
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
29
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
31
32
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Resolve the engine class by name among classes defined in this
            # module (e.g. LXC, Docker).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        # Keyed by container name; ordered so iteration follows construction.
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain_topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Containers per DUT. Floor division keeps the memif/socket ids
        # integral under both Python 2 and Python 3; true division ('/')
        # returns a float on Python 3 and would produce socket names such
        # as 'memif-...-1.0'.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # Container name encodes the DUT it runs on; pick the matching
        # physical interface.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_mode='interrupt')

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last container in the chain peers with a TG interface, inner
        # containers peer with their neighbour container's memif.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        # Keep the original ids for the interface MACs; kwargs['mid2'] may be
        # shifted below to pick the right pipe socket.
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        # Edge containers use per-container memif sockets, inner ones share
        # 'pipe' sockets with their neighbour.
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
342
343
class ContainerEngine(object):
    """Abstract class for container engine.

    Engine specific lifecycle operations (acquire/create/execute/...) are
    implemented by subclasses; the VPP related helpers below are shared by
    all engines.
    """

    def __init__(self):
        """Init ContainerEngine object."""
        # Container instance the engine currently operates on; set via
        # initialize() or assigned directly by ContainerManager.
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force=True):
        """Acquire/download container.

        Default for ``force`` matches the subclass implementations so
        callers (e.g. ContainerManager.acquire_all_containers) may omit it.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            # LXC containers start from a bare image, so supervisor has to be
            # installed first; sleep gives networking time to come up.
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        # NOTE: 'rm -f' instead of 'unlink' so the command chain does not
        # abort on the very first start, when /tmp/supervisor.sock does not
        # exist yet ('unlink' returns non-zero on a missing file, which would
        # short-circuit the '&&' chain and supervisord would never start).
        self.execute('echo "{config}" > {config_file} && '
                     'rm -f /tmp/supervisor.sock && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file  = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile = /tmp/supervisord.log\n'
                         'loglevel = debug\n'
                         'nodaemon = false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container and register its sockets."""
        # Register VPP as a supervisord-managed program, then start it.
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart = false\n'
                         'autorestart = false\n'
                         'redirect_stderr = true\n'
                         'priority = 1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

        # Imported here (not at module level) so this module stays usable
        # outside of a Robot Framework run.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        # Register PAPI and stats sockets of this VPP instance so test code
        # can talk to it.
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        # Dump the supervisord log to aid debugging of restart failures.
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def _apply_vpp_startup_config(self, vpp_config):
        """Write given startup configuration to /etc/vpp/startup.conf inside
        the container.

        :param vpp_config: Startup configuration to apply.
        :type vpp_config: VppConfigGenerator
        """
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        # Substitute template placeholders and write the rendered exec
        # script into the container (VPP runs it on startup).
        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # Drop exclusivity on the root cgroup first ...
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        # ... then create the engine cgroup and drop exclusivity there too.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
593
594
595 class LXC(ContainerEngine):
596     """LXC implementation."""
597
598     # Implicit constructor is inherited.
599
600     def acquire(self, force=True):
601         """Acquire a privileged system object where configuration is stored.
602
603         :param force: If a container exists, destroy it and create a new
604             container.
605         :type force: bool
606         :raises RuntimeError: If creating the container or writing the container
607             config fails.
608         """
609         if self.is_container_present():
610             if force:
611                 self.destroy()
612             else:
613                 return
614
615         target_arch = 'arm64' \
616             if Topology.get_node_arch(self.container.node) == 'aarch64' \
617             else 'amd64'
618
619         image = self.container.image if self.container.image else\
620             "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
621
622         cmd = 'lxc-create -t download --name {c.name} -- {image} '\
623             '--no-validate'.format(c=self.container, image=image)
624
625         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
626         if int(ret) != 0:
627             raise RuntimeError('Failed to create container.')
628
629         self._configure_cgroup('lxc')
630
    def create(self):
        """Create/deploy an application inside a container on system.

        Writes mount entries into the LXC config, starts the container as a
        daemon, then pins it to the requested CPU set.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            ret, _, _ = self.container.ssh.exec_command_sudo(
                "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
                       c=self.container))
            if int(ret) != 0:
                raise RuntimeError('Failed to write {c.name} config.'.
                                   format(c=self.container))

            # Each mount is 'host_dir:guest_dir'; a trailing '/' on guest_dir
            # marks a directory bind, otherwise a file bind.
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                # guest_dir[1:] strips the leading '/': lxc.mount.entry
                # targets are relative to the container rootfs.
                entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
                    '{options} 0 0'.format(
                        host_dir=host_dir, guest_dir=guest_dir[1:],
                        options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        # Comma separated CPU list for lxc-cgroup; empty when no CPUs pinned.
        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        # Block until LXC reports the container reached RUNNING state.
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))
685
686     def execute(self, command):
687         """Start a process inside a running container.
688
689         Runs the specified command inside the container specified by name. The
690         container has to be running already.
691
692         :param command: Command to run inside container.
693         :type command: str
694         :raises RuntimeError: If running the command failed.
695         """
696         env = '--keep-env {0}'.format(
697             ' '.join('--set-var %s' % env for env in self.container.env))\
698             if self.container.env else ''
699
700         cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
701             "exit $?'".format(env=env, c=self.container, command=command)
702
703         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
704         if int(ret) != 0:
705             raise RuntimeError('Failed to run command inside container '
706                                '{c.name}.'.format(c=self.container))
707
708     def stop(self):
709         """Stop a container.
710
711         :raises RuntimeError: If stopping the container failed.
712         """
713         cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
714
715         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
716         if int(ret) != 0:
717             raise RuntimeError('Failed to stop container {c.name}.'
718                                .format(c=self.container))
719         self._lxc_wait('STOPPED|FROZEN')
720
721     def destroy(self):
722         """Destroy a container.
723
724         :raises RuntimeError: If destroying container failed.
725         """
726         cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
727
728         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
729         if int(ret) != 0:
730             raise RuntimeError('Failed to destroy container {c.name}.'
731                                .format(c=self.container))
732
733     def info(self):
734         """Query and shows information about a container.
735
736         :raises RuntimeError: If getting info about a container failed.
737         """
738         cmd = 'lxc-info --name {c.name}'.format(c=self.container)
739
740         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
741         if int(ret) != 0:
742             raise RuntimeError('Failed to get info about container {c.name}.'
743                                .format(c=self.container))
744
745     def system_info(self):
746         """Check the current kernel for LXC support.
747
748         :raises RuntimeError: If checking LXC support failed.
749         """
750         cmd = 'lxc-checkconfig'
751
752         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
753         if int(ret) != 0:
754             raise RuntimeError('Failed to check LXC support.')
755
756     def is_container_running(self):
757         """Check if container is running on node.
758
759         :returns: True if container is running.
760         :rtype: bool
761         :raises RuntimeError: If getting info about a container failed.
762         """
763         cmd = 'lxc-info --no-humanize --state --name {c.name}'\
764             .format(c=self.container)
765
766         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
767         if int(ret) != 0:
768             raise RuntimeError('Failed to get info about container {c.name}.'
769                                .format(c=self.container))
770         return True if 'RUNNING' in stdout else False
771
772     def is_container_present(self):
773         """Check if container is existing on node.
774
775         :returns: True if container is present.
776         :rtype: bool
777         :raises RuntimeError: If getting info about a container failed.
778         """
779         cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
780
781         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
782         return False if int(ret) else True
783
784     def _lxc_wait(self, state):
785         """Wait for a specific container state.
786
787         :param state: Specify the container state(s) to wait for.
788         :type state: str
789         :raises RuntimeError: If waiting for state of a container failed.
790         """
791         cmd = 'lxc-wait --name {c.name} --state "{s}"'\
792             .format(c=self.container, s=state)
793
794         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
795         if int(ret) != 0:
796             raise RuntimeError('Failed to wait for state "{s}" of container '
797                                '{c.name}.'.format(s=state, c=self.container))
798
799
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        If no image is configured on the container, a default Ubuntu SUT
        image matching the node architecture is selected.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container image failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            # Pick the architecture-appropriate default SUT image.
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            # Fixed: previous message claimed container creation failed,
            # while this step only pulls the image.
            raise RuntimeError('Failed to pull image {image} for container '
                               '{c.name}.'.format(image=self.container.image,
                                                  c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        # Redundant '{0}'.format() wrappers removed; ' '.join already
        # produces the final string.
        env = ' '.join('--env %s' % env for env in self.container.env)\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = ' '.join(
            '--publish %s' % var for var in self.container.publish)\
            if self.container.publish else ''

        volume = ' '.join(
            '--volume %s' % mnt for mnt in self.container.mnt)\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # NOTE(review): 'docker ps --filter name=' does substring matching;
        # presumably container names are unique enough here - verify.
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Non-empty stdout (a container ID) means the container exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Without --all, 'docker ps' lists only running containers.
        return bool(stdout)
974
975
class Container(object):
    """Container class.

    A loosely-typed attribute bag describing one container. Reading an
    attribute that was never set yields None instead of raising; setting
    an attribute that already holds a list appends to that list.
    """

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        # Only invoked when normal lookup fails; missing attributes
        # resolve to None rather than AttributeError.
        return self.__dict__.get(attr, None)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        if attr not in self.__dict__:
            # Creating a new attribute. Assigning 'node' also opens an
            # SSH connection to that node as a side effect.
            if attr == 'node':
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        elif isinstance(self.__dict__[attr], list):
            # Existing list attribute: accumulate instead of replacing.
            self.__dict__[attr].append(value)
        else:
            # Existing scalar attribute: overwrite.
            self.__dict__[attr] = value