CSIT-1477: add 1n_tx2 VPP Device
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
16
17 """Library to manipulate Containers."""
18
19 from string import Template
20 from collections import OrderedDict, Counter
21
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
26
27
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
29
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
31
32
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine implementations (LXC, Docker, ...) are classes defined
            # in this module; look the requested one up by name.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers.values():
            self.engine.container = container
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers.values():
            self.engine.container = container
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Containers per DUT. Floor division keeps 'mod' an integer under
        # Python 3 semantics as well; true division ('/') would produce a
        # float here and break the '{0:02X}' MAC formatting downstream.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # Memif ids and socket ids are derived from the container's
            # ordinal position on its DUT.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            # Guest-side path of the first mount (host_dir:guest_dir).
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # Pick the physical interface matching the DUT this container runs on.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_mode='interrupt')

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # Outermost containers face the TG; inner ones face their neighbour.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        # 1-based position of this container within its DUT's pipeline.
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        # First memif of each container is always master; second is master
        # only at the pipeline tail (otherwise slave of the next container).
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        # Head/tail containers use TG-facing sockets; inner ones share
        # pipe sockets with their neighbours.
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers.values():
            self.engine.container = container
            self.engine.destroy()
342
343
class ContainerEngine(object):
    """Abstract class for container engine.

    Concrete engines (LXC, Docker) implement the lifecycle primitives;
    the VPP configuration helpers below are shared.
    """

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # Supervisor package is installed for LXC only; other engines are
        # assumed to already provide it in their image (TODO: confirm).
        if isinstance(self, LXC):
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file  = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        # Append a [program:vpp] section, then start VPP via supervisorctl.
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart=false\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def _apply_vpp_startup_config(self, vpp_config):
        """Write VPP startup configuration file inside the container.

        :param vpp_config: Startup configuration to apply.
        :type vpp_config: VppConfigGenerator
        """
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        NOTE: The pop() below removes the main core from the container's
        cpuset_cpus list in place, so only worker cores remain afterwards.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self._apply_vpp_startup_config(vpp_config)

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # Relax exclusivity on the root cpuset cgroup first.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        # Then relax exclusivity on the engine-specific child cgroup.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
572
573
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        # Fall back to Ubuntu Bionic amd64 when no image was specified.
        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        # Image download can be slow, hence the generous timeout.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # Translate each "host_dir:guest_dir" mount into an LXC config
            # bind-mount entry before the container is started.
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    '{options} 0 0'.format(c=self.container,
                                           host_dir=host_dir,
                                           guest_dir=guest_dir,
                                           options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = ','.join(str(cpu) for cpu in self.container.cpuset_cpus)\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % var for var in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return 'RUNNING' in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        # Non-zero return code means the container does not exist.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not int(ret)

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
766
767
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container image failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            # Fall back to the default SUT image when none was requested.
            setattr(self.container, 'image',
                    Constants.DOCKER_SUT_IMAGE_UBUNTU)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        # Registry pulls can be slow; allow up to 30 minutes.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to pull image {c.image} for container '
                               '{c.name}.'.format(c=self.container))
        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        Builds the 'docker run' option strings from the container's
        attributes; each option collapses to '' when unset.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # 'docker ps --quiet' prints matching container IDs; empty means none.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Without --all only running containers are listed.
        return bool(stdout)
939
940
class Container(object):
    """Container attribute bag with lazy SSH setup.

    Unknown attributes read as None; assigning 'node' opens an SSH
    connection to it; re-assigning a list-valued attribute appends.
    """

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Only consulted after normal lookup fails, so a missing key simply
        means the attribute was never assigned.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        return self.__dict__.get(attr)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        if attr not in self.__dict__:
            # First assignment creates the attribute; 'node' additionally
            # establishes the SSH connection used by the engines.
            if attr == 'node':
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        elif isinstance(self.__dict__[attr], list):
            # List-valued attributes accumulate instead of being replaced.
            self.__dict__[attr].append(value)
        else:
            self.__dict__[attr] = value

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.