1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from resources.libraries.python.ssh import SSH
22 from resources.libraries.python.constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.topology import Topology
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
# Public API of this module: the manager, the abstract engine, its two
# concrete implementations, and the container data object.
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
# Path of the supervisord configuration file written inside containers.
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
33 class ContainerManager(object):
# NOTE(review): this listing embeds original line numbers and has gaps
# (e.g. 34->36, 44->46, 58->60), so several statements — try/except
# headers, if-guards, some method calls — are missing from this view.
# Comments below describe only what the visible code shows; hedged
# items must be confirmed against the complete file.
34 """Container lifecycle management class."""
36 def __init__(self, engine):
37 """Initialize Container Manager class.
39 :param engine: Container technology used (LXC/Docker/...).
41 :raises NotImplementedError: If container technology is not implemented.
# Engine class is looked up by name in module globals (LXC/Docker) and
# instantiated; the orphaned raise below presumably belonged to a
# try/except KeyError around the lookup — TODO confirm.
44 self.engine = globals()[engine]()
46 raise NotImplementedError('{engine} is not implemented.'.
47 format(engine=engine))
# Containers kept in insertion order, keyed by container name.
48 self.containers = OrderedDict()
50 def get_container_by_name(self, name):
51 """Get container instance.
53 :param name: Container name.
55 :returns: Container instance.
57 :raises RuntimeError: If failed to get container with name.
# Plain dict lookup; the raise below was presumably the except branch
# of a try/except KeyError — the header lines are not visible here.
60 return self.containers[name]
62 raise RuntimeError('Failed to get container with name: {name}'.
65 def construct_container(self, **kwargs):
66 """Construct container object on node with specified parameters.
68 :param kwargs: Key-value pairs used to construct container.
# A fresh Container object is created; kwargs are copied onto it via
# setattr (the for-loop header over kwargs is not visible here).
72 self.engine.initialize()
75 setattr(self.engine.container, key, kwargs[key])
77 # Set additional environmental variables
78 setattr(self.engine.container, 'env',
79 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
81 # Set cpuset.cpus cgroup
82 skip_cnt = kwargs['cpu_skip']
83 smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
84 if not kwargs['cpu_shared']:
# Non-shared CPUs: offset the skip count by this container's ordinal
# 'i' so each container gets a disjoint CPU slice.
85 skip_cnt += kwargs['i'] * kwargs['cpu_count']
86 self.engine.container.cpuset_cpus = \
87 CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
88 cpu_node=kwargs['cpuset_mems'],
# The second slice call below (note cpu_count-1) is presumably the
# shared-CPU else-branch — its header line is not visible; confirm.
93 CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
94 cpu_node=kwargs['cpuset_mems'],
96 cpu_cnt=kwargs['cpu_count']-1,
99 # Store container instance
100 self.containers[kwargs['name']] = self.engine.container
102 def construct_containers(self, **kwargs):
103 """Construct 1..N container(s) on node with specified name.
105 Ordinal number is automatically added to the name of container as
108 :param kwargs: Named parameters.
# Base name is saved first because kwargs['name'] is rewritten with
# an ordinal suffix on every iteration.
111 name = kwargs['name']
112 for i in range(kwargs['count']):
113 # Name will contain ordinal suffix
114 kwargs['name'] = ''.join([name, str(i+1)])
116 self.construct_container(i=i, **kwargs)
118 def acquire_all_containers(self):
119 """Acquire all containers."""
120 for container in self.containers:
121 self.engine.container = self.containers[container]
122 self.engine.acquire()
124 def build_all_containers(self):
125 """Build all containers."""
126 for container in self.containers:
127 self.engine.container = self.containers[container]
# NOTE(review): the engine.build() call expected here is not visible.
130 def create_all_containers(self):
131 """Create all containers."""
132 for container in self.containers:
133 self.engine.container = self.containers[container]
# NOTE(review): the engine.create() call expected here is not visible.
136 def execute_on_container(self, name, command):
137 """Execute command on container with name.
139 :param name: Container name.
140 :param command: Command to execute.
144 self.engine.container = self.get_container_by_name(name)
145 self.engine.execute(command)
147 def execute_on_all_containers(self, command):
148 """Execute command on all containers.
150 :param command: Command to execute.
153 for container in self.containers:
154 self.engine.container = self.containers[container]
155 self.engine.execute(command)
157 def install_vpp_in_all_containers(self):
158 """Install VPP into all containers."""
159 for container in self.containers:
160 self.engine.container = self.containers[container]
161 # We need to install supervisor client/server system to control VPP
163 self.engine.install_supervisor()
164 self.engine.install_vpp()
165 self.engine.restart_vpp()
167 def restart_vpp_in_all_containers(self):
168 """Restart VPP on all containers."""
169 for container in self.containers:
170 self.engine.container = self.containers[container]
171 self.engine.restart_vpp()
173 def configure_vpp_in_all_containers(self, chain_topology,
174 dut1_if=None, dut2_if=None):
175 """Configure VPP in all containers.
177 :param chain_topology: Topology used for chaining containers can be
178 chain or cross_horiz. Chain topology is using 1 memif pair per
179 container. Cross_horiz topology is using 1 memif and 1 physical
180 interface in container (only single container can be configured).
181 :param dut1_if: Interface on DUT1 directly connected to DUT2.
182 :param dut2_if: Interface on DUT2 directly connected to DUT1.
183 :type container_topology: str
187 # Count number of DUTs based on node's host information
188 dut_cnt = len(Counter([self.containers[container].node['host']
189 for container in self.containers]))
# Containers per DUT; feeds the per-DUT memif socket id arithmetic.
190 mod = len(self.containers)/dut_cnt
191 container_vat_template = 'memif_create_{topology}.vat'.format(
192 topology=chain_topology)
194 if chain_topology == 'chain':
195 for i, container in enumerate(self.containers):
# NOTE(review): mid1/mid2 assignments used below are not visible in
# this listing.
198 sid1 = i % mod * 2 + 1
199 sid2 = i % mod * 2 + 2
200 self.engine.container = self.containers[container]
201 self.engine.create_vpp_startup_config()
202 self.engine.create_vpp_exec_config(container_vat_template, \
203 mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
204 socket1='memif-{c.name}-{sid}'. \
205 format(c=self.engine.container, sid=sid1), \
206 socket2='memif-{c.name}-{sid}'. \
207 format(c=self.engine.container, sid=sid2))
208 elif chain_topology == 'cross_horiz':
# The if-guard for this raise (more than one container per DUT) is
# not visible here — confirm against the full file.
210 raise RuntimeError('Container chain topology {topology} '
211 'supports only single container.'.
212 format(topology=chain_topology))
213 for i, container in enumerate(self.containers):
215 sid1 = i % mod * 2 + 1
216 self.engine.container = self.containers[container]
# Physical interface PCI address and name are resolved per DUT side
# based on which DUT the container name refers to.
217 if 'DUT1' in self.engine.container.name:
218 if_pci = Topology.get_interface_pci_addr( \
219 self.engine.container.node, dut1_if)
220 if_name = Topology.get_interface_name( \
221 self.engine.container.node, dut1_if)
222 if 'DUT2' in self.engine.container.name:
223 if_pci = Topology.get_interface_pci_addr( \
224 self.engine.container.node, dut2_if)
225 if_name = Topology.get_interface_name( \
226 self.engine.container.node, dut2_if)
227 self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
228 self.engine.create_vpp_exec_config(container_vat_template, \
229 mid1=mid1, sid1=sid1, if_name=if_name, \
230 socket1='memif-{c.name}-{sid}'. \
231 format(c=self.engine.container, sid=sid1))
# Fallback branch: unknown topology names are rejected (the else
# header is not visible here).
233 raise RuntimeError('Container topology {topology} not implemented'.
234 format(topology=chain_topology))
236 def stop_all_containers(self):
237 """Stop all containers."""
238 for container in self.containers:
239 self.engine.container = self.containers[container]
# NOTE(review): the engine.stop() call expected here is not visible.
242 def destroy_all_containers(self):
243 """Destroy all containers."""
244 for container in self.containers:
245 self.engine.container = self.containers[container]
246 self.engine.destroy()
249 class ContainerEngine(object):
# NOTE(review): abstract base class — concrete engines (LXC, Docker)
# override the NotImplementedError methods. The listing has gaps, so
# several def headers (build/create/stop/destroy/info, __init__) are
# missing and some bodies below appear detached from their signatures.
250 """Abstract class for container engine."""
253 """Init ContainerEngine object."""
# Concrete Container object is attached later via initialize().
254 self.container = None
256 def initialize(self):
257 """Initialize container object."""
258 self.container = Container()
260 def acquire(self, force):
261 """Acquire/download container.
263 :param force: Destroy a container if exists and create.
266 raise NotImplementedError
269 """Build container (compile)."""
270 raise NotImplementedError
273 """Create/deploy container."""
274 raise NotImplementedError
276 def execute(self, command):
277 """Execute process inside container.
279 :param command: Command to run inside container.
282 raise NotImplementedError
285 """Stop container."""
286 raise NotImplementedError
289 """Destroy/remove container."""
290 raise NotImplementedError
293 """Info about container."""
294 raise NotImplementedError
296 def system_info(self):
# Abstract: report system-wide engine information.
298 raise NotImplementedError
300 def install_supervisor(self):
301 """Install supervisord inside a container."""
# Short sleep before apt operations — presumably lets the freshly
# started container settle; TODO confirm intent.
302 self.execute('sleep 3')
303 self.execute('apt-get update')
304 self.execute('apt-get install -y supervisor')
# Write supervisord config (some config lines are missing from this
# listing) and start the daemon with it.
305 self.execute('echo "{config}" > {config_file}'.
307 config='[unix_http_server]\n'
308 'file = /tmp/supervisor.sock\n\n'
309 '[rpcinterface:supervisor]\n'
310 'supervisor.rpcinterface_factory = '
311 'supervisor.rpcinterface:make_main_rpcinterface\n\n'
313 'serverurl = unix:///tmp/supervisor.sock\n\n'
315 'pidfile = /tmp/supervisord.pid\n'
316 'identifier = supervisor\n'
318 'logfile=/tmp/supervisord.log\n'
320 'nodaemon=false\n\n',
321 config_file=SUPERVISOR_CONF))
322 self.execute('supervisord -c {config_file}'.
323 format(config_file=SUPERVISOR_CONF))
325 def install_vpp(self):
326 """Install VPP inside a container."""
# Mask VPP's sysctl snippet so package installation does not try to
# apply host kernel settings from inside the container.
327 self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
328 self.execute('apt-get update')
329 if self.container.install_dkms:
331 'apt-get install -y dkms && '
332 'dpkg -i --force-all {guest_dir}/install_dir/*.deb'.
333 format(guest_dir=self.container.mnt[0].split(':')[1]))
# Presumed else-branch: install all debs except dkms ones. mnt entries
# are 'host_dir:guest_dir' strings; index [1] is the guest path.
336 'for i in $(ls -I \"*dkms*\" {guest_dir}/install_dir/); do '
337 'dpkg -i --force-all {guest_dir}/install_dir/$i; done'.
338 format(guest_dir=self.container.mnt[0].split(':')[1]))
339 self.execute('apt-get -f install -y')
340 self.execute('apt-get install -y ca-certificates')
# Register VPP as a supervisord program, then reload and start it.
341 self.execute('echo "{config}" >> {config_file}'.
343 config='[program:vpp]\n'
344 'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
345 'autorestart=false\n'
346 'redirect_stderr=true\n'
348 config_file=SUPERVISOR_CONF))
349 self.execute('supervisorctl reload')
350 self.execute('supervisorctl restart vpp')
352 def restart_vpp(self):
353 """Restart VPP service inside a container."""
354 self.execute('supervisorctl restart vpp')
355 self.execute('cat /tmp/supervisord.log')
357 def create_base_vpp_startup_config(self):
358 """Create base startup configuration of VPP on container.
360 :returns: Base VPP startup configuration.
361 :rtype: VppConfigGenerator
363 cpuset_cpus = self.container.cpuset_cpus
365 # Create config instance
366 vpp_config = VppConfigGenerator()
367 vpp_config.set_node(self.container.node)
368 vpp_config.add_unix_cli_listen()
369 vpp_config.add_unix_nodaemon()
370 vpp_config.add_unix_exec('/tmp/running.exec')
371 # We will pop first core from list to be main core
# NOTE(review): pop(0) mutates self.container.cpuset_cpus in place —
# cpuset_cpus is the same list object, not a copy.
372 vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
373 # if this is not only core in list, the rest will be used as workers.
375 corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
376 vpp_config.add_cpu_corelist_workers(corelist_workers)
# NOTE(review): the 'return vpp_config' expected here (per the
# docstring's :returns:) is not visible in this listing.
380 def create_vpp_startup_config(self):
381 """Create startup configuration of VPP without DPDK on container.
383 vpp_config = self.create_base_vpp_startup_config()
384 vpp_config.add_plugin('disable', 'dpdk_plugin.so')
386 # Apply configuration
387 self.execute('mkdir -p /etc/vpp/')
388 self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
389 .format(config=vpp_config.get_config_str()))
391 def create_vpp_startup_config_dpdk_dev(self, *devices):
392 """Create startup configuration of VPP with DPDK on container.
394 :param devices: List of PCI devices to add.
397 vpp_config = self.create_base_vpp_startup_config()
398 vpp_config.add_dpdk_dev(*devices)
399 vpp_config.add_dpdk_no_tx_checksum_offload()
400 vpp_config.add_dpdk_log_level('debug')
# Disable everything, then whitelist only dpdk and memif plugins.
401 vpp_config.add_plugin('disable', 'default')
402 vpp_config.add_plugin('enable', 'dpdk_plugin.so')
403 vpp_config.add_plugin('enable', 'memif_plugin.so')
405 # Apply configuration
406 self.execute('mkdir -p /etc/vpp/')
407 self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
408 .format(config=vpp_config.get_config_str()))
410 def create_vpp_exec_config(self, vat_template_file, **kwargs):
411 """Create VPP exec configuration on container.
413 :param vat_template_file: File name of a VAT template script.
414 :param kwargs: Parameters for VAT script.
415 :type vat_template_file: str
# Template is read on the local host, each line formatted with kwargs,
# then appended line by line into /tmp/running.exec in the container.
418 vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
421 with open(vat_file_path, 'r') as template_file:
422 cmd_template = template_file.readlines()
423 for line_tmpl in cmd_template:
424 vat_cmd = line_tmpl.format(**kwargs)
425 self.execute('echo "{c}" >> /tmp/running.exec'
426 .format(c=vat_cmd.replace('\n', '')))
428 def is_container_running(self):
429 """Check if container is running."""
430 raise NotImplementedError
432 def is_container_present(self):
433 """Check if container is present."""
434 raise NotImplementedError
436 def _configure_cgroup(self, name):
437 """Configure the control group associated with a container.
439 By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
440 container is initialized a new cgroup /docker or /lxc is created under
441 cpuset parent tree. This newly created cgroup is inheriting parent
442 setting for cpu/mem exclusive parameter and thus cannot be overriden
443 within /docker or /lxc cgroup. This function is supposed to set cgroups
444 to allow coexistence of both engines.
446 :param name: Name of cgroup.
448 :raises RuntimeError: If applying cgroup settings via cgset failed.
# Each cgset/cgcreate below is followed by an error check; the 'if ret'
# guard lines preceding each raise are not visible in this listing.
450 ret, _, _ = self.container.ssh.exec_command_sudo(
451 'cgset -r cpuset.cpu_exclusive=0 /')
453 raise RuntimeError('Failed to apply cgroup settings.')
455 ret, _, _ = self.container.ssh.exec_command_sudo(
456 'cgset -r cpuset.mem_exclusive=0 /')
458 raise RuntimeError('Failed to apply cgroup settings.')
460 ret, _, _ = self.container.ssh.exec_command_sudo(
461 'cgcreate -g cpuset:/{name}'.format(name=name))
463 raise RuntimeError('Failed to copy cgroup settings from root.')
465 ret, _, _ = self.container.ssh.exec_command_sudo(
466 'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
468 raise RuntimeError('Failed to apply cgroup settings.')
470 ret, _, _ = self.container.ssh.exec_command_sudo(
471 'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
473 raise RuntimeError('Failed to apply cgroup settings.')
476 class LXC(ContainerEngine):
# NOTE(review): LXC engine driving lxc-* CLI tools on the node over
# SSH. The listing has gaps — several def headers (stop, destroy,
# info) and 'if ret' guard lines are missing below.
477 """LXC implementation."""
480 """Initialize LXC object."""
481 super(LXC, self).__init__()
483 def acquire(self, force=True):
484 """Acquire a privileged system object where configuration is stored.
486 :param force: If a container exists, destroy it and create a new
489 :raises RuntimeError: If creating the container or writing the container
492 if self.is_container_present():
# Body handling an already-present container (destroy when force) is
# not visible in this listing.
# Default to a downloadable Ubuntu Xenial amd64 image when the
# container does not specify one.
498 image = self.container.image if self.container.image else\
499 "-d ubuntu -r xenial -a amd64"
501 cmd = 'lxc-create -t download --name {c.name} -- {image} '\
502 '--no-validate'.format(c=self.container, image=image)
# Long timeout: the image download can be slow.
504 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
506 raise RuntimeError('Failed to create container.')
508 self._configure_cgroup('lxc')
511 """Create/deploy an application inside a container on system.
513 :raises RuntimeError: If creating the container fails.
515 if self.container.mnt:
# Each 'host:guest' mount becomes an lxc.mount.entry bind-mount line
# appended to the container's LXC config file.
516 for mount in self.container.mnt:
517 host_dir, guest_dir = mount.split(':')
518 entry = 'lxc.mount.entry = {host_dir} '\
519 '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
520 'bind,create=dir 0 0'.format(c=self.container,
523 ret, _, _ = self.container.ssh.exec_command_sudo(
524 "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
525 format(e=entry, c=self.container))
527 raise RuntimeError('Failed to write {c.name} config.'
528 .format(c=self.container))
# Comma-joined CPU list, empty string when no cpuset is requested.
530 cpuset_cpus = '{0}'.format(
531 ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
532 if self.container.cpuset_cpus else ''
534 ret, _, _ = self.container.ssh.exec_command_sudo(
535 'lxc-start --name {c.name} --daemon'.
536 format(c=self.container))
538 raise RuntimeError('Failed to start container {c.name}.'.
539 format(c=self.container))
540 self._lxc_wait('RUNNING')
542 # Workaround for LXC to be able to allocate all cpus including isolated.
543 ret, _, _ = self.container.ssh.exec_command_sudo(
544 'cgset --copy-from / lxc/')
546 raise RuntimeError('Failed to copy cgroup to LXC')
548 ret, _, _ = self.container.ssh.exec_command_sudo(
549 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
550 format(c=self.container, cpus=cpuset_cpus))
552 raise RuntimeError('Failed to set cpuset.cpus to container '
553 '{c.name}.'.format(c=self.container))
555 def execute(self, command):
556 """Start a process inside a running container.
558 Runs the specified command inside the container specified by name. The
559 container has to be running already.
561 :param command: Command to run inside container.
563 :raises RuntimeError: If running the command failed.
# Pass container env vars through lxc-attach; empty when none set.
565 env = '--keep-env {0}'.format(
566 ' '.join('--set-var %s' % env for env in self.container.env))\
567 if self.container.env else ''
569 cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
570 "exit $?'".format(env=env, c=self.container, command=command)
572 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
574 raise RuntimeError('Failed to run command inside container '
575 '{c.name}.'.format(c=self.container))
580 :raises RuntimeError: If stopping the container failed.
582 cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
584 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
586 raise RuntimeError('Failed to stop container {c.name}.'
587 .format(c=self.container))
588 self._lxc_wait('STOPPED|FROZEN')
591 """Destroy a container.
593 :raises RuntimeError: If destroying container failed.
595 cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
597 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
599 raise RuntimeError('Failed to destroy container {c.name}.'
600 .format(c=self.container))
603 """Query and shows information about a container.
605 :raises RuntimeError: If getting info about a container failed.
607 cmd = 'lxc-info --name {c.name}'.format(c=self.container)
609 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
611 raise RuntimeError('Failed to get info about container {c.name}.'
612 .format(c=self.container))
614 def system_info(self):
615 """Check the current kernel for LXC support.
617 :raises RuntimeError: If checking LXC support failed.
619 cmd = 'lxc-checkconfig'
621 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
623 raise RuntimeError('Failed to check LXC support.')
625 def is_container_running(self):
626 """Check if container is running on node.
628 :returns: True if container is running.
630 :raises RuntimeError: If getting info about a container failed.
632 cmd = 'lxc-info --no-humanize --state --name {c.name}'\
633 .format(c=self.container)
635 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
637 raise RuntimeError('Failed to get info about container {c.name}.'
638 .format(c=self.container))
639 return True if 'RUNNING' in stdout else False
641 def is_container_present(self):
642 """Check if container is existing on node.
644 :returns: True if container is present.
646 :raises RuntimeError: If getting info about a container failed.
648 cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
650 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
# Non-zero lxc-info exit code means the container does not exist.
651 return False if int(ret) else True
653 def _lxc_wait(self, state):
654 """Wait for a specific container state.
656 :param state: Specify the container state(s) to wait for.
658 :raises RuntimeError: If waiting for state of a container failed.
660 cmd = 'lxc-wait --name {c.name} --state "{s}"'\
661 .format(c=self.container, s=state)
663 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
665 raise RuntimeError('Failed to wait for state "{s}" of container '
666 '{c.name}.'.format(s=state, c=self.container))
669 class Docker(ContainerEngine):
# NOTE(review): Docker engine driving the docker CLI on the node over
# SSH. The listing has gaps — several def headers (create, stop,
# destroy, info) and 'if ret' guard lines are missing below.
670 """Docker implementation."""
673 """Initialize Docker object."""
674 super(Docker, self).__init__()
676 def acquire(self, force=True):
677 """Pull an image or a repository from a registry.
679 :param force: Destroy a container if exists.
681 :raises RuntimeError: If pulling a container failed.
683 if self.is_container_present():
# Body handling an already-present container is not visible here.
689 cmd = 'docker pull {c.image}'.format(c=self.container)
# Long timeout: image pull can be slow.
691 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
693 raise RuntimeError('Failed to create container {c.name}.'
694 .format(c=self.container))
695 self._configure_cgroup('docker')
698 """Create/deploy container.
700 :raises RuntimeError: If creating a container failed.
# Each optional docker-run flag is built as a string that collapses
# to '' when the corresponding container attribute is unset.
702 cpuset_cpus = '--cpuset-cpus={0}'.format(
703 ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
704 if self.container.cpuset_cpus else ''
706 cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
707 if self.container.cpuset_mems is not None else ''
708 # Temporary workaround - disabling due to bug in memif
# The opener of the env string expression (before the join below) is
# not visible in this listing.
712 ' '.join('--env %s' % env for env in self.container.env))\
713 if self.container.env else ''
715 command = '{0}'.format(self.container.command)\
716 if self.container.command else ''
718 publish = '{0}'.format(
719 ' '.join('--publish %s' % var for var in self.container.publish))\
720 if self.container.publish else ''
722 volume = '{0}'.format(
723 ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
724 if self.container.mnt else ''
# The 'cmd = ...docker run...' opener for this command string is not
# visible in this listing.
727 '--privileged --detach --interactive --tty --rm '\
728 '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
729 '{env} {volume} --name {container.name} {container.image} '\
730 '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
731 container=self.container, command=command,
732 env=env, publish=publish, volume=volume)
734 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
736 raise RuntimeError('Failed to create container {c.name}'
737 .format(c=self.container))
741 def execute(self, command):
742 """Start a process inside a running container.
744 Runs the specified command inside the container specified by name. The
745 container has to be running already.
747 :param command: Command to run inside container.
749 :raises RuntimeError: If runnig the command in a container failed.
751 cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
752 "exit $?'".format(c=self.container, command=command)
754 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
756 raise RuntimeError('Failed to execute command in container '
757 '{c.name}.'.format(c=self.container))
760 """Stop running container.
762 :raises RuntimeError: If stopping a container failed.
764 cmd = 'docker stop {c.name}'.format(c=self.container)
766 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
768 raise RuntimeError('Failed to stop container {c.name}.'
769 .format(c=self.container))
772 """Remove a container.
774 :raises RuntimeError: If removing a container failed.
776 cmd = 'docker rm --force {c.name}'.format(c=self.container)
778 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
780 raise RuntimeError('Failed to destroy container {c.name}.'
781 .format(c=self.container))
784 """Return low-level information on Docker objects.
786 :raises RuntimeError: If getting info about a container failed.
788 cmd = 'docker inspect {c.name}'.format(c=self.container)
790 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
792 raise RuntimeError('Failed to get info about container {c.name}.'
793 .format(c=self.container))
795 def system_info(self):
796 """Display the docker system-wide information.
798 :raises RuntimeError: If displaying system information failed.
800 cmd = 'docker system info'
802 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
804 raise RuntimeError('Failed to get system info.')
806 def is_container_present(self):
807 """Check if container is present on node.
809 :returns: True if container is present.
811 :raises RuntimeError: If getting info about a container failed.
# --all includes stopped containers; any stdout means a container
# with this name exists.
813 cmd = 'docker ps --all --quiet --filter name={c.name}'\
814 .format(c=self.container)
816 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
818 raise RuntimeError('Failed to get info about container {c.name}.'
819 .format(c=self.container))
820 return True if stdout else False
822 def is_container_running(self):
823 """Check if container is running on node.
825 :returns: True if container is running.
827 :raises RuntimeError: If getting info about a container failed.
# Without --all, docker ps lists running containers only.
829 cmd = 'docker ps --quiet --filter name={c.name}'\
830 .format(c=self.container)
832 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
834 raise RuntimeError('Failed to get info about container {c.name}.'
835 .format(c=self.container))
836 return True if stdout else False
839 class Container(object):
# NOTE(review): attribute-bag data object with custom get/set hooks.
# The listing gaps hide the __init__ body, __getattr__'s fallback
# branch (docstring says it returns None for missing attributes) and
# the try/except structure of __setattr__ — confirm before relying on
# the exact control flow.
840 """Container class."""
843 """Initialize Container object."""
846 def __getattr__(self, attr):
847 """Get attribute custom implementation.
849 :param attr: Attribute to get.
851 :returns: Attribute value or None.
855 return self.__dict__[attr]
859 def __setattr__(self, attr, value):
860 """Set attribute custom implementation.
862 :param attr: Attribute to set.
863 :param value: Value to set.
868 # Check if attribute exists
871 # Creating new attribute
# An SSH connection to 'value' is opened and cached on the instance;
# presumably guarded by attr == 'node' — the guard line is not
# visible here, confirm against the full file.
873 self.__dict__['ssh'] = SSH()
874 self.__dict__['ssh'].connect(value)
875 self.__dict__[attr] = value
877 # Updating attribute base of type
# Existing list-valued attributes accumulate values via append;
# non-list attributes are replaced.
878 if isinstance(self.__dict__[attr], list):
879 self.__dict__[attr].append(value)
881 self.__dict__[attr] = value