1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from resources.libraries.python.ssh import SSH
22 from resources.libraries.python.constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.topology import Topology
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
# Public API of this module.
__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]

# Path to the supervisord configuration file written inside each container.
SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine class is looked up by name among classes of this module.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        # Insertion order matters for deterministic memif/socket numbering.
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Set cpuset.cpus cgroup
        skip_cnt = kwargs['cpu_skip']
        smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
        if not kwargs['cpu_shared']:
            # Each container gets its own non-overlapping CPU slice.
            skip_cnt += kwargs['i'] * kwargs['cpu_count']
        # First allocated CPU becomes the VPP main core (never SMT-paired),
        # the remaining cpu_count-1 CPUs are workers.
        self.engine.container.cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
                                                skip_cnt=skip_cnt,
                                                cpu_cnt=1,
                                                smt_used=False) \
            + \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
                                                skip_cnt=skip_cnt+1,
                                                cpu_cnt=kwargs['cpu_count']-1,
                                                smt_used=smt_used)

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def install_vpp_in_all_containers(self):
        """Install VPP into all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.install_vpp()
            self.engine.restart_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP on all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology,
                                        dut1_if=None, dut2_if=None):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param dut1_if: Interface on DUT1 directly connected to DUT2.
        :param dut2_if: Interface on DUT2 directly connected to DUT1.
        :type chain_topology: str
        :type dut1_if: str
        :type dut2_if: str
        :raises RuntimeError: If chain topology is not supported for the
            current container count or is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Containers per DUT; floor division keeps integer IDs on Python 3 too.
        mod = len(self.containers) // dut_cnt
        container_vat_template = 'memif_create_{topology}.vat'.format(
            topology=chain_topology)

        if chain_topology == 'chain':
            for i, container in enumerate(self.containers):
                mid1 = i % mod + 1
                mid2 = i % mod + 1
                sid1 = i % mod * 2 + 1
                sid2 = i % mod * 2 + 2
                self.engine.container = self.containers[container]
                self.engine.create_vpp_startup_config()
                self.engine.create_vpp_exec_config(
                    container_vat_template,
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    socket1='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid1),
                    socket2='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid2))
        elif chain_topology == 'cross_horiz':
            if mod > 1:
                raise RuntimeError('Container chain topology {topology} '
                                   'supports only single container.'.
                                   format(topology=chain_topology))
            for i, container in enumerate(self.containers):
                mid1 = i % mod + 1
                sid1 = i % mod * 2 + 1
                self.engine.container = self.containers[container]
                # Pick the physical interface facing the other DUT.
                if 'DUT1' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr(
                        self.engine.container.node, dut1_if)
                    if_name = Topology.get_interface_name(
                        self.engine.container.node, dut1_if)
                if 'DUT2' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr(
                        self.engine.container.node, dut2_if)
                    if_name = Topology.get_interface_name(
                        self.engine.container.node, dut2_if)
                self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
                self.engine.create_vpp_exec_config(
                    container_vat_template,
                    mid1=mid1, sid1=sid1, if_name=if_name,
                    socket1='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid1))
        else:
            raise RuntimeError('Container topology {topology} not implemented'.
                               format(topology=chain_topology))

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # Give the freshly started container a moment to settle before apt.
        self.execute('sleep 3')
        self.execute('apt-get update')
        self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file  = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisord -c {config_file}'.
                     format(config_file=SUPERVISOR_CONF))

    def install_vpp(self):
        """Install VPP inside a container."""
        # Prevent sysctl from being applied inside the container.
        self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
        self.execute('apt-get update')
        # Debian packages are taken from the mounted download directory;
        # mnt entries are 'host_dir:guest_dir' strings.
        if self.container.install_dkms:
            self.execute(
                'apt-get install -y dkms && '
                'dpkg -i --force-all '
                '{guest_dir}/openvpp-testing/download_dir/*.deb'.
                format(guest_dir=self.container.mnt[0].split(':')[1]))
        else:
            # Skip dkms packages when dkms is not requested.
            self.execute(
                'for i in $(ls -I \"*dkms*\" '
                '{guest_dir}/openvpp-testing/download_dir/); do '
                'dpkg -i --force-all '
                '{guest_dir}/openvpp-testing/download_dir/$i; done'.
                format(guest_dir=self.container.mnt[0].split(':')[1]))
        self.execute('apt-get -f install -y')
        self.execute('apt-get install -y ca-certificates')
        # Register VPP as a supervisord-managed program.
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl restart vpp')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        # We will pop first core from list to be main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # if this is not only core in list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, vat_template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param vat_template_file: File name of a VAT template script.
        :param kwargs: Parameters for VAT script.
        :type vat_template_file: str
        :type kwargs: dict
        """
        vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
                                         f=vat_template_file)

        with open(vat_file_path, 'r') as template_file:
            cmd_template = template_file.readlines()
            for line_tmpl in cmd_template:
                vat_cmd = line_tmpl.format(**kwargs)
                # Append each rendered VAT line to the exec script VPP runs
                # at startup (see add_unix_exec above).
                self.execute('echo "{c}" >> /tmp/running.exec'
                             .format(c=vat_cmd.replace('\n', '')))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        image = self.container.image if self.container.image else\
            "-d ubuntu -r xenial -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        # Image download can be slow; allow a generous timeout.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')
        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        # Write bind-mount entries into the LXC config before starting.
        if self.container.mnt:
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    'bind,create=dir 0 0'.format(c=self.container,
                                                 host_dir=host_dir,
                                                 guest_dir=guest_dir)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        image = self.container.image if self.container.image else\
            "ubuntu:xenial-20180412"

        cmd = 'docker pull {image}'.format(image=image)

        # Registry pulls can be slow; allow a generous timeout.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))
        self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % var for var in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If runnig the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            # Unset attributes read as None instead of raising AttributeError.
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # Open a persistent SSH connection to the node as soon as it
                # is assigned, so engines can run commands on it.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                # List attributes accumulate values on reassignment.
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value