1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from resources.libraries.python.ssh import SSH
22 from resources.libraries.python.constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.topology import Topology
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class.

    Holds an OrderedDict of Container instances keyed by name and delegates
    all engine-specific operations (acquire, create, execute, destroy, ...)
    to the configured ContainerEngine subclass (LXC or Docker).
    """

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        # NOTE(review): a try/except KeyError around the globals() lookup
        # appears to be missing -- as written the raise below always fires.
        self.engine = globals()[engine]()
        raise NotImplementedError('{engine} is not implemented.'.
                                  format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        # NOTE(review): a try/except KeyError and the '.format(name=name))'
        # continuation of the raise below appear to be missing.
        return self.containers[name]
        raise RuntimeError('Failed to get container with name: {name}'.

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create a fresh Container object on the engine.
        self.engine.initialize()
        # NOTE(review): a 'for key in kwargs:' loop appears to be missing
        # here -- 'key' is otherwise undefined.
        setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Set cpuset.cpus cgroup
        skip_cnt = kwargs['cpu_skip']
        smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
        if not kwargs['cpu_shared']:
            # Exclusive CPUs: shift the skip count by the container ordinal.
            skip_cnt += kwargs['i'] * kwargs['cpu_count']
        # NOTE(review): the two cpu_slice_of_list_per_node() calls below are
        # truncated (unbalanced parentheses; skip_cnt/smt_used arguments and
        # the else branch appear to be missing) -- verify against VCS.
        self.engine.container.cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
                                                cpu_cnt=kwargs['cpu_count']-1,

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Pass the ordinal so per-container CPU ranges can be derived.
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        # NOTE(review): the self.engine.build() call appears to be missing.
        for container in self.containers:
            self.engine.container = self.containers[container]

    def create_all_containers(self):
        """Create all containers."""
        # NOTE(review): the self.engine.create() call appears to be missing.
        for container in self.containers:
            self.engine.container = self.containers[container]

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def install_vpp_in_all_containers(self):
        """Install VPP into all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control
            # VPP inside the container.
            self.engine.install_supervisor()
            self.engine.install_vpp()
            self.engine.restart_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP on all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology,
                                        dut1_if=None, dut2_if=None):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param dut1_if: Interface on DUT1 directly connected to DUT2.
        :param dut2_if: Interface on DUT2 directly connected to DUT1.
        :type chain_topology: str
        :type dut1_if: str
        :type dut2_if: str
        :raises RuntimeError: If topology is unsupported or misused.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Containers per DUT. NOTE(review): '/' is integer division on
        # Python 2 only -- the sid computations below rely on an int.
        mod = len(self.containers)/dut_cnt
        container_vat_template = 'memif_create_{topology}.vat'.format(
            topology=chain_topology)

        if chain_topology == 'chain':
            for i, container in enumerate(self.containers):
                # NOTE(review): mid1/mid2 assignments appear to be missing;
                # they are referenced below but never defined here.
                sid1 = i % mod * 2 + 1
                sid2 = i % mod * 2 + 2
                self.engine.container = self.containers[container]
                self.engine.create_vpp_startup_config()
                self.engine.create_vpp_exec_config(container_vat_template, \
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
                    socket1='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid1), \
                    socket2='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid2))
        elif chain_topology == 'cross_horiz':
            # NOTE(review): a guard such as 'if len(self.containers) != 1:'
            # appears to be missing -- as written this raise is unconditional
            # and the loop below is unreachable.
            raise RuntimeError('Container chain topology {topology} '
                               'supports only single container.'.
                               format(topology=chain_topology))
            for i, container in enumerate(self.containers):
                # NOTE(review): the mid1 assignment appears to be missing.
                sid1 = i % mod * 2 + 1
                self.engine.container = self.containers[container]
                if 'DUT1' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr( \
                        self.engine.container.node, dut1_if)
                    if_name = Topology.get_interface_name( \
                        self.engine.container.node, dut1_if)
                if 'DUT2' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr( \
                        self.engine.container.node, dut2_if)
                    if_name = Topology.get_interface_name( \
                        self.engine.container.node, dut2_if)
                self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
                self.engine.create_vpp_exec_config(container_vat_template, \
                    mid1=mid1, sid1=sid1, if_name=if_name, \
                    socket1='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid1))
        # NOTE(review): an 'else:' appears to be missing before this raise.
        raise RuntimeError('Container topology {topology} not implemented'.
                           format(topology=chain_topology))

    def stop_all_containers(self):
        """Stop all containers."""
        # NOTE(review): the self.engine.stop() call appears to be missing.
        for container in self.containers:
            self.engine.container = self.containers[container]

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine.

    Subclasses (LXC, Docker) implement engine-specific operations on the
    single Container instance held in self.container.
    """

    # NOTE(review): the 'def __init__(self):' header appears to be missing
    # above the two lines below.
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        :raises NotImplementedError: Abstract method.
        """
        raise NotImplementedError

    # NOTE(review): the 'def build(self):' header appears to be missing.
        """Build container (compile)."""
        raise NotImplementedError

    # NOTE(review): the 'def create(self):' header appears to be missing.
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        :raises NotImplementedError: Abstract method.
        """
        raise NotImplementedError

    # NOTE(review): the 'def stop(self):' header appears to be missing.
        """Stop container."""
        raise NotImplementedError

    # NOTE(review): the 'def destroy(self):' header appears to be missing.
        """Destroy/remove container."""
        raise NotImplementedError

    # NOTE(review): the 'def info(self):' header appears to be missing.
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        # Abstract: report engine/system-wide information.
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        self.execute('sleep 3')
        self.execute('apt-get update')
        self.execute('apt-get install -y supervisor')
        # NOTE(review): the 'format(' line opening the keyword arguments
        # below appears to be missing (unbalanced parentheses as written);
        # several supervisord config sections also look truncated.
        self.execute('echo "{config}" > {config_file}'.
            config='[unix_http_server]\n'
                   'file = /tmp/supervisor.sock\n\n'
                   '[rpcinterface:supervisor]\n'
                   'supervisor.rpcinterface_factory = '
                   'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                   'serverurl = unix:///tmp/supervisor.sock\n\n'
                   'pidfile = /tmp/supervisord.pid\n'
                   'identifier = supervisor\n'
                   'logfile=/tmp/supervisord.log\n'
                   'nodaemon=false\n\n',
            config_file=SUPERVISOR_CONF))
        self.execute('supervisord -c {config_file}'.
                     format(config_file=SUPERVISOR_CONF))

    def install_vpp(self):
        """Install VPP inside a container."""
        # Mask the sysctl fragment shipped by the VPP package.
        self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
        self.execute('apt-get update')
        if self.container.install_dkms:
            # NOTE(review): the 'self.execute(' opening of this call appears
            # to be missing (orphaned string continuations below).
                'apt-get install -y dkms && '
                'dpkg -i --force-all '
                '{guest_dir}/openvpp-testing/download_dir/*.deb'.
                format(guest_dir=self.container.mnt[0].split(':')[1]))
        # NOTE(review): an 'else:' branch opening with 'self.execute(' appears
        # to be missing here.
            'for i in $(ls -I \"*dkms*\" '
            '{guest_dir}/openvpp-testing/download_dir/); do '
            'dpkg -i --force-all '
            '{guest_dir}/openvpp-testing/download_dir/$i; done'.
            format(guest_dir=self.container.mnt[0].split(':')[1]))
        self.execute('apt-get -f install -y')
        self.execute('apt-get install -y ca-certificates')
        # NOTE(review): the 'format(' opening of the keyword arguments below
        # appears to be missing.
        self.execute('echo "{config}" >> {config_file}'.
            config='[program:vpp]\n'
                   'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                   'autorestart=false\n'
                   'redirect_stderr=true\n'
                   config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl restart vpp')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        # Dump the supervisord log for debugging purposes.
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        # We will pop first core from list to be main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # if this is not only core in list, the rest will be used as workers.
        # NOTE(review): an 'if cpuset_cpus:' guard and the final
        # 'return vpp_config' appear to be missing here.
        corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
        vpp_config.add_cpu_corelist_workers(corelist_workers)

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, vat_template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param vat_template_file: File name of a VAT template script.
        :param kwargs: Parameters for VAT script.
        :type vat_template_file: str
        :type kwargs: dict
        """
        # NOTE(review): the 'f=vat_template_file)' continuation of this
        # format() call appears to be missing (unbalanced parentheses).
        vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,

        with open(vat_file_path, 'r') as template_file:
            cmd_template = template_file.readlines()
            for line_tmpl in cmd_template:
                vat_cmd = line_tmpl.format(**kwargs)
                # Append each rendered VAT line to the exec script that VPP
                # runs at startup ('unix exec /tmp/running.exec').
                self.execute('echo "{c}" >> /tmp/running.exec'
                             .format(c=vat_cmd.replace('\n', '')))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # NOTE(review): each raise below looks like it should be guarded by
        # an 'if int(ret):' check -- those lines appear to be missing.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # NOTE(review): the 'def __init__(self):' header appears to be missing.
        """Initialize LXC object."""
        super(LXC, self).__init__()

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        # NOTE(review): the body of this guard (destroy-or-return depending
        # on 'force') appears to be missing.
        if self.is_container_present():

        # Default to an Ubuntu Xenial amd64 download-template image.
        image = self.container.image if self.container.image else\
            "-d ubuntu -r xenial -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        # NOTE(review): an 'if int(ret):' guard before the raise appears to
        # be missing.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        raise RuntimeError('Failed to create container.')
        self._configure_cgroup('lxc')

    # NOTE(review): the 'def create(self):' header appears to be missing.
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # Bind-mount each host:guest pair into the container rootfs.
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                # NOTE(review): the host_dir/guest_dir keyword arguments of
                # this format() call appear to be missing (unbalanced
                # parentheses as written).
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    'bind,create=dir 0 0'.format(c=self.container,
                # NOTE(review): an 'if int(ret):' guard before the raise
                # appears to be missing.
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                raise RuntimeError('Failed to write {c.name} config.'
                                   .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to start container {c.name}.'.
                           format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to set cpuset.cpus to container '
                           '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to run command inside container '
                           '{c.name}.'.format(c=self.container))

    # NOTE(review): the 'def stop(self):' header and docstring opening appear
    # to be missing here.
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to stop container {c.name}.'
                           .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    # NOTE(review): the 'def destroy(self):' header appears to be missing.
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to destroy container {c.name}.'
                           .format(c=self.container))

    # NOTE(review): the 'def info(self):' header appears to be missing.
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to get info about container {c.name}.'
                           .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing -- the
        # return below is unreachable as written.
        raise RuntimeError('Failed to get info about container {c.name}.'
                           .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        # Presence is inferred from lxc-info's exit status only.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to wait for state "{s}" of container '
                           '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # NOTE(review): the 'def __init__(self):' header appears to be missing.
        """Initialize Docker object."""
        super(Docker, self).__init__()

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        # NOTE(review): the body of this guard (destroy-or-return depending
        # on 'force') appears to be missing.
        if self.is_container_present():

        # Default image used when the container does not specify one.
        image = self.container.image if self.container.image else\
            "ubuntu:xenial-20180412"

        cmd = 'docker pull {image}'.format(image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to create container {c.name}.'
                           .format(c=self.container))
        self._configure_cgroup('docker')

    # NOTE(review): the 'def create(self):' header appears to be missing.
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        # NOTE(review): the line disabling cpuset_mems and the
        # "env = '{0}'.format(" opening of the expression below appear to be
        # missing here.
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        # NOTE(review): the "cmd = 'docker run '..." opening line appears to
        # be missing above these continuations.
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to create container {c.name}'
                           .format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to execute command in container '
                           '{c.name}.'.format(c=self.container))

    # NOTE(review): the 'def stop(self):' header appears to be missing.
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to stop container {c.name}.'
                           .format(c=self.container))

    # NOTE(review): the 'def destroy(self):' header appears to be missing.
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to destroy container {c.name}.'
                           .format(c=self.container))

    # NOTE(review): the 'def info(self):' header appears to be missing.
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to get info about container {c.name}.'
                           .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing.
        raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # --all lists stopped containers too; any output means "present".
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing -- the
        # return below is unreachable as written.
        raise RuntimeError('Failed to get info about container {c.name}.'
                           .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # Without --all, docker ps lists only running containers.
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        # NOTE(review): an 'if int(ret):' guard appears to be missing -- the
        # return below is unreachable as written.
        raise RuntimeError('Failed to get info about container {c.name}.'
                           .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class.

    Attribute access is dynamic: attributes are created on first assignment
    (see __setattr__) and read back via __getattr__, so containers can carry
    arbitrary configuration fields (name, node, image, mnt, env, ...).
    """

    # NOTE(review): the 'def __init__(self):' header appears to be missing.
        """Initialize Container object."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        # NOTE(review): a try/except KeyError (returning None for unknown
        # attributes, per the docstring) appears to be missing around this
        # lookup.
        return self.__dict__[attr]

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        # Check if attribute exists
        # NOTE(review): the try/except around the existence check and the
        # branch structure appear to be missing; the ssh lines below
        # presumably run only when attr == 'node' (value being a topology
        # node dict used to open the SSH connection) -- verify against VCS.
        # Creating new attribute
        self.__dict__['ssh'] = SSH()
        self.__dict__['ssh'].connect(value)
        self.__dict__[attr] = value
        # Updating attribute base of type
        if isinstance(self.__dict__[attr], list):
            self.__dict__[attr].append(value)
        self.__dict__[attr] = value