1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from resources.libraries.python.ssh import SSH
22 from resources.libraries.python.constants import Constants
23 from resources.libraries.python.topology import Topology
24 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
# Public API of this module.
__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]

# Path (inside the container) where the supervisord configuration is written.
SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            # Engine class is looked up by name among the classes defined
            # in this module (LXC, Docker, ...).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def install_vpp_in_all_containers(self):
        """Install VPP into all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control
            # VPP as a service
            self.engine.install_supervisor()
            self.engine.install_vpp()
            self.engine.restart_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP on all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology,
                                        dut1_if=None, dut2_if=None):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param dut1_if: Interface on DUT1 directly connected to DUT2.
        :param dut2_if: Interface on DUT2 directly connected to DUT1.
        :type chain_topology: str
        :type dut1_if: str
        :type dut2_if: str
        :raises RuntimeError: If chain topology is unsupported or not
            implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Number of containers per DUT; floor division keeps the value
        # integral on both Python 2 and Python 3.
        mod = len(self.containers) // dut_cnt
        container_vat_template = 'memif_create_{topology}.vat'.format(
            topology=chain_topology)

        if chain_topology == 'chain':
            for i, container in enumerate(self.containers):
                # memif IDs and socket IDs are derived from the container's
                # ordinal position on its DUT.
                mid1 = i % mod + 1
                mid2 = i % mod + 1
                sid1 = i % mod * 2 + 1
                sid2 = i % mod * 2 + 2
                self.engine.container = self.containers[container]
                self.engine.create_vpp_startup_config()
                self.engine.create_vpp_exec_config(
                    container_vat_template,
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    socket1='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid1),
                    socket2='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid2))
        elif chain_topology == 'cross_horiz':
            if mod > 1:
                raise RuntimeError('Container chain topology {topology} '
                                   'supports only single container.'.
                                   format(topology=chain_topology))
            for i, container in enumerate(self.containers):
                mid1 = i % mod + 1
                sid1 = i % mod * 2 + 1
                self.engine.container = self.containers[container]
                # Pick the physical interface facing the peer DUT.
                if 'DUT1' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr(
                        self.engine.container.node, dut1_if)
                    if_name = Topology.get_interface_name(
                        self.engine.container.node, dut1_if)
                if 'DUT2' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr(
                        self.engine.container.node, dut2_if)
                    if_name = Topology.get_interface_name(
                        self.engine.container.node, dut2_if)
                self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
                self.engine.create_vpp_exec_config(
                    container_vat_template,
                    mid1=mid1, sid1=sid1, if_name=if_name,
                    socket1='memif-{c.name}-{sid}'.
                    format(c=self.engine.container, sid=sid1))
        else:
            raise RuntimeError('Container topology {topology} not implemented'.
                               format(topology=chain_topology))

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Initialize ContainerEngine object."""
        # Container instance currently operated on; set via initialize()
        # or assigned directly by ContainerManager.
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """Info about the container engine on the host."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # Give the container a moment to settle before using apt.
        self.execute('sleep 3')
        self.execute('apt-get update')
        self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisord -c {config_file}'.
                     format(config_file=SUPERVISOR_CONF))

    def install_vpp(self):
        """Install VPP inside a container."""
        # Prevent sysctl from being applied inside the container.
        self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
        self.execute('apt-get update')
        # Workaround for install xenial vpp build on bionic ubuntu.
        self.execute('apt-get install -y wget')
        self.execute('deb=$(mktemp) && wget -O "${deb}" '
                     'http://launchpadlibrarian.net/336117627/'
                     'libmbedcrypto0_2.5.1-1ubuntu1_amd64.deb && '
                     'dpkg -i "${deb}" && '
                     'rm -f "${deb}"')
        self.execute('deb=$(mktemp) && wget -O "${deb}" '
                     'http://launchpadlibrarian.net/252876048/'
                     'libboost-system1.58.0_1.58.0+dfsg-5ubuntu3_amd64.deb && '
                     'dpkg -i "${deb}" && '
                     'rm -f "${deb}"')
        # Install VPP packages from the mounted download directory; the
        # guest path is the part of the first mount entry after ':'.
        self.execute(
            'dpkg -i --force-all '
            '{guest_dir}/openvpp-testing/download_dir/*.deb'.
            format(guest_dir=self.container.mnt[0].split(':')[1]))
        self.execute('apt-get -f install -y')
        self.execute('apt-get install -y ca-certificates')
        # Register VPP as a supervisord-managed program.
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl restart vpp')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        # We will pop first core from list to be main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # if this is not only core in list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, vat_template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param vat_template_file: File name of a VAT template script.
        :param kwargs: Parameters for VAT script.
        :type vat_template_file: str
        :type kwargs: dict
        """
        vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
                                         f=vat_template_file)

        with open(vat_file_path, 'r') as template_file:
            cmd_template = template_file.readlines()
            for line_tmpl in cmd_template:
                vat_cmd = line_tmpl.format(**kwargs)
                self.execute('echo "{c}" >> /tmp/running.exec'
                             .format(c=vat_cmd.replace('\n', '')))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        # Fall back to a default Ubuntu Xenial image when none was given.
        image = self.container.image if self.container.image else\
            "-d ubuntu -r xenial -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # Translate each "host:guest" mount into an lxc.mount.entry line
            # appended to the container config.
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    'bind,create=dir 0 0'.format(c=self.container,
                                                 host_dir=host_dir,
                                                 guest_dir=guest_dir)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return 'RUNNING' in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        # Non-zero return code means the container does not exist.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not int(ret)

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        # Default to the CSIT SUT image when none was specified.
        if not self.container.image:
            setattr(self.container, 'image', 'snergster/csit-sut:latest')

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))
        self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If runnig the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Empty stdout means no matching container.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return bool(stdout)
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Called only when normal lookup fails, so unset attributes read as
        None instead of raising AttributeError.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # First assignment of 'node' opens an SSH session to it.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                # List attributes accumulate values instead of overwriting.
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value