1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from collections import OrderedDict, Counter
21 from resources.libraries.python.ssh import SSH
22 from resources.libraries.python.constants import Constants
23 from resources.libraries.python.CpuUtils import CpuUtils
24 from resources.libraries.python.topology import Topology
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Set cpuset.cpus cgroup
        skip_cnt = kwargs['cpu_skip']
        if not kwargs['cpu_shared']:
            skip_cnt += kwargs['i'] * kwargs['cpu_count']
        self.engine.container.cpuset_cpus = \
            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
                                                cpu_node=kwargs['cpuset_mems'],
                                                skip_cnt=skip_cnt,
                                                cpu_cnt=kwargs['cpu_count'],
                                                smt_used=kwargs['smt_used'])

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def install_vpp_in_all_containers(self):
        """Install VPP into all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.install_vpp()
            self.engine.restart_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP on all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology,
                                        dut1_if=None, dut2_if=None):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param dut1_if: Interface on DUT1 directly connected to DUT2.
        :param dut2_if: Interface on DUT2 directly connected to DUT1.
        :type chain_topology: str
        :type dut1_if: str
        :type dut2_if: str
        :raises RuntimeError: If chosen topology is not supported for the
            current container setup or is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Number of containers per DUT; memif/socket IDs repeat per DUT.
        mod = len(self.containers)/dut_cnt
        container_vat_template = 'memif_create_{topology}.vat'.format(
            topology=chain_topology)

        if chain_topology == 'chain':
            for i, container in enumerate(self.containers):
                mid1 = i % mod + 1
                mid2 = i % mod + 1
                sid1 = i % mod * 2 + 1
                sid2 = i % mod * 2 + 2
                self.engine.container = self.containers[container]
                self.engine.create_vpp_startup_config()
                self.engine.create_vpp_exec_config(container_vat_template, \
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
                    socket1='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid1), \
                    socket2='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid2))
        elif chain_topology == 'cross_horiz':
            if mod > 1:
                raise RuntimeError('Container chain topology {topology} '
                                   'supports only single container.'.
                                   format(topology=chain_topology))
            for i, container in enumerate(self.containers):
                mid1 = i % mod + 1
                sid1 = i % mod * 2 + 1
                self.engine.container = self.containers[container]
                if 'DUT1' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr( \
                        self.engine.container.node, dut1_if)
                    if_name = Topology.get_interface_name( \
                        self.engine.container.node, dut1_if)
                if 'DUT2' in self.engine.container.name:
                    if_pci = Topology.get_interface_pci_addr( \
                        self.engine.container.node, dut2_if)
                    if_name = Topology.get_interface_name( \
                        self.engine.container.node, dut2_if)
                self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
                self.engine.create_vpp_exec_config(container_vat_template, \
                    mid1=mid1, sid1=sid1, if_name=if_name, \
                    socket1='memif-{c.name}-{sid}'. \
                    format(c=self.engine.container, sid=sid1))
        else:
            raise RuntimeError('Container topology {topology} not implemented'.
                               format(topology=chain_topology))

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Initialize ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        self.execute('sleep 3')
        self.execute('apt-get update')
        self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisord -c {config_file}'.
                     format(config_file=SUPERVISOR_CONF))

    def install_vpp(self):
        """Install VPP inside a container."""
        self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
        self.execute('apt-get update')
        if self.container.install_dkms:
            self.execute(
                'apt-get install -y dkms && '
                'dpkg -i --force-all {guest_dir}/install_dir/*.deb'.
                format(guest_dir=self.container.mnt[0].split(':')[1]))
        else:
            self.execute(
                'for i in $(ls -I \"*dkms*\" {guest_dir}/install_dir/); do '
                'dpkg -i --force-all {guest_dir}/install_dir/$i; done'.
                format(guest_dir=self.container.mnt[0].split(':')[1]))
        self.execute('apt-get -f install -y')
        self.execute('apt-get install -y ca-certificates')
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        # We will pop first core from list to be main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # if this is not only core in list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, vat_template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param vat_template_file: File name of a VAT template script.
        :param kwargs: Parameters for VAT script.
        :type vat_template_file: str
        :type kwargs: dict
        """
        vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
                                         f=vat_template_file)

        with open(vat_file_path, 'r') as template_file:
            cmd_template = template_file.readlines()
            for line_tmpl in cmd_template:
                vat_cmd = line_tmpl.format(**kwargs)
                self.execute('echo "{c}" >> /tmp/running.exec'
                             .format(c=vat_cmd.replace('\n', '')))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    def __init__(self):
        """Initialize LXC object."""
        super(LXC, self).__init__()

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        image = self.container.image if self.container.image else\
            "-d ubuntu -r xenial -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    'bind,create=dir 0 0'.format(c=self.container,
                                                 host_dir=host_dir,
                                                 guest_dir=guest_dir)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    def __init__(self):
        """Initialize Docker object."""
        super(Docker, self).__init__()

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        cmd = 'docker pull {c.image}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))
        self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If runnig the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Unset attributes resolve to None instead of raising AttributeError,
        so callers can probe optional container parameters directly.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # Open an SSH connection to the node as soon as it is assigned.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value