1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from string import Template
20 from collections import OrderedDict, Counter
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisor/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class.

    Keeps an ordered registry of Container objects and drives a single
    engine instance (LXC or Docker) over each of them in turn.
    """

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            # Engine name maps directly onto a class defined in this module.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Containers per DUT; integer division keeps mid/sid integral.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # Pick PCI/name of the physical interface belonging to this DUT.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_mode='interrupt')

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last chain node faces the TG; others face a neighbour memif.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        # First memif of the pipeline is always master; the second one is
        # master only on the last node of the pipeline.
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine.

    Concrete engines (LXC, Docker) implement the lifecycle primitives;
    the VPP configuration helpers below are shared by all engines.
    """

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """Info about system."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            # LXC is started from a pristine image; wait for networking
            # and install the package first.
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = supervisor.'
                         'rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile = /tmp/supervisord.log\n'
                         'loglevel = debug\n'
                         'nodaemon = false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file} && '
                     'supervisorctl reload'.
                     format(
                         config='[program:vpp]\n'
                         'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart = false\n'
                         'autorestart = false\n'
                         'redirect_stderr = true\n'
                         'priority = 1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl start vpp')

        # Late import to avoid a hard dependency on Robot at module load.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        # Register PAPI and STATS sockets of this VPP instance in topology.
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config file failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = 'arm64' \
            if Topology.get_node_arch(self.container.node) == 'aarch64' \
            else 'amd64'

        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs:
            # https://github.com/lxc/lxc/issues/434
            ret, _, _ = self.container.ssh.exec_command_sudo(
                "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
                       c=self.container))
            if int(ret) != 0:
                raise RuntimeError('Failed to write {c.name} config.'.
                                   format(c=self.container))

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
                    '{options} 0 0'.format(
                        host_dir=host_dir, guest_dir=guest_dir[1:],
                        options=options)
                self.container.ssh.exec_command_sudo(
                    "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir))
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class.

    Attribute bag with two behaviors: reading a missing attribute yields
    None instead of raising, and re-assigning a list-valued attribute
    appends to the list instead of replacing it. Assigning 'node' opens an
    SSH connection to that node as a side effect.
    """

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            # Unknown attributes read as None rather than AttributeError.
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # First assignment of the node opens an SSH session to it.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value