1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from string import Template
20 from collections import OrderedDict, Counter
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine class is looked up by name among the classes defined
            # in this module (LXC, Docker, ...).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Floor division keeps memif/socket ids integral on Python 3 as well.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_queues=1)

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last container in a chain face the TG; others face neighbours.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        # Last node in a pipeline acts as memif master on its second socket.
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # LXC images are bare, so supervisor must be installed first;
        # Docker SUT images already ship with it.
        if isinstance(self, LXC):
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart=false\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

        # Register the spawned VPP's sockets with the global topology so
        # PAPI/stats clients can find them. Late import avoids a hard Robot
        # dependency at module import time.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = 'arm64' \
            if Topology.get_node_arch(self.container.node) == 'aarch64' \
            else 'amd64'

        # Default image is an Ubuntu download template matching the node arch.
        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
                    '{options} 0 0'.format(c=self.container,
                                           host_dir=host_dir,
                                           guest_dir=guest_dir,
                                           options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        # Non-zero return code here means "not present", not an error.
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            # Unset attributes read as None instead of raising.
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # Open an SSH connection to the node as soon as it is set.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value