1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from string import Template
20 from collections import OrderedDict, Counter
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class.

    Keeps an ordered registry of Container objects and delegates the
    engine-specific work (acquire/build/create/stop/destroy, VPP control)
    to the selected ContainerEngine implementation.
    """

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine name must match a class defined in this module.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Floor division: memif/socket IDs below must stay integers
        # (true division yields a float on Python 3).
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # NOTE(review): assumes the container name contains DUT1 or DUT2,
        # otherwise if_pci/if_name would be unbound below.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_mode='interrupt')

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last service in the chain faces the TG; others face peers.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        role1 = 'master'
        # Simplified from 'A or A and B' which is logically equivalent to 'A'.
        role2 = 'master' if node == kwargs['nodes'] else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] \
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        # Edge containers use per-container sockets, interior ones share
        # pipe sockets with their neighbours.
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine.

    Concrete engines (LXC, Docker) implement acquire/create/execute/stop/
    destroy; the shared VPP provisioning helpers live here and use
    self.execute() to run commands inside the container.
    """

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        :raises NotImplementedError: Abstract method.
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        :raises NotImplementedError: Abstract method.
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # LXC images are bare; Docker SUT images ship supervisor preinstalled.
        if isinstance(self, LXC):
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart=false\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

        # Deferred import: Robot Framework is only available at test run time.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        # Register PAPI and STATS sockets so other keywords can talk to
        # this container's VPP instance.
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        # Work on a copy so popping the main core below does not mutate the
        # container's cpuset_cpus (keeps repeated calls idempotent).
        cpuset_cpus = list(self.container.cpuset_cpus)

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = 'arm64' \
            if Topology.get_node_arch(self.container.node) == 'aarch64' \
            else 'amd64'

        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs:
            # https://github.com/lxc/lxc/issues/434
            ret, _, _ = self.container.ssh.exec_command_sudo(
                "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
                       c=self.container))
            if int(ret) != 0:
                raise RuntimeError('Failed to write {c.name} config.'.
                                   format(c=self.container))

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
                    '{options} 0 0'.format(
                        host_dir=host_dir, guest_dir=guest_dir[1:],
                        options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return 'RUNNING' in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # Zero return code means the container exists.
        return not int(ret)

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Non-empty output means the container exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        # Non-empty output means the container is running.
        return bool(stdout)
class Container(object):
    """Container class.

    Attribute bag with two special behaviours: reading a missing attribute
    yields None (instead of AttributeError), and re-assigning an attribute
    whose current value is a list appends instead of replacing. Setting
    'node' for the first time also opens an SSH session to that node.
    """

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        # Only called when normal lookup fails; missing attributes are None.
        return self.__dict__.get(attr, None)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        # Check if attribute exists
        if attr not in self.__dict__:
            # Creating new attribute; first 'node' assignment opens SSH.
            if attr == 'node':
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        elif isinstance(self.__dict__[attr], list):
            # Updating attribute based on type: lists accumulate values.
            self.__dict__[attr].append(value)
        else:
            self.__dict__[attr] = value