1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from string import Template
20 from collections import OrderedDict, Counter
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
33 class ContainerManager(object):
34 """Container lifecycle management class."""
36 def __init__(self, engine):
37 """Initialize Container Manager class.
39 :param engine: Container technology used (LXC/Docker/...).
41 :raises NotImplementedError: If container technology is not implemented.
44 self.engine = globals()[engine]()
46 raise NotImplementedError('{engine} is not implemented.'.
47 format(engine=engine))
48 self.containers = OrderedDict()
50 def get_container_by_name(self, name):
51 """Get container instance.
53 :param name: Container name.
55 :returns: Container instance.
57 :raises RuntimeError: If failed to get container with name.
60 return self.containers[name]
62 raise RuntimeError('Failed to get container with name: {name}'.
65 def construct_container(self, **kwargs):
66 """Construct container object on node with specified parameters.
68 :param kwargs: Key-value pairs used to construct container.
72 self.engine.initialize()
75 setattr(self.engine.container, key, kwargs[key])
77 # Set additional environmental variables
78 setattr(self.engine.container, 'env',
79 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
81 # Store container instance
82 self.containers[kwargs['name']] = self.engine.container
84 def construct_containers(self, **kwargs):
85 """Construct 1..N container(s) on node with specified name.
87 Ordinal number is automatically added to the name of container as
90 :param kwargs: Named parameters.
94 for i in range(kwargs['count']):
95 # Name will contain ordinal suffix
96 kwargs['name'] = ''.join([name, str(i+1)])
98 self.construct_container(i=i, **kwargs)
100 def acquire_all_containers(self):
101 """Acquire all containers."""
102 for container in self.containers:
103 self.engine.container = self.containers[container]
104 self.engine.acquire()
106 def build_all_containers(self):
107 """Build all containers."""
108 for container in self.containers:
109 self.engine.container = self.containers[container]
112 def create_all_containers(self):
113 """Create all containers."""
114 for container in self.containers:
115 self.engine.container = self.containers[container]
    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)
    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)
    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service inside the container.
            self.engine.install_supervisor()
            self.engine.start_vpp()
148 def restart_vpp_in_all_containers(self):
149 """Restart VPP in all containers."""
150 for container in self.containers:
151 self.engine.container = self.containers[container]
152 self.engine.restart_vpp()
154 def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
155 """Configure VPP in all containers.
157 :param chain_topology: Topology used for chaining containers can be
158 chain or cross_horiz. Chain topology is using 1 memif pair per
159 container. Cross_horiz topology is using 1 memif and 1 physical
160 interface in container (only single container can be configured).
161 :param kwargs: Named parameters.
162 :type chain_topology: str
165 # Count number of DUTs based on node's host information
166 dut_cnt = len(Counter([self.containers[container].node['host']
167 for container in self.containers]))
168 mod = len(self.containers)/dut_cnt
170 for i, container in enumerate(self.containers):
173 sid1 = i % mod * 2 + 1
174 sid2 = i % mod * 2 + 2
175 self.engine.container = self.containers[container]
176 guest_dir = self.engine.container.mnt[0].split(':')[1]
178 if chain_topology == 'chain':
179 self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
180 sid1=sid1, sid2=sid2,
183 elif chain_topology == 'cross_horiz':
184 self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
185 sid1=sid1, sid2=sid2,
188 elif chain_topology == 'chain_functional':
189 self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
190 sid1=sid1, sid2=sid2,
193 elif chain_topology == 'chain_ip4':
194 self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
195 sid1=sid1, sid2=sid2,
198 elif chain_topology == 'pipeline_ip4':
199 self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
200 sid1=sid1, sid2=sid2,
204 raise RuntimeError('Container topology {name} not implemented'.
205 format(name=chain_topology))
    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))
    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # NOTE(review): if the container name contains neither 'DUT1' nor
        # 'DUT2', if_pci/if_name stay unbound and a NameError is raised below
        # — confirm naming convention of callers.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))
246 def _configure_vpp_chain_functional(self, **kwargs):
247 """Configure VPP in chain topology with l2xc (functional).
249 :param kwargs: Named parameters.
252 self.engine.create_vpp_startup_config_func_dev()
253 self.engine.create_vpp_exec_config(
254 'memif_create_chain_functional.exec',
255 mid1=kwargs['mid1'], mid2=kwargs['mid2'],
256 sid1=kwargs['sid1'], sid2=kwargs['sid2'],
257 socket1='{guest_dir}/memif-{c.name}-{sid1}'.
258 format(c=self.engine.container, **kwargs),
259 socket2='{guest_dir}/memif-{c.name}-{sid2}'.
260 format(c=self.engine.container, **kwargs),
    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # Outermost containers talk to the TG; inner ones use synthetic MACs.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)
289 def _configure_vpp_pipeline_ip4(self, **kwargs):
290 """Configure VPP in pipeline topology with ip4.
292 :param kwargs: Named parameters.
295 self.engine.create_vpp_startup_config()
296 node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
297 mid1 = kwargs['mid1']
298 mid2 = kwargs['mid2']
301 if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
303 kwargs['mid2'] = kwargs['mid2'] \
304 if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
305 else kwargs['mid2'] + 1
306 vif1_mac = kwargs['tg_if1_mac'] \
307 if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
308 else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
309 vif2_mac = kwargs['tg_if2_mac'] \
310 if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
311 else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
312 socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
313 format(c=self.engine.container, **kwargs) \
314 if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
315 format(c=self.engine.container, **kwargs)
316 socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
317 format(c=self.engine.container, **kwargs) \
318 if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
319 else '{guest_dir}/memif-pipe-{mid2}'.\
320 format(c=self.engine.container, **kwargs)
322 self.engine.create_vpp_exec_config(
323 'memif_create_pipeline_ip4.exec',
324 mid1=kwargs['mid1'], mid2=kwargs['mid2'],
325 sid1=kwargs['sid1'], sid2=kwargs['sid2'],
326 socket1=socket1, socket2=socket2, role1=role1, role2=role2,
327 mac1='52:54:00:00:{0:02X}:01'.format(mid1),
328 mac2='52:54:00:00:{0:02X}:02'.format(mid2),
329 vif1_mac=vif1_mac, vif2_mac=vif2_mac)
331 def stop_all_containers(self):
332 """Stop all containers."""
333 for container in self.containers:
334 self.engine.container = self.containers[container]
337 def destroy_all_containers(self):
338 """Destroy all containers."""
339 for container in self.containers:
340 self.engine.container = self.containers[container]
341 self.engine.destroy()
344 class ContainerEngine(object):
345 """Abstract class for container engine."""
348 """Init ContainerEngine object."""
349 self.container = None
    def initialize(self):
        """Initialize container object (fresh, empty Container instance)."""
        self.container = Container()
    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        :raises NotImplementedError: Must be implemented by a subclass.
        """
        raise NotImplementedError
364 """Build container (compile)."""
365 raise NotImplementedError
368 """Create/deploy container."""
369 raise NotImplementedError
    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        :raises NotImplementedError: Must be implemented by a subclass.
        """
        raise NotImplementedError
380 """Stop container."""
381 raise NotImplementedError
384 """Destroy/remove container."""
385 raise NotImplementedError
388 """Info about container."""
389 raise NotImplementedError
    def system_info(self):
        """Show system-wide information about the container engine."""
        raise NotImplementedError
395 def install_supervisor(self):
396 """Install supervisord inside a container."""
397 if isinstance(self, LXC):
398 self.execute('sleep 3; apt-get update')
399 self.execute('apt-get install -y supervisor')
400 self.execute('echo "{config}" > {config_file} && '
401 'supervisord -c {config_file}'.
403 config='[unix_http_server]\n'
404 'file = /tmp/supervisor.sock\n\n'
405 '[rpcinterface:supervisor]\n'
406 'supervisor.rpcinterface_factory = '
407 'supervisor.rpcinterface:make_main_rpcinterface\n\n'
409 'serverurl = unix:///tmp/supervisor.sock\n\n'
411 'pidfile = /tmp/supervisord.pid\n'
412 'identifier = supervisor\n'
414 'logfile=/tmp/supervisord.log\n'
416 'nodaemon=false\n\n',
417 config_file=SUPERVISOR_CONF))
420 """Start VPP inside a container."""
421 self.execute('echo "{config}" >> {config_file}'.
423 config='[program:vpp]\n'
424 'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
426 'autorestart=false\n'
427 'redirect_stderr=true\n'
429 config_file=SUPERVISOR_CONF))
430 self.execute('supervisorctl reload')
431 self.execute('supervisorctl start vpp')
433 from robot.libraries.BuiltIn import BuiltIn
434 topo_instance = BuiltIn().get_library_instance(
435 'resources.libraries.python.topology.Topology')
436 topo_instance.add_new_socket(
440 '{root}/tmp/vpp_sockets/{name}/api.sock'.
441 format(root=self.container.root, name=self.container.name))
442 topo_instance.add_new_socket(
446 '{root}/tmp/vpp_sockets/{name}/stats.sock'.
447 format(root=self.container.root, name=self.container.name))
449 def restart_vpp(self):
450 """Restart VPP service inside a container."""
451 self.execute('supervisorctl restart vpp')
452 self.execute('cat /tmp/supervisord.log')
454 def create_base_vpp_startup_config(self):
455 """Create base startup configuration of VPP on container.
457 :returns: Base VPP startup configuration.
458 :rtype: VppConfigGenerator
460 cpuset_cpus = self.container.cpuset_cpus
462 # Create config instance
463 vpp_config = VppConfigGenerator()
464 vpp_config.set_node(self.container.node)
465 vpp_config.add_unix_cli_listen()
466 vpp_config.add_unix_nodaemon()
467 vpp_config.add_unix_exec('/tmp/running.exec')
468 vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
469 vpp_config.add_statseg_per_node_counters(value='on')
470 # We will pop the first core from the list to be a main core
471 vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
472 # If more cores in the list, the rest will be used as workers.
474 corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
475 vpp_config.add_cpu_corelist_workers(corelist_workers)
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))
    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))
    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))
    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))
    def is_container_running(self):
        """Check if container is running.

        :raises NotImplementedError: Must be implemented by a subclass.
        """
        raise NotImplementedError
    def is_container_present(self):
        """Check if container is present.

        :raises NotImplementedError: Must be implemented by a subclass.
        """
        raise NotImplementedError
554 def _configure_cgroup(self, name):
555 """Configure the control group associated with a container.
557 By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
558 container is initialized a new cgroup /docker or /lxc is created under
559 cpuset parent tree. This newly created cgroup is inheriting parent
560 setting for cpu/mem exclusive parameter and thus cannot be overriden
561 within /docker or /lxc cgroup. This function is supposed to set cgroups
562 to allow coexistence of both engines.
564 :param name: Name of cgroup.
566 :raises RuntimeError: If applying cgroup settings via cgset failed.
568 ret, _, _ = self.container.ssh.exec_command_sudo(
569 'cgset -r cpuset.cpu_exclusive=0 /')
571 raise RuntimeError('Failed to apply cgroup settings.')
573 ret, _, _ = self.container.ssh.exec_command_sudo(
574 'cgset -r cpuset.mem_exclusive=0 /')
576 raise RuntimeError('Failed to apply cgroup settings.')
578 ret, _, _ = self.container.ssh.exec_command_sudo(
579 'cgcreate -g cpuset:/{name}'.format(name=name))
581 raise RuntimeError('Failed to copy cgroup settings from root.')
583 ret, _, _ = self.container.ssh.exec_command_sudo(
584 'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
586 raise RuntimeError('Failed to apply cgroup settings.')
588 ret, _, _ = self.container.ssh.exec_command_sudo(
589 'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
591 raise RuntimeError('Failed to apply cgroup settings.')
594 class LXC(ContainerEngine):
595 """LXC implementation."""
597 # Implicit constructor is inherited.
599 def acquire(self, force=True):
600 """Acquire a privileged system object where configuration is stored.
602 :param force: If a container exists, destroy it and create a new
605 :raises RuntimeError: If creating the container or writing the container
608 if self.is_container_present():
614 target_arch = 'arm64' \
615 if Topology.get_node_arch(self.container.node) == 'aarch64' \
618 image = self.container.image if self.container.image else\
619 "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
621 cmd = 'lxc-create -t download --name {c.name} -- {image} '\
622 '--no-validate'.format(c=self.container, image=image)
624 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
626 raise RuntimeError('Failed to create container.')
628 self._configure_cgroup('lxc')
631 """Create/deploy an application inside a container on system.
633 :raises RuntimeError: If creating the container fails.
635 if self.container.mnt:
636 for mount in self.container.mnt:
637 host_dir, guest_dir = mount.split(':')
638 if host_dir.endswith('/'):
639 self.container.ssh.exec_command_sudo(
640 "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir))
641 options = 'bind,create=dir' \
642 if guest_dir.endswith('/') else 'bind,create=file'
643 entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
644 '{options} 0 0'.format(
645 host_dir=host_dir, guest_dir=guest_dir[1:],
647 ret, _, _ = self.container.ssh.exec_command_sudo(
648 "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
649 format(e=entry, c=self.container))
651 raise RuntimeError('Failed to write {c.name} config.'
652 .format(c=self.container))
654 cpuset_cpus = '{0}'.format(
655 ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
656 if self.container.cpuset_cpus else ''
658 ret, _, _ = self.container.ssh.exec_command_sudo(
659 'lxc-start --name {c.name} --daemon'.
660 format(c=self.container))
662 raise RuntimeError('Failed to start container {c.name}.'.
663 format(c=self.container))
664 self._lxc_wait('RUNNING')
666 # Workaround for LXC to be able to allocate all cpus including isolated.
667 ret, _, _ = self.container.ssh.exec_command_sudo(
668 'cgset --copy-from / lxc/')
670 raise RuntimeError('Failed to copy cgroup to LXC')
672 ret, _, _ = self.container.ssh.exec_command_sudo(
673 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
674 format(c=self.container, cpus=cpuset_cpus))
676 raise RuntimeError('Failed to set cpuset.cpus to container '
677 '{c.name}.'.format(c=self.container))
679 def execute(self, command):
680 """Start a process inside a running container.
682 Runs the specified command inside the container specified by name. The
683 container has to be running already.
685 :param command: Command to run inside container.
687 :raises RuntimeError: If running the command failed.
689 env = '--keep-env {0}'.format(
690 ' '.join('--set-var %s' % env for env in self.container.env))\
691 if self.container.env else ''
693 cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
694 "exit $?'".format(env=env, c=self.container, command=command)
696 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
698 raise RuntimeError('Failed to run command inside container '
699 '{c.name}.'.format(c=self.container))
704 :raises RuntimeError: If stopping the container failed.
706 cmd = 'lxc-stop --name {c.name}'.format(c=self.container)
708 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
710 raise RuntimeError('Failed to stop container {c.name}.'
711 .format(c=self.container))
712 self._lxc_wait('STOPPED|FROZEN')
715 """Destroy a container.
717 :raises RuntimeError: If destroying container failed.
719 cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)
721 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
723 raise RuntimeError('Failed to destroy container {c.name}.'
724 .format(c=self.container))
727 """Query and shows information about a container.
729 :raises RuntimeError: If getting info about a container failed.
731 cmd = 'lxc-info --name {c.name}'.format(c=self.container)
733 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
735 raise RuntimeError('Failed to get info about container {c.name}.'
736 .format(c=self.container))
738 def system_info(self):
739 """Check the current kernel for LXC support.
741 :raises RuntimeError: If checking LXC support failed.
743 cmd = 'lxc-checkconfig'
745 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
747 raise RuntimeError('Failed to check LXC support.')
749 def is_container_running(self):
750 """Check if container is running on node.
752 :returns: True if container is running.
754 :raises RuntimeError: If getting info about a container failed.
756 cmd = 'lxc-info --no-humanize --state --name {c.name}'\
757 .format(c=self.container)
759 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
761 raise RuntimeError('Failed to get info about container {c.name}.'
762 .format(c=self.container))
763 return True if 'RUNNING' in stdout else False
765 def is_container_present(self):
766 """Check if container is existing on node.
768 :returns: True if container is present.
770 :raises RuntimeError: If getting info about a container failed.
772 cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)
774 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
775 return False if int(ret) else True
777 def _lxc_wait(self, state):
778 """Wait for a specific container state.
780 :param state: Specify the container state(s) to wait for.
782 :raises RuntimeError: If waiting for state of a container failed.
784 cmd = 'lxc-wait --name {c.name} --state "{s}"'\
785 .format(c=self.container, s=state)
787 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
789 raise RuntimeError('Failed to wait for state "{s}" of container '
790 '{c.name}.'.format(s=state, c=self.container))
793 class Docker(ContainerEngine):
794 """Docker implementation."""
796 # Implicit constructor is inherited.
798 def acquire(self, force=True):
799 """Pull an image or a repository from a registry.
801 :param force: Destroy a container if exists.
803 :raises RuntimeError: If pulling a container failed.
805 if self.is_container_present():
811 if not self.container.image:
812 img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
813 if Topology.get_node_arch(self.container.node) == 'aarch64' \
814 else Constants.DOCKER_SUT_IMAGE_UBUNTU
815 setattr(self.container, 'image', img)
817 cmd = 'docker pull {image}'.format(image=self.container.image)
819 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
821 raise RuntimeError('Failed to create container {c.name}.'
822 .format(c=self.container))
824 if self.container.cpuset_cpus:
825 self._configure_cgroup('docker')
828 """Create/deploy container.
830 :raises RuntimeError: If creating a container failed.
832 cpuset_cpus = '--cpuset-cpus={0}'.format(
833 ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
834 if self.container.cpuset_cpus else ''
836 cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
837 if self.container.cpuset_mems is not None else ''
838 # Temporary workaround - disabling due to bug in memif
842 ' '.join('--env %s' % env for env in self.container.env))\
843 if self.container.env else ''
845 command = '{0}'.format(self.container.command)\
846 if self.container.command else ''
848 publish = '{0}'.format(
849 ' '.join('--publish %s' % var for var in self.container.publish))\
850 if self.container.publish else ''
852 volume = '{0}'.format(
853 ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
854 if self.container.mnt else ''
857 '--privileged --detach --interactive --tty --rm '\
858 '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
859 '{env} {volume} --name {container.name} {container.image} '\
860 '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
861 container=self.container, command=command,
862 env=env, publish=publish, volume=volume)
864 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
866 raise RuntimeError('Failed to create container {c.name}'
867 .format(c=self.container))
871 def execute(self, command):
872 """Start a process inside a running container.
874 Runs the specified command inside the container specified by name. The
875 container has to be running already.
877 :param command: Command to run inside container.
879 :raises RuntimeError: If running the command in a container failed.
881 cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
882 "exit $?'".format(c=self.container, command=command)
884 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
886 raise RuntimeError('Failed to execute command in container '
887 '{c.name}.'.format(c=self.container))
890 """Stop running container.
892 :raises RuntimeError: If stopping a container failed.
894 cmd = 'docker stop {c.name}'.format(c=self.container)
896 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
898 raise RuntimeError('Failed to stop container {c.name}.'
899 .format(c=self.container))
902 """Remove a container.
904 :raises RuntimeError: If removing a container failed.
906 cmd = 'docker rm --force {c.name}'.format(c=self.container)
908 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
910 raise RuntimeError('Failed to destroy container {c.name}.'
911 .format(c=self.container))
914 """Return low-level information on Docker objects.
916 :raises RuntimeError: If getting info about a container failed.
918 cmd = 'docker inspect {c.name}'.format(c=self.container)
920 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
922 raise RuntimeError('Failed to get info about container {c.name}.'
923 .format(c=self.container))
925 def system_info(self):
926 """Display the docker system-wide information.
928 :raises RuntimeError: If displaying system information failed.
930 cmd = 'docker system info'
932 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
934 raise RuntimeError('Failed to get system info.')
936 def is_container_present(self):
937 """Check if container is present on node.
939 :returns: True if container is present.
941 :raises RuntimeError: If getting info about a container failed.
943 cmd = 'docker ps --all --quiet --filter name={c.name}'\
944 .format(c=self.container)
946 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
948 raise RuntimeError('Failed to get info about container {c.name}.'
949 .format(c=self.container))
950 return True if stdout else False
952 def is_container_running(self):
953 """Check if container is running on node.
955 :returns: True if container is running.
957 :raises RuntimeError: If getting info about a container failed.
959 cmd = 'docker ps --quiet --filter name={c.name}'\
960 .format(c=self.container)
962 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
964 raise RuntimeError('Failed to get info about container {c.name}.'
965 .format(c=self.container))
966 return True if stdout else False
969 class Container(object):
970 """Container class."""
973 """Initialize Container object."""
976 def __getattr__(self, attr):
977 """Get attribute custom implementation.
979 :param attr: Attribute to get.
981 :returns: Attribute value or None.
985 return self.__dict__[attr]
989 def __setattr__(self, attr, value):
990 """Set attribute custom implementation.
992 :param attr: Attribute to set.
993 :param value: Value to set.
998 # Check if attribute exists
1001 # Creating new attribute
1003 self.__dict__['ssh'] = SSH()
1004 self.__dict__['ssh'].connect(value)
1005 self.__dict__[attr] = value
1007 # Updating attribute base of type
1008 if isinstance(self.__dict__[attr], list):
1009 self.__dict__[attr].append(value)
1011 self.__dict__[attr] = value