1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 # Bug workaround in pylint for abstract classes.
15 # pylint: disable=W0223
17 """Library to manipulate Containers."""
19 from string import Template
20 from collections import OrderedDict, Counter
22 from resources.libraries.python.ssh import SSH
23 from resources.libraries.python.Constants import Constants
24 from resources.libraries.python.topology import Topology, SocketType
25 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
28 __all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]
30 SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine class is looked up by name among classes defined in
            # this module (LXC, Docker, ...).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If requested chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Floor division keeps `mod` an integer on both Python 2 and 3;
        # true division would produce a float under Python 3 and break
        # the `%` arithmetic on container/socket IDs below.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # Container IDs (mid) and socket IDs (sid) are per-DUT ordinals.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # Pick the physical interface belonging to this DUT.
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_mode='interrupt')

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last containers in the chain face the traffic generator,
        # the rest face the neighbouring container's memif.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        # Edge containers use per-container memif sockets, interior ones
        # share pipe sockets with their neighbours.
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """Info about complete container system."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        # LXC images are bare, so the package must be installed first;
        # Docker images used here are assumed to ship it already.
        if isinstance(self, LXC):
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile=/tmp/supervisord.log\n'
                         'loglevel=debug\n'
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart=false\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

        # Register the container's VPP API/stats sockets with the topology
        # so PAPI/stats clients can reach this VPP instance. Imported here
        # to avoid a hard dependency on Robot outside test runs.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        # Work on a copy so that popping the main core below does not
        # destructively mutate the container's cpuset_cpus allocation.
        cpuset_cpus = list(self.container.cpuset_cpus)

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        # Image architecture must match the node architecture.
        target_arch = 'arm64' \
            if Topology.get_node_arch(self.container.node) == 'aarch64' \
            else 'amd64'

        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # Write one lxc.mount.entry per requested bind mount.
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                if host_dir.endswith('/'):
                    self.container.ssh.exec_command_sudo(
                        "sh -c 'mkdir -p {host_dir}'".format(host_dir=host_dir))
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} '\
                    '{guest_dir} none ' \
                    '{options} 0 0'.format(c=self.container,
                                           host_dir=host_dir,
                                           guest_dir=guest_dir[1:],
                                           options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.
            format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        # Default image is selected by node architecture when none is given.
        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = ''

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Invoked only when normal attribute lookup fails, so attributes that
        were never assigned read as None instead of raising AttributeError.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        return self.__dict__.get(attr, None)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        # Check if attribute exists
        if attr in self.__dict__:
            # Updating attribute base of type: list attributes accumulate
            # assigned values, anything else is overwritten.
            current = self.__dict__[attr]
            if isinstance(current, list):
                current.append(value)
            else:
                self.__dict__[attr] = value
        else:
            # Creating new attribute; assigning a node also opens an SSH
            # connection to it as a side effect.
            if attr == 'node':
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value