# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bug workaround in pylint for abstract classes.
# pylint: disable=W0223

"""Library to manipulate Containers."""

from string import Template
from collections import OrderedDict, Counter

from resources.libraries.python.ssh import SSH
from resources.libraries.python.Constants import Constants
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator

__all__ = ["ContainerManager", "ContainerEngine", "LXC", "Docker", "Container"]

# Path to the supervisord configuration file inside a container.
SUPERVISOR_CONF = '/etc/supervisord.conf'
class ContainerManager(object):
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            # Engine class is looked up by name among the classes defined in
            # this module (LXC, Docker, ...).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError('{engine} is not implemented.'.
                                      format(engine=engine))
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError('Failed to get container with name: {name}'.
                               format(name=name))

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(self.engine.container, 'env',
                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))

        # Store container instance
        self.containers[kwargs['name']] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs['name']
        for i in range(kwargs['count']):
            # Name will contain ordinal suffix
            kwargs['name'] = ''.join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service.
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(Counter([self.containers[container].node['host']
                               for container in self.containers]))
        # Number of containers per DUT; floor division keeps the modulus an
        # integer under Python 3 ('/' would yield a float).
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # memif and socket IDs are local to a DUT, hence modulo `mod`.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(':')[1]

            if chain_topology == 'chain':
                self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
                                               sid1=sid1, sid2=sid2,
                                               guest_dir=guest_dir,
                                               **kwargs)
            elif chain_topology == 'cross_horiz':
                self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
                                                sid1=sid1, sid2=sid2,
                                                guest_dir=guest_dir,
                                                **kwargs)
            elif chain_topology == 'chain_functional':
                self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
                                                     sid1=sid1, sid2=sid2,
                                                     guest_dir=guest_dir,
                                                     **kwargs)
            elif chain_topology == 'chain_ip4':
                self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
                                              sid1=sid1, sid2=sid2,
                                              guest_dir=guest_dir,
                                              **kwargs)
            elif chain_topology == 'pipeline_ip4':
                self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
                                                 sid1=sid1, sid2=sid2,
                                                 guest_dir=guest_dir,
                                                 **kwargs)
            else:
                raise RuntimeError('Container topology {name} not implemented'.
                                   format(name=chain_topology))

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_l2xc.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if 'DUT1' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut1_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut1_if'])
        if 'DUT2' in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs['dut2_if'])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs['dut2_if'])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            'memif_create_cross_horizon.exec',
            mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs))

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            'memif_create_chain_functional.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            rx_queues=1)

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last containers use the TG-facing MACs; others use the MAC of
        # the neighbouring container's virtual interface.
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        self.engine.create_vpp_exec_config(
            'memif_create_chain_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1='{guest_dir}/memif-{c.name}-{sid1}'.
            format(c=self.engine.container, **kwargs),
            socket2='{guest_dir}/memif-{c.name}-{sid2}'.
            format(c=self.engine.container, **kwargs),
            mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
            mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
        mid1 = kwargs['mid1']
        mid2 = kwargs['mid2']
        role1 = 'master'
        role2 = 'master' \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else 'slave'
        kwargs['mid2'] = kwargs['mid2'] \
            if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
            else kwargs['mid2'] + 1
        vif1_mac = kwargs['tg_if1_mac'] \
            if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
            else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
        vif2_mac = kwargs['tg_if2_mac'] \
            if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
            else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
        # Edge containers use per-container memif sockets; interior ones share
        # a 'pipe' socket with the neighbour.
        socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
            format(c=self.engine.container, **kwargs)
        socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
            format(c=self.engine.container, **kwargs) \
            if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
            else '{guest_dir}/memif-pipe-{mid2}'.\
            format(c=self.engine.container, **kwargs)

        self.engine.create_vpp_exec_config(
            'memif_create_pipeline_ip4.exec',
            mid1=kwargs['mid1'], mid2=kwargs['mid2'],
            sid1=kwargs['sid1'], sid2=kwargs['sid2'],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1='52:54:00:00:{0:02X}:01'.format(mid1),
            mac2='52:54:00:00:{0:02X}:02'.format(mid2),
            vif1_mac=vif1_mac, vif2_mac=vif2_mac)

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine(object):
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            # Fresh LXC containers need the package installed first; Docker
            # images are expected to ship it.
            self.execute('sleep 3; apt-get update')
            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'unlink /tmp/supervisor.sock && '
                     'supervisord -c {config_file}'.
                     format(
                         config='[unix_http_server]\n'
                         'file = /tmp/supervisor.sock\n\n'
                         '[rpcinterface:supervisor]\n'
                         'supervisor.rpcinterface_factory = '
                         'supervisor.rpcinterface:make_main_rpcinterface\n\n'
                         '[supervisorctl]\n'
                         'serverurl = unix:///tmp/supervisor.sock\n\n'
                         '[supervisord]\n'
                         'pidfile = /tmp/supervisord.pid\n'
                         'identifier = supervisor\n'
                         'directory = /tmp\n'
                         'logfile = /tmp/supervisord.log\n'
                         'loglevel = debug\n'
                         'nodaemon = false\n\n',
                         config_file=SUPERVISOR_CONF))

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command = /usr/bin/vpp -c /etc/vpp/startup.conf\n'
                         'autostart = false\n'
                         'autorestart = false\n'
                         'redirect_stderr = true\n'
                         'priority = 1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
        self.execute('supervisorctl start vpp')

        # Local import to avoid a hard dependency on Robot Framework when the
        # library is used outside a test run.
        from robot.libraries.BuiltIn import BuiltIn
        topo_instance = BuiltIn().get_library_instance(
            'resources.libraries.python.topology.Topology')
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/api.sock'.
            format(root=self.container.root, name=self.container.name))
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            '{root}/tmp/vpp_sockets/{name}/stats.sock'.
            format(root=self.container.root, name=self.container.name))

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute('supervisorctl restart vpp')
        self.execute('cat /tmp/supervisord.log')

    def create_base_vpp_startup_config(self):
        """Create base startup configuration of VPP on container.

        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        if cpuset_cpus:
            corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level('debug')
        vpp_config.add_plugin('disable', 'default')
        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
        vpp_config.add_plugin('enable', 'memif_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp_device tests.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec('/tmp/running.exec')
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value='on')
        vpp_config.add_plugin('disable', 'dpdk_plugin.so')

        # Apply configuration
        self.execute('mkdir -p /etc/vpp/')
        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
                     .format(config=vpp_config.get_config_str()))

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = '/tmp/running.exec'

        template = '{res}/{tpl}'.format(
            res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)

        with open(template, 'r') as src_file:
            src = Template(src_file.read())
            self.execute('echo "{out}" > {running}'.format(
                out=src.safe_substitute(**kwargs), running=running))

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /')
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgcreate -g cpuset:/{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup settings from root.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.cpu_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset -r cpuset.mem_exclusive=0 /{name}'.format(name=name))
        if int(ret) != 0:
            raise RuntimeError('Failed to apply cgroup settings.')
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = 'arm64' \
            if Topology.get_node_arch(self.container.node) == 'aarch64' \
            else 'amd64'

        image = self.container.image if self.container.image else\
            "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container.')

        self._configure_cgroup('lxc')

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            ret, _, _ = self.container.ssh.exec_command_sudo(
                "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
                       c=self.container))
            if int(ret) != 0:
                raise RuntimeError('Failed to write {c.name} config.'.
                                   format(c=self.container))

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
                options = 'bind,create=dir' \
                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
                    '{options} 0 0'.format(
                        host_dir=host_dir, guest_dir=guest_dir[1:],
                        options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
                if int(ret) != 0:
                    raise RuntimeError('Failed to write {c.name} config.'
                                       .format(c=self.container))

        cpuset_cpus = '{0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-start --name {c.name} --daemon'.format(c=self.container))
        if int(ret) != 0:
            raise RuntimeError('Failed to start container {c.name}.'.
                               format(c=self.container))
        self._lxc_wait('RUNNING')

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            'cgset --copy-from / lxc/')
        if int(ret) != 0:
            raise RuntimeError('Failed to copy cgroup to LXC')

        ret, _, _ = self.container.ssh.exec_command_sudo(
            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
            format(c=self.container, cpus=cpuset_cpus))
        if int(ret) != 0:
            raise RuntimeError('Failed to set cpuset.cpus to container '
                               '{c.name}.'.format(c=self.container))

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = '--keep-env {0}'.format(
            ' '.join('--set-var %s' % env for env in self.container.env))\
            if self.container.env else ''

        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
            "exit $?'".format(env=env, c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to run command inside container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = 'lxc-stop --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))
        self._lxc_wait('STOPPED|FROZEN')

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = 'lxc-destroy --force --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = 'lxc-checkconfig'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to check LXC support.')

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --state --name {c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if 'RUNNING' in stdout else False

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'lxc-info --no-humanize --name {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return False if int(ret) else True

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = 'lxc-wait --name {c.name} --state "{s}"'\
            .format(c=self.container, s=state)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to wait for state "{s}" of container '
                               '{c.name}.'.format(s=state, c=self.container))
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == 'aarch64' \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, 'image', img)

        cmd = 'docker pull {image}'.format(image=self.container.image)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}.'
                               .format(c=self.container))

        if self.container.cpuset_cpus:
            self._configure_cgroup('docker')

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = '--cpuset-cpus={0}'.format(
            ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
            if self.container.cpuset_cpus else ''

        cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
            if self.container.cpuset_mems is not None else ''
        # Temporary workaround - disabling due to bug in memif

        env = '{0}'.format(
            ' '.join('--env %s' % env for env in self.container.env))\
            if self.container.env else ''

        command = '{0}'.format(self.container.command)\
            if self.container.command else ''

        publish = '{0}'.format(
            ' '.join('--publish %s' % var for var in self.container.publish))\
            if self.container.publish else ''

        volume = '{0}'.format(
            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
            if self.container.mnt else ''

        cmd = 'docker run '\
            '--privileged --detach --interactive --tty --rm '\
            '--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} '\
            '{env} {volume} --name {container.name} {container.image} '\
            '{command}'.format(cpuset_cpus=cpuset_cpus, cpuset_mems=cpuset_mems,
                               container=self.container, command=command,
                               env=env, publish=publish, volume=volume)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to create container {c.name}'
                               .format(c=self.container))

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
            "exit $?'".format(c=self.container, command=command)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError('Failed to execute command in container '
                               '{c.name}.'.format(c=self.container))

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = 'docker stop {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to stop container {c.name}.'
                               .format(c=self.container))

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = 'docker rm --force {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to destroy container {c.name}.'
                               .format(c=self.container))

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker inspect {c.name}'.format(c=self.container)

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = 'docker system info'

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get system info.')

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --all --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = 'docker ps --quiet --filter name={c.name}'\
            .format(c=self.container)

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError('Failed to get info about container {c.name}.'
                               .format(c=self.container))
        return True if stdout else False
class Container(object):
    """Container class."""

    def __init__(self):
        """Initialize Container object."""
        pass

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Called only when normal attribute lookup fails, so unknown attributes
        read as None instead of raising AttributeError.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == 'node':
                # Open an SSH connection to the node as soon as it is assigned.
                self.__dict__['ssh'] = SSH()
                self.__dict__['ssh'].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value