-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Library to manipulate Containers."""
+from string import Template
from collections import OrderedDict, Counter
from resources.libraries.python.ssh import SSH
-from resources.libraries.python.constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.topology import Topology
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
try:
self.engine = globals()[engine]()
except KeyError:
- raise NotImplementedError('{e} is not implemented.'
- .format(e=engine))
+ raise NotImplementedError('{engine} is not implemented.'.
+ format(engine=engine))
self.containers = OrderedDict()
def get_container_by_name(self, name):
try:
return self.containers[name]
except KeyError:
- raise RuntimeError('Failed to get container with name: {n}'
- .format(n=name))
+ raise RuntimeError('Failed to get container with name: {name}'.
+ format(name=name))
def construct_container(self, **kwargs):
"""Construct container object on node with specified parameters.
# Set additional environmental variables
setattr(self.engine.container, 'env',
- 'MICROSERVICE_LABEL={n}'.format(n=kwargs['name']))
-
- # Set cpuset.cpus cgroup
- skip_cnt = kwargs['cpu_skip']
- if not kwargs['cpu_shared']:
- skip_cnt += kwargs['i'] * kwargs['cpu_count']
- self.engine.container.cpuset_cpus = \
- CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
- cpu_node=kwargs['cpuset_mems'],
- skip_cnt=skip_cnt,
- cpu_cnt=kwargs['cpu_count'],
- smt_used=kwargs['smt_used'])
+ 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
# Store container instance
self.containers[kwargs['name']] = self.engine.container
self.engine.container = self.containers[container]
self.engine.execute(command)
- def install_vpp_in_all_containers(self):
- """Install VPP into all containers."""
+ def start_vpp_in_all_containers(self):
+ """Start VPP in all containers."""
for container in self.containers:
self.engine.container = self.containers[container]
# We need to install supervisor client/server system to control VPP
# as a service
self.engine.install_supervisor()
- self.engine.install_vpp()
+ self.engine.start_vpp()
+
+ def restart_vpp_in_all_containers(self):
+ """Restart VPP in all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
self.engine.restart_vpp()
- def configure_vpp_in_all_containers(self, vat_template_file):
+ def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
"""Configure VPP in all containers.
- :param vat_template_file: Template file name of a VAT script.
- :type vat_template_file: str
+ :param chain_topology: Topology used for chaining containers. Supported
+ values are chain, cross_horiz, chain_functional, chain_ip4 and
+ pipeline_ip4. Chain topology is using 1 memif pair per
+ container. Cross_horiz topology is using 1 memif and 1 physical
+ interface in container (only single container can be configured).
+ :param kwargs: Named parameters.
+ :type chain_topology: str
+ :type kwargs: dict
"""
# Count number of DUTs based on node's host information
dut_cnt = len(Counter([self.containers[container].node['host']
for container in self.containers]))
- container_cnt = len(self.containers)
- mod = container_cnt/dut_cnt
+ mod = len(self.containers)/dut_cnt
for i, container in enumerate(self.containers):
mid1 = i % mod + 1
sid1 = i % mod * 2 + 1
sid2 = i % mod * 2 + 2
self.engine.container = self.containers[container]
- self.engine.create_vpp_startup_config()
- self.engine.create_vpp_exec_config(vat_template_file, mid1=mid1,
- mid2=mid2, sid1=sid1, sid2=sid2,
- socket1='memif-{c.name}-{sid}'
- .format(c=self.engine.container,
- sid=sid1),
- socket2='memif-{c.name}-{sid}'
- .format(c=self.engine.container,
- sid=sid2))
+ guest_dir = self.engine.container.mnt[0].split(':')[1]
+
+ if chain_topology == 'chain':
+ self._configure_vpp_chain_l2xc(mid1=mid1, mid2=mid2,
+ sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir,
+ **kwargs)
+ elif chain_topology == 'cross_horiz':
+ self._configure_vpp_cross_horiz(mid1=mid1, mid2=mid2,
+ sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir,
+ **kwargs)
+ elif chain_topology == 'chain_functional':
+ self._configure_vpp_chain_functional(mid1=mid1, mid2=mid2,
+ sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir,
+ **kwargs)
+ elif chain_topology == 'chain_ip4':
+ self._configure_vpp_chain_ip4(mid1=mid1, mid2=mid2,
+ sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir,
+ **kwargs)
+ elif chain_topology == 'pipeline_ip4':
+ self._configure_vpp_pipeline_ip4(mid1=mid1, mid2=mid2,
+ sid1=sid1, sid2=sid2,
+ guest_dir=guest_dir,
+ **kwargs)
+ else:
+ raise RuntimeError('Container topology {name} not implemented'.
+ format(name=chain_topology))
+
+ def _configure_vpp_chain_l2xc(self, **kwargs):
+ """Configure VPP in chain topology with l2xc.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ self.engine.create_vpp_startup_config()
+ self.engine.create_vpp_exec_config(
+ 'memif_create_chain_l2xc.exec',
+ mid1=kwargs['mid1'], mid2=kwargs['mid2'],
+ sid1=kwargs['sid1'], sid2=kwargs['sid2'],
+ socket1='{guest_dir}/memif-{c.name}-{sid1}'.
+ format(c=self.engine.container, **kwargs),
+ socket2='{guest_dir}/memif-{c.name}-{sid2}'.
+ format(c=self.engine.container, **kwargs))
+
+ def _configure_vpp_cross_horiz(self, **kwargs):
+ """Configure VPP in cross horizontal topology (single memif).
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ if 'DUT1' in self.engine.container.name:
+ if_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs['dut1_if'])
+ if_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs['dut1_if'])
+ if 'DUT2' in self.engine.container.name:
+ if_pci = Topology.get_interface_pci_addr(
+ self.engine.container.node, kwargs['dut2_if'])
+ if_name = Topology.get_interface_name(
+ self.engine.container.node, kwargs['dut2_if'])
+ self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
+ self.engine.create_vpp_exec_config(
+ 'memif_create_cross_horizon.exec',
+ mid1=kwargs['mid1'], sid1=kwargs['sid1'], if_name=if_name,
+ socket1='{guest_dir}/memif-{c.name}-{sid1}'.
+ format(c=self.engine.container, **kwargs))
+
+ def _configure_vpp_chain_functional(self, **kwargs):
+ """Configure VPP in chain topology with l2xc (functional).
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ self.engine.create_vpp_startup_config_func_dev()
+ self.engine.create_vpp_exec_config(
+ 'memif_create_chain_functional.exec',
+ mid1=kwargs['mid1'], mid2=kwargs['mid2'],
+ sid1=kwargs['sid1'], sid2=kwargs['sid2'],
+ socket1='{guest_dir}/memif-{c.name}-{sid1}'.
+ format(c=self.engine.container, **kwargs),
+ socket2='{guest_dir}/memif-{c.name}-{sid2}'.
+ format(c=self.engine.container, **kwargs),
+ rx_mode='interrupt')
+
+ def _configure_vpp_chain_ip4(self, **kwargs):
+ """Configure VPP in chain topology with ip4.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ self.engine.create_vpp_startup_config()
+
+ vif1_mac = kwargs['tg_if1_mac'] \
+ if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
+ else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
+ vif2_mac = kwargs['tg_if2_mac'] \
+ if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
+ else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
+ self.engine.create_vpp_exec_config(
+ 'memif_create_chain_ip4.exec',
+ mid1=kwargs['mid1'], mid2=kwargs['mid2'],
+ sid1=kwargs['sid1'], sid2=kwargs['sid2'],
+ socket1='{guest_dir}/memif-{c.name}-{sid1}'.
+ format(c=self.engine.container, **kwargs),
+ socket2='{guest_dir}/memif-{c.name}-{sid2}'.
+ format(c=self.engine.container, **kwargs),
+ mac1='52:54:00:00:{0:02X}:01'.format(kwargs['mid1']),
+ mac2='52:54:00:00:{0:02X}:02'.format(kwargs['mid2']),
+ vif1_mac=vif1_mac, vif2_mac=vif2_mac)
+
+ def _configure_vpp_pipeline_ip4(self, **kwargs):
+ """Configure VPP in pipeline topology with ip4.
+
+ :param kwargs: Named parameters.
+ :type kwargs: dict
+ """
+ self.engine.create_vpp_startup_config()
+ node = (kwargs['mid1'] - 1) % kwargs['nodes'] + 1
+ mid1 = kwargs['mid1']
+ mid2 = kwargs['mid2']
+ role1 = 'master'
+ role2 = 'master' \
+ if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
+ else 'slave'
+ kwargs['mid2'] = kwargs['mid2'] \
+ if node == kwargs['nodes'] or node == kwargs['nodes'] and node == 1\
+ else kwargs['mid2'] + 1
+ vif1_mac = kwargs['tg_if1_mac'] \
+ if (kwargs['mid1'] - 1) % kwargs['nodes'] + 1 == 1 \
+ else '52:54:00:00:{0:02X}:02'.format(kwargs['mid1'] - 1)
+ vif2_mac = kwargs['tg_if2_mac'] \
+ if (kwargs['mid2'] - 1) % kwargs['nodes'] + 1 == kwargs['nodes'] \
+ else '52:54:00:00:{0:02X}:01'.format(kwargs['mid2'] + 1)
+ socket1 = '{guest_dir}/memif-{c.name}-{sid1}'.\
+ format(c=self.engine.container, **kwargs) \
+ if node == 1 else '{guest_dir}/memif-pipe-{mid1}'.\
+ format(c=self.engine.container, **kwargs)
+ socket2 = '{guest_dir}/memif-{c.name}-{sid2}'.\
+ format(c=self.engine.container, **kwargs) \
+ if node == 1 and kwargs['nodes'] == 1 or node == kwargs['nodes'] \
+ else '{guest_dir}/memif-pipe-{mid2}'.\
+ format(c=self.engine.container, **kwargs)
+
+ self.engine.create_vpp_exec_config(
+ 'memif_create_pipeline_ip4.exec',
+ mid1=kwargs['mid1'], mid2=kwargs['mid2'],
+ sid1=kwargs['sid1'], sid2=kwargs['sid2'],
+ socket1=socket1, socket2=socket2, role1=role1, role2=role2,
+ mac1='52:54:00:00:{0:02X}:01'.format(mid1),
+ mac2='52:54:00:00:{0:02X}:02'.format(mid2),
+ vif1_mac=vif1_mac, vif2_mac=vif2_mac)
def stop_all_containers(self):
"""Stop all containers."""
def install_supervisor(self):
"""Install supervisord inside a container."""
- self.execute('sleep 3')
- self.execute('apt-get update')
- self.execute('apt-get install -y supervisor')
- self.execute('echo "{0}" > {1}'
- .format(
- '[unix_http_server]\n'
+ if isinstance(self, LXC):
+ self.execute('sleep 3; apt-get update')
+ self.execute('apt-get install -y supervisor')
+ self.execute('echo "{config}" > {config_file} && '
+ 'supervisord -c {config_file}'.
+ format(
+ config='[unix_http_server]\n'
'file = /tmp/supervisor.sock\n\n'
'[rpcinterface:supervisor]\n'
'supervisor.rpcinterface_factory = '
'logfile=/tmp/supervisord.log\n'
'loglevel=debug\n'
'nodaemon=false\n\n',
- SUPERVISOR_CONF))
- self.execute('supervisord -c {0}'.format(SUPERVISOR_CONF))
-
- def install_vpp(self, install_dkms=False):
- """Install VPP inside a container.
-
- :param install_dkms: If install dkms package. This will impact install
- time. Dkms is required for installation of vpp-dpdk-dkms. Default is
- false.
- :type install_dkms: bool
- """
- self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
- self.execute('apt-get update')
- if install_dkms:
- self.execute('apt-get install -y dkms && '
- 'dpkg -i --force-all {0}/install_dir/*.deb'
- .format(self.container.guest_dir))
- else:
- self.execute('for i in $(ls -I \"*dkms*\" {0}/install_dir/); '
- 'do dpkg -i --force-all {0}/install_dir/$i; done'
- .format(self.container.guest_dir))
- self.execute('apt-get -f install -y')
- self.execute('apt-get install -y ca-certificates')
- self.execute('echo "{0}" >> {1}'
- .format(
- '[program:vpp]\n'
+ config_file=SUPERVISOR_CONF))
+
+ def start_vpp(self):
+ """Start VPP inside a container."""
+ self.execute('echo "{config}" >> {config_file}'.
+ format(
+ config='[program:vpp]\n'
'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
+ 'autostart=false\n'
'autorestart=false\n'
'redirect_stderr=true\n'
'priority=1',
- SUPERVISOR_CONF))
+ config_file=SUPERVISOR_CONF))
self.execute('supervisorctl reload')
+ self.execute('supervisorctl start vpp')
def restart_vpp(self):
"""Restart VPP service inside a container."""
self.execute('supervisorctl restart vpp')
+ self.execute('cat /tmp/supervisord.log')
- def create_vpp_startup_config(self,
- config_filename='/etc/vpp/startup.conf'):
+ def create_base_vpp_startup_config(self):
"""Create base startup configuration of VPP on container.
- :param config_filename: Startup configuration file name.
- :type config_filename: str
+ :returns: Base VPP startup configuration.
+ :rtype: VppConfigGenerator
"""
cpuset_cpus = self.container.cpuset_cpus
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
- # We will pop first core from list to be main core
+ vpp_config.add_socksvr()
+ # We will pop the first core from the list to be a main core
vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
- # if this is not only core in list, the rest will be used as workers.
+ # If more cores in the list, the rest will be used as workers.
if cpuset_cpus:
corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
+
+ return vpp_config
+
+ def create_vpp_startup_config(self):
+ """Create startup configuration of VPP without DPDK on container.
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+
+ # Apply configuration
+ self.execute('mkdir -p /etc/vpp/')
+ self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+ .format(config=vpp_config.get_config_str()))
+
+ def create_vpp_startup_config_dpdk_dev(self, *devices):
+ """Create startup configuration of VPP with DPDK on container.
+
+ :param devices: List of PCI devices to add.
+ :type devices: list
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_dpdk_dev(*devices)
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ vpp_config.add_dpdk_log_level('debug')
+ vpp_config.add_plugin('disable', 'default')
+ vpp_config.add_plugin('enable', 'dpdk_plugin.so')
+ vpp_config.add_plugin('enable', 'memif_plugin.so')
+
+ # Apply configuration
+ self.execute('mkdir -p /etc/vpp/')
+ self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+ .format(config=vpp_config.get_config_str()))
+
+ def create_vpp_startup_config_func_dev(self):
+ """Create startup configuration of VPP on container for functional
+ vpp_device tests.
+ """
+ # Create config instance
+ vpp_config = VppConfigGenerator()
+ vpp_config.set_node(self.container.node)
+ vpp_config.add_unix_cli_listen()
+ vpp_config.add_unix_nodaemon()
+ vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_socksvr()
vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+ # Apply configuration
self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{c}" | tee {f}'
- .format(c=vpp_config.get_config_str(),
- f=config_filename))
+ self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+ .format(config=vpp_config.get_config_str()))
- def create_vpp_exec_config(self, vat_template_file, **kwargs):
+ def create_vpp_exec_config(self, template_file, **kwargs):
"""Create VPP exec configuration on container.
- :param vat_template_file: File name of a VAT template script.
- :param kwargs: Parameters for VAT script.
- :type vat_template_file: str
+ :param template_file: File name of a template script.
+ :param kwargs: Parameters for script.
+ :type template_file: str
:type kwargs: dict
"""
- vat_file_path = '{p}/{f}'.format(p=Constants.RESOURCES_TPL_VAT,
- f=vat_template_file)
+ running = '/tmp/running.exec'
+
+ template = '{res}/{tpl}'.format(
+ res=Constants.RESOURCES_TPL_CONTAINER, tpl=template_file)
- with open(vat_file_path, 'r') as template_file:
- cmd_template = template_file.readlines()
- for line_tmpl in cmd_template:
- vat_cmd = line_tmpl.format(**kwargs)
- self.execute('echo "{c}" >> /tmp/running.exec'
- .format(c=vat_cmd.replace('\n', '')))
+ with open(template, 'r') as src_file:
+ src = Template(src_file.read())
+ self.execute('echo "{out}" > {running}'.format(
+ out=src.safe_substitute(**kwargs), running=running))
def is_container_running(self):
"""Check if container is running."""
def _configure_cgroup(self, name):
"""Configure the control group associated with a container.
- By default the cpuset cgroup is using exclusive CPU/MEM. When Docker
+ By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
container is initialized a new cgroup /docker or /lxc is created under
cpuset parent tree. This newly created cgroup is inheriting parent
setting for cpu/mem exclusive parameter and thus cannot be overriden
- within /docker or /lxc cgroup. This patch is supposed to set cpu/mem
- exclusive parameter for both parent and subgroup.
+ within /docker or /lxc cgroup. This function is supposed to set cgroups
+ to allow coexistence of both engines.
:param name: Name of cgroup.
:type name: str
class LXC(ContainerEngine):
"""LXC implementation."""
- def __init__(self):
- """Initialize LXC object."""
- super(LXC, self).__init__()
+ # Implicit constructor is inherited.
def acquire(self, force=True):
"""Acquire a privileged system object where configuration is stored.
:param force: If a container exists, destroy it and create a new
- container.
+ container.
:type force: bool
:raises RuntimeError: If creating the container or writing the container
- config fails.
+ config fails.
"""
if self.is_container_present():
if force:
else:
return
+ target_arch = 'arm64' \
+ if Topology.get_node_arch(self.container.node) == 'aarch64' \
+ else 'amd64'
+
image = self.container.image if self.container.image else\
- "-d ubuntu -r xenial -a amd64"
+ "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
cmd = 'lxc-create -t download --name {c.name} -- {image} '\
'--no-validate'.format(c=self.container, image=image)
if int(ret) != 0:
raise RuntimeError('Failed to create container.')
- if self.container.host_dir and self.container.guest_dir:
- entry = 'lxc.mount.entry = '\
- '{c.host_dir} /var/lib/lxc/{c.name}/rootfs{c.guest_dir} ' \
- 'none bind,create=dir 0 0'.format(c=self.container)
- ret, _, _ = self.container.ssh.exec_command_sudo(
- "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'"
- .format(e=entry, c=self.container))
- if int(ret) != 0:
- raise RuntimeError('Failed to write {c.name} config.'
- .format(c=self.container))
self._configure_cgroup('lxc')
def create(self):
:raises RuntimeError: If creating the container fails.
"""
+ if self.container.mnt:
+ for mount in self.container.mnt:
+ host_dir, guest_dir = mount.split(':')
+ options = 'bind,create=dir' \
+ if guest_dir.endswith('/') else 'bind,create=file'
+ entry = 'lxc.mount.entry = {host_dir} '\
+ '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
+ '{options} 0 0'.format(c=self.container,
+ host_dir=host_dir,
+ guest_dir=guest_dir,
+ options=options)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
+ format(e=entry, c=self.container))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to write {c.name} config.'
+ .format(c=self.container))
+
cpuset_cpus = '{0}'.format(
','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
if self.container.cpuset_cpus else ''
- cmd = 'lxc-start --name {c.name} --daemon'.format(c=self.container)
-
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'lxc-start --name {c.name} --daemon'.
+ format(c=self.container))
if int(ret) != 0:
- raise RuntimeError('Failed to start container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError('Failed to start container {c.name}.'.
+ format(c=self.container))
self._lxc_wait('RUNNING')
# Workaround for LXC to be able to allocate all cpus including isolated.
- cmd = 'cgset --copy-from / lxc/'
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgset --copy-from / lxc/')
if int(ret) != 0:
raise RuntimeError('Failed to copy cgroup to LXC')
- cmd = 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'\
- .format(c=self.container, cpus=cpuset_cpus)
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
+ format(c=self.container, cpus=cpuset_cpus))
if int(ret) != 0:
raise RuntimeError('Failed to set cpuset.cpus to container '
'{c.name}.'.format(c=self.container))
' '.join('--set-var %s' % env for env in self.container.env))\
if self.container.env else ''
- cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}'"\
- .format(env=env, c=self.container, command=command)
+ cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
+ "exit $?'".format(env=env, c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
class Docker(ContainerEngine):
"""Docker implementation."""
- def __init__(self):
- """Initialize Docker object."""
- super(Docker, self).__init__()
+ # Implicit constructor is inherited.
def acquire(self, force=True):
"""Pull an image or a repository from a registry.
else:
return
- cmd = 'docker pull {c.image}'.format(c=self.container)
+ if not self.container.image:
+ img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
+ if Topology.get_node_arch(self.container.node) == 'aarch64' \
+ else Constants.DOCKER_SUT_IMAGE_UBUNTU
+ setattr(self.container, 'image', img)
+
+ cmd = 'docker pull {image}'.format(image=self.container.image)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
if int(ret) != 0:
raise RuntimeError('Failed to create container {c.name}.'
.format(c=self.container))
- self._configure_cgroup('docker')
+
+ if self.container.cpuset_cpus:
+ self._configure_cgroup('docker')
def create(self):
"""Create/deploy container.
cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
if self.container.cpuset_mems is not None else ''
+ # Temporary workaround - disabling due to bug in memif
+ cpuset_mems = ''
env = '{0}'.format(
' '.join('--env %s' % env for env in self.container.env))\
' '.join('--publish %s' % var for var in self.container.publish))\
if self.container.publish else ''
- volume = '--volume {c.host_dir}:{c.guest_dir}'.format(c=self.container)\
- if self.container.host_dir and self.container.guest_dir else ''
+ volume = '{0}'.format(
+ ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
+ if self.container.mnt else ''
cmd = 'docker run '\
'--privileged --detach --interactive --tty --rm '\
:param command: Command to run inside container.
:type command: str
- :raises RuntimeError: If runnig the command in a container failed.
+ :raises RuntimeError: If running the command in a container failed.
"""
- cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}'"\
- .format(c=self.container, command=command)
+ cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
+ "exit $?'".format(c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0: