-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from resources.libraries.python.ssh import SSH
from resources.libraries.python.constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.topology import Topology
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
try:
self.engine = globals()[engine]()
except KeyError:
- raise NotImplementedError('{e} is not implemented.'
- .format(e=engine))
+ raise NotImplementedError('{engine} is not implemented.'.
+ format(engine=engine))
self.containers = OrderedDict()
def get_container_by_name(self, name):
try:
return self.containers[name]
except KeyError:
- raise RuntimeError('Failed to get container with name: {n}'
- .format(n=name))
+ raise RuntimeError('Failed to get container with name: {name}'.
+ format(name=name))
def construct_container(self, **kwargs):
"""Construct container object on node with specified parameters.
# Set additional environmental variables
setattr(self.engine.container, 'env',
- 'MICROSERVICE_LABEL={n}'.format(n=kwargs['name']))
+ 'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
# Set cpuset.cpus cgroup
skip_cnt = kwargs['cpu_skip']
+ smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
if not kwargs['cpu_shared']:
skip_cnt += kwargs['i'] * kwargs['cpu_count']
self.engine.container.cpuset_cpus = \
CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
cpu_node=kwargs['cpuset_mems'],
skip_cnt=skip_cnt,
- cpu_cnt=kwargs['cpu_count'],
- smt_used=kwargs['smt_used'])
+ cpu_cnt=1,
+ smt_used=False) \
+ + \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpuset_mems'],
+ skip_cnt=skip_cnt+1,
+ cpu_cnt=kwargs['cpu_count']-1,
+ smt_used=smt_used)
# Store container instance
self.containers[kwargs['name']] = self.engine.container
self.engine.install_vpp()
self.engine.restart_vpp()
- def configure_vpp_in_all_containers(self, vat_template_file):
+ def restart_vpp_in_all_containers(self):
+ """Restart VPP on all containers."""
+ for container in self.containers:
+ self.engine.container = self.containers[container]
+ self.engine.restart_vpp()
+
+ def configure_vpp_in_all_containers(self, chain_topology,
+ dut1_if=None, dut2_if=None):
"""Configure VPP in all containers.
- :param vat_template_file: Template file name of a VAT script.
- :type vat_template_file: str
+    :param chain_topology: Topology used for chaining containers; can be
+        'chain' or 'cross_horiz'. Chain topology uses 1 memif pair per
+        container. Cross_horiz topology uses 1 memif and 1 physical
+        interface in the container (only a single container can be configured).
+ :param dut1_if: Interface on DUT1 directly connected to DUT2.
+ :param dut2_if: Interface on DUT2 directly connected to DUT1.
+    :type chain_topology: str
+ :type dut1_if: str
+ :type dut2_if: str
"""
# Count number of DUTs based on node's host information
dut_cnt = len(Counter([self.containers[container].node['host']
for container in self.containers]))
- container_cnt = len(self.containers)
- mod = container_cnt/dut_cnt
-
- for i, container in enumerate(self.containers):
- mid1 = i % mod + 1
- mid2 = i % mod + 1
- sid1 = i % mod * 2 + 1
- sid2 = i % mod * 2 + 2
- self.engine.container = self.containers[container]
- self.engine.create_vpp_startup_config()
- self.engine.create_vpp_exec_config(vat_template_file, mid1=mid1,
- mid2=mid2, sid1=sid1, sid2=sid2,
- socket1='memif-{c.name}-{sid}'
- .format(c=self.engine.container,
- sid=sid1),
- socket2='memif-{c.name}-{sid}'
- .format(c=self.engine.container,
- sid=sid2))
+ mod = len(self.containers)/dut_cnt
+ container_vat_template = 'memif_create_{topology}.vat'.format(
+ topology=chain_topology)
+
+ if chain_topology == 'chain':
+ for i, container in enumerate(self.containers):
+ mid1 = i % mod + 1
+ mid2 = i % mod + 1
+ sid1 = i % mod * 2 + 1
+ sid2 = i % mod * 2 + 2
+ self.engine.container = self.containers[container]
+ self.engine.create_vpp_startup_config()
+ self.engine.create_vpp_exec_config(container_vat_template, \
+ mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
+ socket1='memif-{c.name}-{sid}'. \
+ format(c=self.engine.container, sid=sid1), \
+ socket2='memif-{c.name}-{sid}'. \
+ format(c=self.engine.container, sid=sid2))
+ elif chain_topology == 'cross_horiz':
+ if mod > 1:
+ raise RuntimeError('Container chain topology {topology} '
+ 'supports only single container.'.
+ format(topology=chain_topology))
+ for i, container in enumerate(self.containers):
+ mid1 = i % mod + 1
+ sid1 = i % mod * 2 + 1
+ self.engine.container = self.containers[container]
+ if 'DUT1' in self.engine.container.name:
+ if_pci = Topology.get_interface_pci_addr( \
+ self.engine.container.node, dut1_if)
+ if_name = Topology.get_interface_name( \
+ self.engine.container.node, dut1_if)
+ if 'DUT2' in self.engine.container.name:
+ if_pci = Topology.get_interface_pci_addr( \
+ self.engine.container.node, dut2_if)
+ if_name = Topology.get_interface_name( \
+ self.engine.container.node, dut2_if)
+ self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
+ self.engine.create_vpp_exec_config(container_vat_template, \
+ mid1=mid1, sid1=sid1, if_name=if_name, \
+ socket1='memif-{c.name}-{sid}'. \
+ format(c=self.engine.container, sid=sid1))
+ else:
+ raise RuntimeError('Container topology {topology} not implemented'.
+ format(topology=chain_topology))
def stop_all_containers(self):
"""Stop all containers."""
self.execute('sleep 3')
self.execute('apt-get update')
self.execute('apt-get install -y supervisor')
- self.execute('echo "{0}" > {1}'
- .format(
- '[unix_http_server]\n'
+ self.execute('echo "{config}" > {config_file}'.
+ format(
+ config='[unix_http_server]\n'
'file = /tmp/supervisor.sock\n\n'
'[rpcinterface:supervisor]\n'
'supervisor.rpcinterface_factory = '
'logfile=/tmp/supervisord.log\n'
'loglevel=debug\n'
'nodaemon=false\n\n',
- SUPERVISOR_CONF))
- self.execute('supervisord -c {0}'.format(SUPERVISOR_CONF))
-
- def install_vpp(self, install_dkms=False):
- """Install VPP inside a container.
+ config_file=SUPERVISOR_CONF))
+ self.execute('supervisord -c {config_file}'.
+ format(config_file=SUPERVISOR_CONF))
- :param install_dkms: If install dkms package. This will impact install
- time. Dkms is required for installation of vpp-dpdk-dkms. Default is
- false.
- :type install_dkms: bool
- """
+ def install_vpp(self):
+ """Install VPP inside a container."""
self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
self.execute('apt-get update')
- if install_dkms:
- self.execute('apt-get install -y dkms && '
- 'dpkg -i --force-all {0}/install_dir/*.deb'
- .format(self.container.guest_dir))
+ if self.container.install_dkms:
+ self.execute(
+ 'apt-get install -y dkms && '
+ 'dpkg -i --force-all '
+ '{guest_dir}/openvpp-testing/download_dir/*.deb'.
+ format(guest_dir=self.container.mnt[0].split(':')[1]))
else:
- self.execute('for i in $(ls -I \"*dkms*\" {0}/install_dir/); '
- 'do dpkg -i --force-all {0}/install_dir/$i; done'
- .format(self.container.guest_dir))
+ self.execute(
+ 'for i in $(ls -I \"*dkms*\" '
+ '{guest_dir}/openvpp-testing/download_dir/); do '
+ 'dpkg -i --force-all '
+ '{guest_dir}/openvpp-testing/download_dir/$i; done'.
+ format(guest_dir=self.container.mnt[0].split(':')[1]))
self.execute('apt-get -f install -y')
self.execute('apt-get install -y ca-certificates')
- self.execute('echo "{0}" >> {1}'
- .format(
- '[program:vpp]\n'
+ self.execute('echo "{config}" >> {config_file}'.
+ format(
+ config='[program:vpp]\n'
'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
'autorestart=false\n'
'redirect_stderr=true\n'
'priority=1',
- SUPERVISOR_CONF))
+ config_file=SUPERVISOR_CONF))
self.execute('supervisorctl reload')
+ self.execute('supervisorctl restart vpp')
def restart_vpp(self):
"""Restart VPP service inside a container."""
self.execute('supervisorctl restart vpp')
+ self.execute('cat /tmp/supervisord.log')
- def create_vpp_startup_config(self,
- config_filename='/etc/vpp/startup.conf'):
+ def create_base_vpp_startup_config(self):
"""Create base startup configuration of VPP on container.
- :param config_filename: Startup configuration file name.
- :type config_filename: str
+ :returns: Base VPP startup configuration.
+ :rtype: VppConfigGenerator
"""
cpuset_cpus = self.container.cpuset_cpus
if cpuset_cpus:
corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.add_plugin_disable('dpdk_plugin.so')
+ return vpp_config
+
+ def create_vpp_startup_config(self):
+ """Create startup configuration of VPP without DPDK on container.
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+
+ # Apply configuration
self.execute('mkdir -p /etc/vpp/')
- self.execute('echo "{c}" | tee {f}'
- .format(c=vpp_config.get_config_str(),
- f=config_filename))
+ self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+ .format(config=vpp_config.get_config_str()))
+
+ def create_vpp_startup_config_dpdk_dev(self, *devices):
+ """Create startup configuration of VPP with DPDK on container.
+
+ :param devices: List of PCI devices to add.
+ :type devices: list
+ """
+ vpp_config = self.create_base_vpp_startup_config()
+ vpp_config.add_dpdk_dev(*devices)
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ vpp_config.add_dpdk_log_level('debug')
+ vpp_config.add_plugin('disable', 'default')
+ vpp_config.add_plugin('enable', 'dpdk_plugin.so')
+ vpp_config.add_plugin('enable', 'memif_plugin.so')
+
+ # Apply configuration
+ self.execute('mkdir -p /etc/vpp/')
+ self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+ .format(config=vpp_config.get_config_str()))
def create_vpp_exec_config(self, vat_template_file, **kwargs):
"""Create VPP exec configuration on container.
def _configure_cgroup(self, name):
"""Configure the control group associated with a container.
- By default the cpuset cgroup is using exclusive CPU/MEM. When Docker
+ By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
container is initialized a new cgroup /docker or /lxc is created under
cpuset parent tree. This newly created cgroup is inheriting parent
setting for cpu/mem exclusive parameter and thus cannot be overridden
- within /docker or /lxc cgroup. This patch is supposed to set cpu/mem
- exclusive parameter for both parent and subgroup.
+ within /docker or /lxc cgroup. This function is supposed to set cgroups
+ to allow coexistence of both engines.
:param name: Name of cgroup.
:type name: str
"""Acquire a privileged system object where configuration is stored.
:param force: If a container exists, destroy it and create a new
- container.
+ container.
:type force: bool
:raises RuntimeError: If creating the container or writing the container
- config fails.
+ config fails.
"""
if self.is_container_present():
if force:
if int(ret) != 0:
raise RuntimeError('Failed to create container.')
- if self.container.host_dir and self.container.guest_dir:
- entry = 'lxc.mount.entry = '\
- '{c.host_dir} /var/lib/lxc/{c.name}/rootfs{c.guest_dir} ' \
- 'none bind,create=dir 0 0'.format(c=self.container)
- ret, _, _ = self.container.ssh.exec_command_sudo(
- "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'"
- .format(e=entry, c=self.container))
- if int(ret) != 0:
- raise RuntimeError('Failed to write {c.name} config.'
- .format(c=self.container))
self._configure_cgroup('lxc')
def create(self):
:raises RuntimeError: If creating the container fails.
"""
+ if self.container.mnt:
+ for mount in self.container.mnt:
+ host_dir, guest_dir = mount.split(':')
+ entry = 'lxc.mount.entry = {host_dir} '\
+ '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
+ 'bind,create=dir 0 0'.format(c=self.container,
+ host_dir=host_dir,
+ guest_dir=guest_dir)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
+ format(e=entry, c=self.container))
+ if int(ret) != 0:
+ raise RuntimeError('Failed to write {c.name} config.'
+ .format(c=self.container))
+
cpuset_cpus = '{0}'.format(
','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
if self.container.cpuset_cpus else ''
- cmd = 'lxc-start --name {c.name} --daemon'.format(c=self.container)
-
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'lxc-start --name {c.name} --daemon'.
+ format(c=self.container))
if int(ret) != 0:
- raise RuntimeError('Failed to start container {c.name}.'
- .format(c=self.container))
+ raise RuntimeError('Failed to start container {c.name}.'.
+ format(c=self.container))
self._lxc_wait('RUNNING')
# Workaround for LXC to be able to allocate all cpus including isolated.
- cmd = 'cgset --copy-from / lxc/'
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'cgset --copy-from / lxc/')
if int(ret) != 0:
raise RuntimeError('Failed to copy cgroup to LXC')
- cmd = 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'\
- .format(c=self.container, cpus=cpuset_cpus)
- ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+ ret, _, _ = self.container.ssh.exec_command_sudo(
+ 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
+ format(c=self.container, cpus=cpuset_cpus))
if int(ret) != 0:
raise RuntimeError('Failed to set cpuset.cpus to container '
'{c.name}.'.format(c=self.container))
' '.join('--set-var %s' % env for env in self.container.env))\
if self.container.env else ''
- cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}'"\
- .format(env=env, c=self.container, command=command)
+ cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
+ "exit $?'".format(env=env, c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0:
else:
return
- cmd = 'docker pull {c.image}'.format(c=self.container)
+ image = self.container.image if self.container.image else\
+ "ubuntu:xenial-20180412"
+
+ cmd = 'docker pull {image}'.format(image=image)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
if int(ret) != 0:
cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
if self.container.cpuset_mems is not None else ''
+ # Temporary workaround - disabling due to bug in memif
+ cpuset_mems = ''
env = '{0}'.format(
' '.join('--env %s' % env for env in self.container.env))\
' '.join('--publish %s' % var for var in self.container.publish))\
if self.container.publish else ''
- volume = '--volume {c.host_dir}:{c.guest_dir}'.format(c=self.container)\
- if self.container.host_dir and self.container.guest_dir else ''
+ volume = '{0}'.format(
+ ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
+ if self.container.mnt else ''
cmd = 'docker run '\
'--privileged --detach --interactive --tty --rm '\
:type command: str
:raises RuntimeError: If running the command in a container fails.
"""
- cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}'"\
- .format(c=self.container, command=command)
+ cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\
+ "exit $?'".format(c=self.container, command=command)
ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
if int(ret) != 0: