from resources.libraries.python.ssh import SSH
from resources.libraries.python.Constants import Constants
-from resources.libraries.python.topology import Topology
+from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
self.execute('supervisorctl reload')
self.execute('supervisorctl start vpp')
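+ # Register the container's PAPI and STATS sockets in the topology so
+ # the framework can reach this VPP instance over socket transport.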
+ from robot.libraries.BuiltIn import BuiltIn
+ topo_instance = BuiltIn().get_library_instance(
+     'resources.libraries.python.topology.Topology')
+ topo_instance.add_new_socket(
+     self.container.node,
+     SocketType.PAPI,
+     self.container.name,
+     '{root}/tmp/vpp_sockets/{name}/api.sock'.
+     format(root=self.container.root, name=self.container.name))
+ topo_instance.add_new_socket(
+     self.container.node,
+     SocketType.STATS,
+     self.container.name,
+     '{root}/tmp/vpp_sockets/{name}/stats.sock'.
+     format(root=self.container.root, name=self.container.name))
+
def restart_vpp(self):
    """Restart VPP service inside a container."""
    self.execute('supervisorctl restart vpp')
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
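+ # Enable the VPP API socket server and per-node stats segment counters.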
+ vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+ vpp_config.add_statseg_per_node_counters(value='on')
# Pop the first core from the list to use as the main core.
vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
# If more cores remain in the list, they are used as workers.
vpp_config.add_unix_cli_listen()
vpp_config.add_unix_nodaemon()
vpp_config.add_unix_exec('/tmp/running.exec')
+ vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
+ vpp_config.add_statseg_per_node_counters(value='on')
vpp_config.add_plugin('disable', 'dpdk_plugin.so')
# Apply configuration
else:
    return
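+ # Match the downloaded image architecture to the SUT node architecture.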
+ target_arch = 'arm64' \
+     if Topology.get_node_arch(self.container.node) == 'aarch64' \
+     else 'amd64'
+
image = self.container.image if self.container.image else \
-     "-d ubuntu -r bionic -a amd64"
+     "-d ubuntu -r bionic -a {arch}".format(arch=target_arch)
cmd = 'lxc-create -t download --name {c.name} -- {image} ' \
    '--no-validate'.format(c=self.container, image=image)
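+ # On an aarch64 node the default image resolves to e.g.:
+ #   lxc-create -t download --name <name> -- -d ubuntu -r bionic -a arm64 --no-validate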
:raises RuntimeError: If creating the container fails.
"""
if self.container.mnt:
+     # LXC fix for tmpfs
+     # https://github.com/lxc/lxc/issues/434
+     ret, _, _ = self.container.ssh.exec_command_sudo(
+         "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
+         format(e="lxc.mount.entry = tmpfs run tmpfs defaults",
+             c=self.container))
+     if int(ret) != 0:
+         raise RuntimeError('Failed to write {c.name} config.'.
+             format(c=self.container))
+
    for mount in self.container.mnt:
        host_dir, guest_dir = mount.split(':')
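+         # A trailing '/' in the guest path means the mount target is a
+         # directory; otherwise a file is created for the bind mount.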
        options = 'bind,create=dir' \
            if guest_dir.endswith('/') else 'bind,create=file'
-         entry = 'lxc.mount.entry = {host_dir} '\
-             '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
-             '{options} 0 0'.format(c=self.container,
-                 host_dir=host_dir,
-                 guest_dir=guest_dir,
-                 options=options)
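+         # The new entry uses a guest path relative to the container
+         # rootfs (hence guest_dir[1:]) instead of the absolute
+         # /var/lib/lxc/<name>/rootfs prefix.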
+         entry = 'lxc.mount.entry = {host_dir} {guest_dir} none ' \
+             '{options} 0 0'.format(
+                 host_dir=host_dir, guest_dir=guest_dir[1:],
+                 options=options)
        ret, _, _ = self.container.ssh.exec_command_sudo(
            "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
            format(e=entry, c=self.container))
    if self.container.cpuset_cpus else ''
ret, _, _ = self.container.ssh.exec_command_sudo(
-     'lxc-start --name {c.name} --daemon'.
-     format(c=self.container))
+     'lxc-start --name {c.name} --daemon'.format(c=self.container))
if int(ret) != 0:
    raise RuntimeError('Failed to start container {c.name}.'.
        format(c=self.container))
return
if not self.container.image:
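+     # Default to the arch-specific CSIT SUT image instead of the
+     # hard-coded amd64-only one.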
-     setattr(self.container, 'image', 'snergster/csit-sut:latest')
+     img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
+         if Topology.get_node_arch(self.container.node) == 'aarch64' \
+         else Constants.DOCKER_SUT_IMAGE_UBUNTU
+     setattr(self.container, 'image', img)
cmd = 'docker pull {image}'.format(image=self.container.image)
if int(ret) != 0:
    raise RuntimeError('Failed to create container {c.name}.'
        .format(c=self.container))
+
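+ # Prepare the cgroup used to pin the container to the requested CPUs.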
if self.container.cpuset_cpus:
    self._configure_cgroup('docker')