X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FContainerUtils.py;h=786a4013de808da83805e04e2606abe680f427da;hp=da3d7057dcab36fc95335e87fb4c43dd3ba6322a;hb=5075d208e29ece6bfd8cf4aa5289dd0b2ade583c;hpb=01e72f9dce0ef3eeb10ca9097836f7d34e40fa0d

diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index da3d7057dc..786a4013de 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -20,7 +20,7 @@ from collections import OrderedDict, Counter
 
 from resources.libraries.python.ssh import SSH
 from resources.libraries.python.constants import Constants
-from resources.libraries.python.CpuUtils import CpuUtils
+from resources.libraries.python.topology import Topology
 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
 
 
@@ -42,8 +42,8 @@ class ContainerManager(object):
         try:
             self.engine = globals()[engine]()
         except KeyError:
-            raise NotImplementedError('{e} is not implemented.'
-                                      .format(e=engine))
+            raise NotImplementedError('{engine} is not implemented.'.
+                                      format(engine=engine))
         self.containers = OrderedDict()
 
     def get_container_by_name(self, name):
@@ -58,8 +58,8 @@ class ContainerManager(object):
         try:
             return self.containers[name]
         except KeyError:
-            raise RuntimeError('Failed to get container with name: {n}'
-                               .format(n=name))
+            raise RuntimeError('Failed to get container with name: {name}'.
+                               format(name=name))
 
     def construct_container(self, **kwargs):
         """Construct container object on node with specified parameters.
@@ -75,18 +75,7 @@ class ContainerManager(object):
 
         # Set additional environmental variables
         setattr(self.engine.container, 'env',
-                'MICROSERVICE_LABEL={n}'.format(n=kwargs['name']))
-
-        # Set cpuset.cpus cgroup
-        skip_cnt = kwargs['cpu_skip']
-        if not kwargs['cpu_shared']:
-            skip_cnt += kwargs['i'] * kwargs['cpu_count']
-        self.engine.container.cpuset_cpus = \
-            CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
-                                                cpu_node=kwargs['cpuset_mems'],
-                                                skip_cnt=skip_cnt,
-                                                cpu_cnt=kwargs['cpu_count'],
-                                                smt_used=kwargs['smt_used'])
+                'MICROSERVICE_LABEL={label}'.format(label=kwargs['name']))
 
         # Store container instance
         self.containers[kwargs['name']] = self.engine.container
@@ -156,33 +145,74 @@ class ContainerManager(object):
             self.engine.install_vpp()
             self.engine.restart_vpp()
 
-    def configure_vpp_in_all_containers(self, vat_template_file):
+    def restart_vpp_in_all_containers(self):
+        """Restart VPP in all containers."""
+        for container in self.containers:
+            self.engine.container = self.containers[container]
+            self.engine.restart_vpp()
+
+    def configure_vpp_in_all_containers(self, chain_topology,
+                                        dut1_if=None, dut2_if=None):
         """Configure VPP in all containers.
 
-        :param vat_template_file: Template file name of a VAT script.
-        :type vat_template_file: str
+        :param chain_topology: Topology used for chaining containers, either
+            chain or cross_horiz. The chain topology uses 1 memif pair per
+            container. The cross_horiz topology uses 1 memif and 1 physical
+            interface in the container (only a single container is supported).
+        :param dut1_if: Interface on DUT1 directly connected to DUT2.
+        :param dut2_if: Interface on DUT2 directly connected to DUT1.
+        :type chain_topology: str
+        :type dut1_if: str
+        :type dut2_if: str
         """
         # Count number of DUTs based on node's host information
         dut_cnt = len(Counter([self.containers[container].node['host']
                                for container in self.containers]))
-        container_cnt = len(self.containers)
-        mod = container_cnt/dut_cnt
-
-        for i, container in enumerate(self.containers):
-            mid1 = i % mod + 1
-            mid2 = i % mod + 1
-            sid1 = i % mod * 2 + 1
-            sid2 = i % mod * 2 + 2
-            self.engine.container = self.containers[container]
-            self.engine.create_vpp_startup_config()
-            self.engine.create_vpp_exec_config(vat_template_file, mid1=mid1,
-                                               mid2=mid2, sid1=sid1, sid2=sid2,
-                                               socket1='memif-{c.name}-{sid}'
-                                               .format(c=self.engine.container,
-                                                       sid=sid1),
-                                               socket2='memif-{c.name}-{sid}'
-                                               .format(c=self.engine.container,
-                                                       sid=sid2))
+        mod = len(self.containers)/dut_cnt
+        container_vat_template = 'memif_create_{topology}.vat'.format(
+            topology=chain_topology)
+
+        if chain_topology == 'chain':
+            for i, container in enumerate(self.containers):
+                mid1 = i % mod + 1
+                mid2 = i % mod + 1
+                sid1 = i % mod * 2 + 1
+                sid2 = i % mod * 2 + 2
+                self.engine.container = self.containers[container]
+                self.engine.create_vpp_startup_config()
+                self.engine.create_vpp_exec_config(container_vat_template, \
+                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2, \
+                    socket1='memif-{c.name}-{sid}'. \
+                    format(c=self.engine.container, sid=sid1), \
+                    socket2='memif-{c.name}-{sid}'. \
+                    format(c=self.engine.container, sid=sid2))
+        elif chain_topology == 'cross_horiz':
+            if mod > 1:
+                raise RuntimeError('Container chain topology {topology} '
+                                   'supports only a single container.'.
+                                   format(topology=chain_topology))
+            for i, container in enumerate(self.containers):
+                mid1 = i % mod + 1
+                sid1 = i % mod * 2 + 1
+                self.engine.container = self.containers[container]
+                if 'DUT1' in self.engine.container.name:
+                    if_pci = Topology.get_interface_pci_addr( \
+                        self.engine.container.node, dut1_if)
+                    if_name = Topology.get_interface_name( \
+                        self.engine.container.node, dut1_if)
+                if 'DUT2' in self.engine.container.name:
+                    if_pci = Topology.get_interface_pci_addr( \
+                        self.engine.container.node, dut2_if)
+                    if_name = Topology.get_interface_name( \
+                        self.engine.container.node, dut2_if)
+                self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
+                self.engine.create_vpp_exec_config(container_vat_template, \
+                    mid1=mid1, sid1=sid1, if_name=if_name, \
+                    socket1='memif-{c.name}-{sid}'. \
+                    format(c=self.engine.container, sid=sid1))
+        else:
+            raise RuntimeError('Container topology {topology} not implemented.'.
+                               format(topology=chain_topology))
 
     def stop_all_containers(self):
         """Stop all containers."""
@@ -253,9 +283,9 @@ class ContainerEngine(object):
         self.execute('sleep 3')
         self.execute('apt-get update')
         self.execute('apt-get install -y supervisor')
-        self.execute('echo "{0}" > {1}'
-                     .format(
-                         '[unix_http_server]\n'
+        self.execute('echo "{config}" > {config_file}'.
+                     format(
+                         config='[unix_http_server]\n'
                          'file = /tmp/supervisor.sock\n\n'
                          '[rpcinterface:supervisor]\n'
                          'supervisor.rpcinterface_factory = '
@@ -269,49 +299,53 @@ class ContainerEngine(object):
                          'logfile=/tmp/supervisord.log\n'
                          'loglevel=debug\n'
                          'nodaemon=false\n\n',
-                         SUPERVISOR_CONF))
-        self.execute('supervisord -c {0}'.format(SUPERVISOR_CONF))
-
-    def install_vpp(self, install_dkms=False):
-        """Install VPP inside a container.
+                         config_file=SUPERVISOR_CONF))
+        self.execute('supervisord -c {config_file}'.
+                     format(config_file=SUPERVISOR_CONF))
 
-        :param install_dkms: If install dkms package. This will impact install
-            time. Dkms is required for installation of vpp-dpdk-dkms. Default is
-            false.
-        :type install_dkms: bool
-        """
+    def install_vpp(self):
+        """Install VPP inside a container."""
         self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
         self.execute('apt-get update')
-        if install_dkms:
-            self.execute('apt-get install -y dkms && '
-                         'dpkg -i --force-all {0}/install_dir/*.deb'
                         .format(self.container.guest_dir))
-        else:
-            self.execute('for i in $(ls -I \"*dkms*\" {0}/install_dir/); '
-                         'do dpkg -i --force-all {0}/install_dir/$i; done'
-                         .format(self.container.guest_dir))
+        # Workaround to install a xenial VPP build on bionic Ubuntu.
+        self.execute('apt-get install -y wget')
+        self.execute('deb=$(mktemp) && wget -O "${deb}" '
+                     'http://launchpadlibrarian.net/336117627/'
+                     'libmbedcrypto0_2.5.1-1ubuntu1_amd64.deb && '
+                     'dpkg -i "${deb}" && '
+                     'rm -f "${deb}"')
+        self.execute('deb=$(mktemp) && wget -O "${deb}" '
+                     'http://launchpadlibrarian.net/252876048/'
+                     'libboost-system1.58.0_1.58.0+dfsg-5ubuntu3_amd64.deb && '
+                     'dpkg -i "${deb}" && '
+                     'rm -f "${deb}"')
+        self.execute(
+            'dpkg -i --force-all '
+            '{guest_dir}/openvpp-testing/download_dir/*.deb'.
+            format(guest_dir=self.container.mnt[0].split(':')[1]))
         self.execute('apt-get -f install -y')
         self.execute('apt-get install -y ca-certificates')
-        self.execute('echo "{0}" >> {1}'
-                     .format(
-                         '[program:vpp]\n'
+        self.execute('echo "{config}" >> {config_file}'.
+                     format(
+                         config='[program:vpp]\n'
                          'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
                          'autorestart=false\n'
                          'redirect_stderr=true\n'
                          'priority=1',
-                         SUPERVISOR_CONF))
+                         config_file=SUPERVISOR_CONF))
         self.execute('supervisorctl reload')
+        self.execute('supervisorctl restart vpp')
 
     def restart_vpp(self):
         """Restart VPP service inside a container."""
         self.execute('supervisorctl restart vpp')
+        self.execute('cat /tmp/supervisord.log')
 
-    def create_vpp_startup_config(self,
-                                  config_filename='/etc/vpp/startup.conf'):
+    def create_base_vpp_startup_config(self):
         """Create base startup configuration of VPP on container.
 
-        :param config_filename: Startup configuration file name.
-        :type config_filename: str
+        :returns: Base VPP startup configuration.
+        :rtype: VppConfigGenerator
         """
 
         cpuset_cpus = self.container.cpuset_cpus
@@ -327,13 +361,38 @@ class ContainerEngine(object):
         if cpuset_cpus:
             corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
             vpp_config.add_cpu_corelist_workers(corelist_workers)
+
+        return vpp_config
+
+    def create_vpp_startup_config(self):
+        """Create startup configuration of VPP without DPDK on container.
+        """
+        vpp_config = self.create_base_vpp_startup_config()
+        vpp_config.add_plugin('disable', 'dpdk_plugin.so')
+
+        # Apply configuration
+        self.execute('mkdir -p /etc/vpp/')
+        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+                     .format(config=vpp_config.get_config_str()))
+
+    def create_vpp_startup_config_dpdk_dev(self, *devices):
+        """Create startup configuration of VPP with DPDK on container.
+
+        :param devices: List of PCI devices to add.
+        :type devices: list
+        """
+        vpp_config = self.create_base_vpp_startup_config()
+        vpp_config.add_dpdk_dev(*devices)
+        vpp_config.add_dpdk_no_tx_checksum_offload()
+        vpp_config.add_dpdk_log_level('debug')
         vpp_config.add_plugin('disable', 'default')
+        vpp_config.add_plugin('enable', 'dpdk_plugin.so')
         vpp_config.add_plugin('enable', 'memif_plugin.so')
+
         # Apply configuration
         self.execute('mkdir -p /etc/vpp/')
-        self.execute('echo "{c}" | tee {f}'
-                     .format(c=vpp_config.get_config_str(),
-                             f=config_filename))
+        self.execute('echo "{config}" | tee /etc/vpp/startup.conf'
+                     .format(config=vpp_config.get_config_str()))
 
     def create_vpp_exec_config(self, vat_template_file, **kwargs):
         """Create VPP exec configuration on container.
@@ -364,12 +423,12 @@ class ContainerEngine(object):
     def _configure_cgroup(self, name):
         """Configure the control group associated with a container.
 
-        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker
+        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
         container is initialized a new cgroup /docker or /lxc is created under
         cpuset parent tree. This newly created cgroup is inheriting parent
         setting for cpu/mem exclusive parameter and thus cannot be overridden
-        within /docker or /lxc cgroup. This patch is supposed to set cpu/mem
-        exclusive parameter for both parent and subgroup.
+        within /docker or /lxc cgroup. This function is supposed to set cgroups
+        to allow coexistence of both engines.
 
         :param name: Name of cgroup.
         :type name: str
@@ -404,18 +463,16 @@ class ContainerEngine(object):
 class LXC(ContainerEngine):
     """LXC implementation."""
 
-    def __init__(self):
-        """Initialize LXC object."""
-        super(LXC, self).__init__()
+    # Implicit constructor is inherited.
 
     def acquire(self, force=True):
         """Acquire a privileged system object where configuration is stored.
 
         :param force: If a container exists, destroy it and create a new
-            container.
+        container.
         :type force: bool
         :raises RuntimeError: If creating the container or writing the container
-            config fails.
+        config fails.
         """
         if self.is_container_present():
             if force:
@@ -433,16 +490,6 @@ class LXC(ContainerEngine):
         if int(ret) != 0:
             raise RuntimeError('Failed to create container.')
 
-        if self.container.host_dir and self.container.guest_dir:
-            entry = 'lxc.mount.entry = '\
-                '{c.host_dir} /var/lib/lxc/{c.name}/rootfs{c.guest_dir} ' \
-                'none bind,create=dir 0 0'.format(c=self.container)
-            ret, _, _ = self.container.ssh.exec_command_sudo(
-                "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'"
-                .format(e=entry, c=self.container))
-            if int(ret) != 0:
-                raise RuntimeError('Failed to write {c.name} config.'
-                                   .format(c=self.container))
         self._configure_cgroup('lxc')
 
     def create(self):
@@ -450,27 +497,42 @@ class LXC(ContainerEngine):
 
         :raises RuntimeError: If creating the container fails.
         """
+        if self.container.mnt:
+            for mount in self.container.mnt:
+                host_dir, guest_dir = mount.split(':')
+                entry = 'lxc.mount.entry = {host_dir} '\
+                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
+                    'bind,create=dir 0 0'.format(c=self.container,
+                                                 host_dir=host_dir,
+                                                 guest_dir=guest_dir)
+                ret, _, _ = self.container.ssh.exec_command_sudo(
+                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
+                    format(e=entry, c=self.container))
+                if int(ret) != 0:
+                    raise RuntimeError('Failed to write {c.name} config.'
+                                       .format(c=self.container))
+
         cpuset_cpus = '{0}'.format(
             ','.join('%s' % cpu for cpu in self.container.cpuset_cpus))\
             if self.container.cpuset_cpus else ''
 
-        cmd = 'lxc-start --name {c.name} --daemon'.format(c=self.container)
-
-        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'lxc-start --name {c.name} --daemon'.
+            format(c=self.container))
         if int(ret) != 0:
-            raise RuntimeError('Failed to start container {c.name}.'
-                               .format(c=self.container))
+            raise RuntimeError('Failed to start container {c.name}.'.
+                               format(c=self.container))
         self._lxc_wait('RUNNING')
 
         # Workaround for LXC to be able to allocate all cpus including isolated.
-        cmd = 'cgset --copy-from / lxc/'
-        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'cgset --copy-from / lxc/')
         if int(ret) != 0:
             raise RuntimeError('Failed to copy cgroup to LXC')
 
-        cmd = 'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'\
-            .format(c=self.container, cpus=cpuset_cpus)
-        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
+        ret, _, _ = self.container.ssh.exec_command_sudo(
+            'lxc-cgroup --name {c.name} cpuset.cpus {cpus}'.
+            format(c=self.container, cpus=cpuset_cpus))
         if int(ret) != 0:
             raise RuntimeError('Failed to set cpuset.cpus to container '
                                '{c.name}.'.format(c=self.container))
@@ -489,8 +551,8 @@ class LXC(ContainerEngine):
             ' '.join('--set-var %s' % env for env in self.container.env))\
             if self.container.env else ''
 
-        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}'"\
-            .format(env=env, c=self.container, command=command)
+        cmd = "lxc-attach {env} --name {c.name} -- /bin/sh -c '{command}; "\
+            "exit $?'".format(env=env, c=self.container, command=command)
 
         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
         if int(ret) != 0:
@@ -592,9 +654,7 @@ class LXC(ContainerEngine):
 class Docker(ContainerEngine):
     """Docker implementation."""
 
-    def __init__(self):
-        """Initialize Docker object."""
-        super(Docker, self).__init__()
+    # Implicit constructor is inherited.
 
     def acquire(self, force=True):
         """Pull an image or a repository from a registry.
@@ -609,7 +669,10 @@ class Docker(ContainerEngine):
         else:
             return
 
-        cmd = 'docker pull {c.image}'.format(c=self.container)
+        if not self.container.image:
+            setattr(self.container, 'image', 'snergster/csit-sut:latest')
+
+        cmd = 'docker pull {image}'.format(image=self.container.image)
 
         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
         if int(ret) != 0:
@@ -628,6 +691,8 @@ class Docker(ContainerEngine):
 
         cpuset_mems = '--cpuset-mems={0}'.format(self.container.cpuset_mems)\
             if self.container.cpuset_mems is not None else ''
+        # Temporary workaround: disable cpuset_mems due to a bug in memif.
+        cpuset_mems = ''
 
         env = '{0}'.format(
             ' '.join('--env %s' % env for env in self.container.env))\
@@ -640,8 +705,9 @@ class Docker(ContainerEngine):
             ' '.join('--publish %s' % var for var in self.container.publish))\
             if self.container.publish else ''
 
-        volume = '--volume {c.host_dir}:{c.guest_dir}'.format(c=self.container)\
-            if self.container.host_dir and self.container.guest_dir else ''
+        volume = '{0}'.format(
+            ' '.join('--volume %s' % mnt for mnt in self.container.mnt))\
+            if self.container.mnt else ''
 
         cmd = 'docker run '\
             '--privileged --detach --interactive --tty --rm '\
@@ -668,8 +734,8 @@ class Docker(ContainerEngine):
         :type command: str
         :raises RuntimeError: If running the command in a container failed.
""" - cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}'"\ - .format(c=self.container, command=command) + cmd = "docker exec --interactive {c.name} /bin/sh -c '{command}; "\ + "exit $?'".format(c=self.container, command=command) ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180) if int(ret) != 0: