# See the License for the specific language governing permissions and
# limitations under the License.
-import shlex
+"""This module exists to provide setup utilities for the framework on topology
+nodes. All tasks required to be run before the actual tests are started are
+supposed to end up here.
+"""
+
+from shlex import split
from subprocess import Popen, PIPE, call
from multiprocessing import Pool
from tempfile import NamedTemporaryFile
from os.path import basename
+from os import environ
+
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
-from ssh import SSH
-from constants import Constants as con
-from topology import NodeType
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.constants import Constants as con
+from resources.libraries.python.topology import NodeType
__all__ = ["SetupFramework"]
def pack_framework_dir():
- """Pack the testing WS into temp file, return its name."""
+ """Pack the testing WS into temp file, return its name.
+
+ :returns: Tarball file name.
+ :rtype: str
+ :raises Exception: When failed to pack testing framework.
+ """
+
+ try:
+ directory = environ["TMPDIR"]
+ except KeyError:
+ directory = None
- tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-")
+ if directory is not None:
+ tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-",
+ dir="{0}".format(directory))
+ else:
+ tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-")
file_name = tmpfile.name
tmpfile.close()
proc = Popen(
- shlex.split("tar --exclude-vcs -zcf {0} .".format(file_name)),
- stdout=PIPE, stderr=PIPE)
+ split("tar --sparse --exclude-vcs --exclude=output*.xml "
+ "--exclude=./tmp --exclude=*.deb --exclude=*.rpm -zcf {0} .".
+ format(file_name)), stdout=PIPE, stderr=PIPE)
(stdout, stderr) = proc.communicate()
logger.debug(stdout)
logger.debug(stderr)
return_code = proc.wait()
- if 0 != return_code:
+ if return_code != 0:
raise Exception("Could not pack testing framework.")
return file_name
def copy_tarball_to_node(tarball, node):
+ """Copy tarball file from local host to remote node.
+
+ :param tarball: Path to tarball to upload.
+ :param node: Dictionary created from topology.
+ :type tarball: str
+ :type node: dict
+ :returns: nothing
+ """
logger.console('Copying tarball to {0}'.format(node['host']))
ssh = SSH()
ssh.connect(node)
def extract_tarball_at_node(tarball, node):
+ """Extract tarball at given node.
+
+ Extracts tarball using tar on given node to specific CSIT location.
+
+    :param tarball: Path to tarball to extract.
+ :param node: Dictionary created from topology.
+ :type tarball: str
+ :type node: dict
+ :returns: nothing
+    :raises Exception: When failed to unpack tarball.
+ """
logger.console('Extracting tarball to {0} on {1}'.format(
con.REMOTE_FW_DIR, node['host']))
ssh = SSH()
cmd = 'sudo rm -rf {1}; mkdir {1} ; tar -zxf {0} -C {1}; ' \
'rm -f {0}'.format(tarball, con.REMOTE_FW_DIR)
- (ret_code, stdout, stderr) = ssh.exec_command(cmd, timeout=30)
- if 0 != ret_code:
+ (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=30)
+ if ret_code != 0:
logger.error('Unpack error: {0}'.format(stderr))
raise Exception('Failed to unpack {0} at node {1}'.format(
tarball, node['host']))
def create_env_directory_at_node(node):
- """Create fresh virtualenv to a directory, install pip requirements."""
+ """Create fresh virtualenv to a directory, install pip requirements.
+
+ :param node: Node to create virtualenv on.
+ :type node: dict
+ :returns: nothing
+ :raises Exception: When failed to setup virtualenv.
+ """
logger.console('Extracting virtualenv, installing requirements.txt '
'on {0}'.format(node['host']))
ssh = SSH()
ssh.connect(node)
(ret_code, stdout, stderr) = ssh.exec_command(
- 'cd {0} && rm -rf env && virtualenv env && '
- '. env/bin/activate && '
- 'pip install -r requirements.txt'.format(con.REMOTE_FW_DIR), timeout=100)
- if 0 != ret_code:
+ 'cd {0} && rm -rf env && '
+ 'virtualenv --system-site-packages --never-download env && '
+ '. env/bin/activate && '
+ 'pip install -r requirements.txt'
+ .format(con.REMOTE_FW_DIR), timeout=100)
+ if ret_code != 0:
logger.error('Virtualenv creation error: {0}'.format(stdout + stderr))
raise Exception('Virtualenv setup failed')
else:
logger.console('Virtualenv created on {0}'.format(node['host']))
+
+# pylint: disable=broad-except
def setup_node(args):
+ """Run all set-up methods for a node.
+
+ This method is used as map_async parameter. It receives tuple with all
+ parameters as passed to map_async function.
+
+ :param args: All parameters needed to setup one node.
+ :type args: tuple
+ :returns: True - success, False - error
+ :rtype: bool
+ """
tarball, remote_tarball, node = args
- copy_tarball_to_node(tarball, node)
- extract_tarball_at_node(remote_tarball, node)
- if node['type'] == NodeType.TG:
- create_env_directory_at_node(node)
+ try:
+ copy_tarball_to_node(tarball, node)
+ extract_tarball_at_node(remote_tarball, node)
+ if node['type'] == NodeType.TG:
+ create_env_directory_at_node(node)
+ except Exception as exc:
+ logger.error("Node {0} setup failed, error:'{1}'".format(node['host'],
+ exc.message))
+ return False
+ else:
+ logger.console('Setup of node {0} done'.format(node['host']))
+ return True
def delete_local_tarball(tarball):
- call(shlex.split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))
+ """Delete local tarball to prevent disk pollution.
+
+    :param tarball: Path to tarball to delete.
+ :type tarball: str
+ :returns: nothing
+ """
+ call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))
+
+
+def delete_openvpp_testing_stuff(node):
+ """Delete openvpp-testing directory and tarball in /tmp/ on given node.
+
+ :param node: Node to delete openvpp-testing stuff on.
+ :type node: dict
+ """
+ logger.console('Deleting openvpp-testing directory and tarball on {0}'
+ .format(node['host']))
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, stdout, stderr) = ssh.exec_command(
+ 'cd {0} && sudo rm -rf openvpp-testing*'.format(
+ con.REMOTE_FW_DIR), timeout=100)
+ if ret_code != 0:
+        logger.console('Deleting openvpp-testing stuff failed on node {0}: {1}'
+ .format(node, stdout + stderr))
+
+
+def remove_env_directory_at_node(node):
+ """Remove virtualenv directory on given node.
+
+ :param node: Node to remove virtualenv on.
+ :type node: dict
+ """
+ logger.console('Removing virtualenv directory on {0}'.format(node['host']))
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, stdout, stderr) = ssh.exec_command(
+ 'cd {0} && sudo rm -rf openvpp-testing*'
+ .format(con.REMOTE_FW_DIR), timeout=100)
+ if ret_code != 0:
+ logger.console('Virtualenv removing failed on node {0}: {1}'.format(
+ node, stdout + stderr))
+
+
+# pylint: disable=broad-except
+def cleanup_node(node):
+ """Run all clean-up methods for a node.
+
+    This method is used as map_async parameter. It receives the node
+    dictionary as passed to the map_async function.
+
+ :param node: Node to do cleanup on.
+ :type node: dict
+ :returns: True - success, False - error
+ :rtype: bool
+ """
+ try:
+ delete_openvpp_testing_stuff(node)
+ if node['type'] == NodeType.TG:
+ remove_env_directory_at_node(node)
+ except Exception as exc:
+ logger.error("Node {0} cleanup failed, error:'{1}'".format(
+ node['host'], exc.message))
+ return False
+ else:
+ logger.console('Cleanup of node {0} done'.format(node['host']))
+ return True
class SetupFramework(object):
to all nodes in topology under /tmp/
"""
- def __init__(self):
- pass
+ @staticmethod
+ def setup_framework(nodes):
+ """Pack the whole directory and extract in temp on each node.
- def setup_framework(self, nodes):
- """Pack the whole directory and extract in temp on each node."""
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
tarball = pack_framework_dir()
msg = 'Framework packed to {0}'.format(tarball)
logger.trace(msg)
remote_tarball = "/tmp/{0}".format(basename(tarball))
- # Turn off loggining since we use multiprocessing
+ # Turn off logging since we use multiprocessing
log_level = BuiltIn().set_log_level('NONE')
params = ((tarball, remote_tarball, node) for node in nodes.values())
pool = Pool(processes=len(nodes))
pool.close()
pool.join()
+ # Turn on logging
+ BuiltIn().set_log_level(log_level)
+
logger.info(
- 'Executed node setups in parallel, waiting for processes to end')
+ 'Executing node setups in parallel, waiting for processes to end')
result.wait()
logger.info('Results: {0}'.format(result.get()))
- # Turn on loggining
- BuiltIn().set_log_level(log_level)
logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
+ logger.console('All nodes are ready')
+
+
+class CleanupFramework(object):
+ """Clean up suite run on topology nodes."""
+
+ @staticmethod
+ def cleanup_framework(nodes):
+ """Perform cleaning on each node.
+
+ :param nodes: Topology nodes.
+ :type nodes: dict
+ """
+ # Turn off logging since we use multiprocessing
+ log_level = BuiltIn().set_log_level('NONE')
+ params = (node for node in nodes.values())
+ pool = Pool(processes=len(nodes))
+ result = pool.map_async(cleanup_node, params)
+ pool.close()
+ pool.join()
+
+ # Turn on logging
+ BuiltIn().set_log_level(log_level)
+
+ logger.info(
+ 'Executing node cleanups in parallel, waiting for processes to end')
+ result.wait()
+
+ logger.info('Results: {0}'.format(result.get()))
+
+ logger.trace('All topology nodes cleaned up')
+ logger.console('All nodes cleaned up')