X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Flibraries%2Fpython%2FSetupFramework.py;h=500812826af49f4744ad1933803d1b9659ac6527;hp=b3df489685376bb1bf78dbb4ed428b6a7ec8b1cd;hb=e4744a48fa82c226162351d7598827efa610e0ec;hpb=8f77a1ac982b07802f0fb209f589708c27f3e9c5 diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py index b3df489685..500812826a 100644 --- a/resources/libraries/python/SetupFramework.py +++ b/resources/libraries/python/SetupFramework.py @@ -16,11 +16,12 @@ nodes. All tasks required to be run before the actual tests are started is supposed to end up here. """ -import shlex +from shlex import split from subprocess import Popen, PIPE, call from multiprocessing import Pool from tempfile import NamedTemporaryFile from os.path import basename +from os import environ from robot.api import logger from robot.libraries.BuiltIn import BuiltIn @@ -31,23 +32,39 @@ from resources.libraries.python.topology import NodeType __all__ = ["SetupFramework"] + def pack_framework_dir(): - """Pack the testing WS into temp file, return its name.""" + """Pack the testing WS into temp file, return its name. + + :returns: Tarball file name. + :rtype: str + :raises Exception: When failed to pack testing framework. + """ + + try: + directory = environ["TMPDIR"] + except KeyError: + directory = None - tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-") + if directory is not None: + tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-", + dir="{0}".format(directory)) + else: + tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-") file_name = tmpfile.name tmpfile.close() proc = Popen( - shlex.split("tar --exclude-vcs -zcf {0} .".format(file_name)), - stdout=PIPE, stderr=PIPE) + split("tar --sparse --exclude-vcs --exclude=output*.xml " + "--exclude=./tmp --exclude=*.deb --exclude=*.rpm -zcf {0} .". 
+                format(file_name)), stdout=PIPE, stderr=PIPE)
     (stdout, stderr) = proc.communicate()
     logger.debug(stdout)
     logger.debug(stderr)
     return_code = proc.wait()
-    if 0 != return_code:
+    if return_code != 0:
         raise Exception("Could not pack testing framework.")
 
     return file_name
 
 
@@ -56,11 +73,11 @@ def pack_framework_dir():
 def copy_tarball_to_node(tarball, node):
     """Copy tarball file from local host to remote node.
 
-    :param tarball: path to tarball to upload
-    :param node: dictionary created from topology
-    :type tarball: string
+    :param tarball: Path to tarball to upload.
+    :param node: Dictionary created from topology.
+    :type tarball: str
     :type node: dict
-    :return: nothing
+    :returns: nothing
     """
     logger.console('Copying tarball to {0}'.format(node['host']))
     ssh = SSH()
@@ -72,13 +89,14 @@ def copy_tarball_to_node(tarball, node):
 def extract_tarball_at_node(tarball, node):
     """Extract tarball at given node.
 
-    Extracts tarball using tar on given node to specific CSIT loocation.
+    Extracts tarball using tar on given node to specific CSIT location.
 
-    :param tarball: path to tarball to upload
-    :param node: dictionary created from topology
-    :type tarball: string
+    :param tarball: Path to tarball to upload.
+    :param node: Dictionary created from topology.
+    :type tarball: str
     :type node: dict
-    :return: nothing
+    :returns: nothing
+    :raises Exception: When failed to unpack tarball. 
""" logger.console('Extracting tarball to {0} on {1}'.format( con.REMOTE_FW_DIR, node['host'])) @@ -88,55 +106,135 @@ def extract_tarball_at_node(tarball, node): cmd = 'sudo rm -rf {1}; mkdir {1} ; tar -zxf {0} -C {1}; ' \ 'rm -f {0}'.format(tarball, con.REMOTE_FW_DIR) (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=30) - if 0 != ret_code: + if ret_code != 0: logger.error('Unpack error: {0}'.format(stderr)) raise Exception('Failed to unpack {0} at node {1}'.format( tarball, node['host'])) def create_env_directory_at_node(node): - """Create fresh virtualenv to a directory, install pip requirements.""" + """Create fresh virtualenv to a directory, install pip requirements. + + :param node: Node to create virtualenv on. + :type node: dict + :returns: nothing + :raises Exception: When failed to setup virtualenv. + """ logger.console('Extracting virtualenv, installing requirements.txt ' 'on {0}'.format(node['host'])) ssh = SSH() ssh.connect(node) (ret_code, stdout, stderr) = ssh.exec_command( - 'cd {0} && rm -rf env && virtualenv env && . env/bin/activate && ' - 'pip install -r requirements.txt'.format(con.REMOTE_FW_DIR), - timeout=100) - if 0 != ret_code: + 'cd {0} && rm -rf env && ' + 'virtualenv --system-site-packages --never-download env && ' + '. env/bin/activate && ' + 'pip install -r requirements.txt' + .format(con.REMOTE_FW_DIR), timeout=100) + if ret_code != 0: logger.error('Virtualenv creation error: {0}'.format(stdout + stderr)) raise Exception('Virtualenv setup failed') else: logger.console('Virtualenv created on {0}'.format(node['host'])) + +# pylint: disable=broad-except def setup_node(args): """Run all set-up methods for a node. This method is used as map_async parameter. It receives tuple with all parameters as passed to map_async function. - :param args: all parameters needed to setup one node + :param args: All parameters needed to setup one node. 
:type args: tuple - :return: nothing + :returns: True - success, False - error + :rtype: bool """ tarball, remote_tarball, node = args - copy_tarball_to_node(tarball, node) - extract_tarball_at_node(remote_tarball, node) - if node['type'] == NodeType.TG: - create_env_directory_at_node(node) - logger.console('Setup of node {0} done'.format(node['host'])) + try: + copy_tarball_to_node(tarball, node) + extract_tarball_at_node(remote_tarball, node) + if node['type'] == NodeType.TG: + create_env_directory_at_node(node) + except Exception as exc: + logger.error("Node {0} setup failed, error:'{1}'".format(node['host'], + exc.message)) + return False + else: + logger.console('Setup of node {0} done'.format(node['host'])) + return True + def delete_local_tarball(tarball): """Delete local tarball to prevent disk pollution. - :param tarball: path to tarball to upload - :type tarball: string - :return: nothing + :param tarball: Path to tarball to upload. + :type tarball: str + :returns: nothing + """ + call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball))) + + +def delete_openvpp_testing_stuff(node): + """Delete openvpp-testing directory and tarball in /tmp/ on given node. + + :param node: Node to delete openvpp-testing stuff on. + :type node: dict + """ + logger.console('Deleting openvpp-testing directory and tarball on {0}' + .format(node['host'])) + ssh = SSH() + ssh.connect(node) + (ret_code, stdout, stderr) = ssh.exec_command( + 'cd {0} && sudo rm -rf openvpp-testing*'.format( + con.REMOTE_FW_DIR), timeout=100) + if ret_code != 0: + logger.console('Deleting opvenvpp-testing stuff failed on node {0}: {1}' + .format(node, stdout + stderr)) + + +def remove_env_directory_at_node(node): + """Remove virtualenv directory on given node. + + :param node: Node to remove virtualenv on. 
+ :type node: dict + """ + logger.console('Removing virtualenv directory on {0}'.format(node['host'])) + ssh = SSH() + ssh.connect(node) + (ret_code, stdout, stderr) = ssh.exec_command( + 'cd {0} && sudo rm -rf openvpp-testing*' + .format(con.REMOTE_FW_DIR), timeout=100) + if ret_code != 0: + logger.console('Virtualenv removing failed on node {0}: {1}'.format( + node, stdout + stderr)) + + +# pylint: disable=broad-except +def cleanup_node(node): + """Run all clean-up methods for a node. + + This method is used as map_async parameter. It receives tuple with all + parameters as passed to map_async function. + + :param node: Node to do cleanup on. + :type node: dict + :returns: True - success, False - error + :rtype: bool """ - call(shlex.split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball))) + try: + delete_openvpp_testing_stuff(node) + if node['type'] == NodeType.TG: + remove_env_directory_at_node(node) + except Exception as exc: + logger.error("Node {0} cleanup failed, error:'{1}'".format( + node['host'], exc.message)) + return False + else: + logger.console('Cleanup of node {0} done'.format(node['host'])) + return True + -class SetupFramework(object): # pylint: disable=too-few-public-methods +class SetupFramework(object): """Setup suite run on topology nodes. Many VAT/CLI based tests need the scripts at remote hosts before executing @@ -144,12 +242,13 @@ class SetupFramework(object): # pylint: disable=too-few-public-methods to all nodes in topology under /tmp/ """ - def __init__(self): - pass - @staticmethod def setup_framework(nodes): - """Pack the whole directory and extract in temp on each node.""" + """Pack the whole directory and extract in temp on each node. + + :param nodes: Topology nodes. 
+ :type nodes: dict + """ tarball = pack_framework_dir() msg = 'Framework packed to {0}'.format(tarball) @@ -157,7 +256,7 @@ class SetupFramework(object): # pylint: disable=too-few-public-methods logger.trace(msg) remote_tarball = "/tmp/{0}".format(basename(tarball)) - # Turn off loggining since we use multiprocessing + # Turn off logging since we use multiprocessing log_level = BuiltIn().set_log_level('NONE') params = ((tarball, remote_tarball, node) for node in nodes.values()) pool = Pool(processes=len(nodes)) @@ -165,15 +264,46 @@ class SetupFramework(object): # pylint: disable=too-few-public-methods pool.close() pool.join() + # Turn on logging + BuiltIn().set_log_level(log_level) + logger.info( - 'Executed node setups in parallel, waiting for processes to end') + 'Executing node setups in parallel, waiting for processes to end') result.wait() logger.info('Results: {0}'.format(result.get())) - # Turn on loggining - BuiltIn().set_log_level(log_level) logger.trace('Test framework copied to all topology nodes') delete_local_tarball(tarball) logger.console('All nodes are ready') + +class CleanupFramework(object): + """Clean up suite run on topology nodes.""" + + @staticmethod + def cleanup_framework(nodes): + """Perform cleaning on each node. + + :param nodes: Topology nodes. + :type nodes: dict + """ + # Turn off logging since we use multiprocessing + log_level = BuiltIn().set_log_level('NONE') + params = (node for node in nodes.values()) + pool = Pool(processes=len(nodes)) + result = pool.map_async(cleanup_node, params) + pool.close() + pool.join() + + # Turn on logging + BuiltIn().set_log_level(log_level) + + logger.info( + 'Executing node cleanups in parallel, waiting for processes to end') + result.wait() + + logger.info('Results: {0}'.format(result.get())) + + logger.trace('All topology nodes cleaned up') + logger.console('All nodes cleaned up')