diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py
index 9b50b90bc5..26a07c3a38 100644
--- a/resources/libraries/python/SetupFramework.py
+++ b/resources/libraries/python/SetupFramework.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2021 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -16,21 +16,19 @@ nodes. All tasks required to be run before the actual tests are started is
 supposed to end up here.
 """
 
-from shlex import split
-from subprocess import Popen, PIPE, call
-from multiprocessing import Pool
+from os import environ, remove
 from tempfile import NamedTemporaryFile
-from os.path import basename
-from os import environ
+import threading
+import traceback
 
 from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
 
-from resources.libraries.python.ssh import SSH
 from resources.libraries.python.Constants import Constants as con
+from resources.libraries.python.ssh import exec_cmd_no_error, scp_node
+from resources.libraries.python.LocalExecution import run
 from resources.libraries.python.topology import NodeType
 
-__all__ = ["SetupFramework"]
+__all__ = [u"SetupFramework"]
 
 
 def pack_framework_dir():
@@ -42,30 +40,25 @@
     """
 
     try:
-        directory = environ["TMPDIR"]
+        directory = environ[u"TMPDIR"]
     except KeyError:
         directory = None
 
     if directory is not None:
-        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-",
-                                     dir="{0}".format(directory))
+        tmpfile = NamedTemporaryFile(
+            suffix=u".tgz", prefix=u"csit-testing-", dir=f"{directory}"
+        )
     else:
-        tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-")
+        tmpfile = NamedTemporaryFile(suffix=u".tgz", prefix=u"csit-testing-")
     file_name = tmpfile.name
     tmpfile.close()
 
-    proc = Popen(
-        split("tar --sparse --exclude-vcs --exclude=output*.xml "
-              "--exclude=./tmp -zcf {0} ."
-              .format(file_name)), stdout=PIPE, stderr=PIPE)
-    (stdout, stderr) = proc.communicate()
-
-    logger.debug(stdout)
-    logger.debug(stderr)
-
-    return_code = proc.wait()
-    if return_code != 0:
-        raise RuntimeError("Could not pack testing framework.")
+    run(
+        [
+            u"tar", u"--sparse", u"--exclude-vcs", u"--exclude=output*.xml",
+            u"--exclude=./tmp", u"-zcf", file_name, u"."
+        ], msg=u"Could not pack testing framework"
+    )
 
     return file_name
 
@@ -79,12 +72,15 @@
     :type node: dict
     :returns: nothing
     """
-    logger.console('Copying tarball to {0}'.format(node['host']))
-    ssh = SSH()
-    ssh.connect(node)
-
-    ssh.scp(tarball, "/tmp/")
-    logger.console('Copying tarball to {0} done'.format(node['host']))
+    logger.console(
+        f"Copying tarball to {node[u'type']} host {node[u'host']}, "
+        f"port {node[u'port']} starts."
+    )
+    scp_node(node, tarball, u"/tmp/")
+    logger.console(
+        f"Copying tarball to {node[u'type']} host {node[u'host']}, "
+        f"port {node[u'port']} done."
+    )
 
 
 def extract_tarball_at_node(tarball, node):
@@ -99,79 +95,109 @@
     """Extract tarball at given node.
 
     Extracts tarball using tar on given node to specific CSIT directory.
 
     :param tarball: Path to tarball to upload.
     :param node: Dictionary created from topology.
     :type tarball: str
     :type node: dict
     :returns: nothing
     :raises RuntimeError: When failed to unpack tarball.
""" - logger.console('Extracting tarball to {0} on {1}' - .format(con.REMOTE_FW_DIR, node['host'])) - ssh = SSH() - ssh.connect(node) - (ret_code, _, _) = ssh.exec_command( - 'sudo rm -rf {1}; mkdir {1} ; tar -zxf {0} -C {1}; rm -f {0}' - .format(tarball, con.REMOTE_FW_DIR), timeout=30) - if ret_code != 0: - raise RuntimeError('Failed to extract {0} at node {1}' - .format(tarball, node['host'])) - logger.console('Extracting tarball to {0} on {1} done' - .format(con.REMOTE_FW_DIR, node['host'])) + logger.console( + f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']} starts." + ) + cmd = f"sudo rm -rf {con.REMOTE_FW_DIR}; mkdir {con.REMOTE_FW_DIR}; " \ + f"tar -zxf {tarball} -C {con.REMOTE_FW_DIR}; rm -f {tarball}" + exec_cmd_no_error( + node, cmd, + message=f"Failed to extract {tarball} at node {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']}", + timeout=90, include_reason=True + ) + logger.console( + f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']} done." + ) def create_env_directory_at_node(node): """Create fresh virtualenv to a directory, install pip requirements. + Return stdout and stderr of the command, + so we see which installs are behaving weird (e.g. attempting download). + :param node: Node to create virtualenv on. :type node: dict - :returns: nothing + :returns: Stdout and stderr. + :rtype: str, str :raises RuntimeError: When failed to setup virtualenv. """ - logger.console('Virtualenv setup including requirements.txt on {0}' - .format(node['host'])) - ssh = SSH() - ssh.connect(node) - (ret_code, _, _) = ssh.exec_command( - 'cd {0} && rm -rf env && ' - 'virtualenv --system-site-packages --never-download env && ' - '. env/bin/activate && ' - 'pip install -r requirements.txt' - .format(con.REMOTE_FW_DIR), timeout=100) - if ret_code != 0: - raise RuntimeError('Virtualenv setup including requirements.txt on {0}' - .format(node['host'])) - - logger.console('Virtualenv on {0} created'.format(node['host'])) - - -def setup_node(args): - """Run all set-up methods for a node. - - This method is used as map_async parameter. It receives tuple with all - parameters as passed to map_async function. - - :param args: All parameters needed to setup one node. - :type args: tuple + logger.console( + f"Virtualenv setup including requirements.txt on {node[u'type']} " + f"host {node[u'host']}, port {node[u'port']} starts." + ) + cmd = f"cd {con.REMOTE_FW_DIR} && rm -rf env && virtualenv " \ + f"-p $(which python3) --system-site-packages --never-download env " \ + f"&& source env/bin/activate && ANSIBLE_SKIP_CONFLICT_CHECK=1 " \ + f"pip3 install -r requirements.txt" + stdout, stderr = exec_cmd_no_error( + node, cmd, timeout=100, include_reason=True, + message=f"Failed install at node {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']}" + ) + logger.console( + f"Virtualenv setup on {node[u'type']} host {node[u'host']}, " + f"port {node[u'port']} done." + ) + return stdout, stderr + + +def setup_node(node, tarball, remote_tarball, results=None, logs=None): + """Copy a tarball to a node and extract it. + + :param node: A node where the tarball will be copied and extracted. + :param tarball: Local path of tarball to be copied. + :param remote_tarball: Remote path of the tarball. + :param results: A list where to store the result of node setup, optional. + :param logs: A list where to store anything that should be logged. 
+    :type node: dict
+    :type tarball: str
+    :type remote_tarball: str
+    :type results: list
+    :type logs: list
     :returns: True - success, False - error
     :rtype: bool
     """
-    tarball, remote_tarball, node = args
     try:
         copy_tarball_to_node(tarball, node)
         extract_tarball_at_node(remote_tarball, node)
-        if node['type'] == NodeType.TG:
-            create_env_directory_at_node(node)
-    except RuntimeError as exc:
-        logger.error("Node {0} setup failed, error:'{1}'"
-                     .format(node['host'], exc.message))
-        return False
+        if node[u"type"] == NodeType.TG:
+            stdout, stderr = create_env_directory_at_node(node)
+            if isinstance(logs, list):
+                logs.append(f"{node[u'host']} Env stdout: {stdout}")
+                logs.append(f"{node[u'host']} Env stderr: {stderr}")
+    except Exception:
+        # Any exception must result in result = False, since this runs
+        # in a thread and cannot be caught anywhere else.
+        err_msg = f"Node {node[u'type']} host {node[u'host']}, " \
+            f"port {node[u'port']} setup failed."
+        logger.console(err_msg)
+        if isinstance(logs, list):
+            logs.append(f"{err_msg} Exception: {traceback.format_exc()}")
+        result = False
     else:
-        logger.console('Setup of node {0} done'.format(node['host']))
-        return True
+        logger.console(
+            f"Setup of node {node[u'type']} host {node[u'host']}, "
+            f"port {node[u'port']} done."
+        )
+        result = True
+
+    if isinstance(results, list):
+        results.append(result)
+    return result
 
 
 def delete_local_tarball(tarball):
     """Delete local tarball to prevent disk pollution.
 
-    :param tarball: Path to tarball to upload.
+    :param tarball: Path of local tarball to delete.
    :type tarball: str
     :returns: nothing
     """
-    call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))
+    remove(tarball)
 
 
 def delete_framework_dir(node):
@@ -180,40 +206,56 @@
     :param node: Node to delete framework directory on.
     :type node: dict
     """
-    logger.console('Deleting framework directory on {0}'
-                   .format(node['host']))
-    ssh = SSH()
-    ssh.connect(node)
-    (ret_code, _, _) = ssh.exec_command(
-        'sudo rm -rf {0}'
-        .format(con.REMOTE_FW_DIR), timeout=100)
-    if ret_code != 0:
-        raise RuntimeError('Deleting framework directory on {0} failed'
-                           .format(node))
-
-
-def cleanup_node(node):
-    """Run all clean-up methods for a node.
-
-    This method is used as map_async parameter. It receives tuple with all
-    parameters as passed to map_async function.
-
-    :param node: Node to do cleanup on.
+    logger.console(
+        f"Deleting framework directory on {node[u'type']} host {node[u'host']},"
+        f" port {node[u'port']} starts."
+    )
+    exec_cmd_no_error(
+        node, f"sudo rm -rf {con.REMOTE_FW_DIR}",
+        message=f"Framework delete failed at node {node[u'type']} "
+        f"host {node[u'host']}, port {node[u'port']}",
+        timeout=100, include_reason=True
+    )
+    logger.console(
+        f"Deleting framework directory on {node[u'type']} host {node[u'host']},"
+        f" port {node[u'port']} done."
+    )
+
+
+def cleanup_node(node, results=None, logs=None):
+    """Delete the framework directory from a node.
+
+    :param node: A node where the framework directory will be deleted.
+    :param results: A list to store the result of node cleanup, optional.
+    :param logs: A list to store anything that should be logged.
     :type node: dict
+    :type results: list
+    :type logs: list
     :returns: True - success, False - error
     :rtype: bool
     """
     try:
         delete_framework_dir(node)
-    except RuntimeError:
-        logger.error("Cleanup of node {0} failed".format(node['host']))
-        return False
+    except Exception:
+        err_msg = f"Cleanup of node {node[u'type']} host {node[u'host']}, " \
+            f"port {node[u'port']} failed."
+        logger.console(err_msg)
+        if isinstance(logs, list):
+            logs.append(f"{err_msg} Exception: {traceback.format_exc()}")
+        result = False
     else:
-        logger.console('Cleanup of node {0} done'.format(node['host']))
-        return True
+        logger.console(
+            f"Cleanup of node {node[u'type']} host {node[u'host']}, "
+            f"port {node[u'port']} done."
+        )
+        result = True
+
+    if isinstance(results, list):
+        results.append(result)
+    return result
 
-class SetupFramework(object):
+
+class SetupFramework:
     """Setup suite run on topology nodes.
 
     Many VAT/CLI based tests need the scripts at remote hosts before executing
@@ -231,68 +273,80 @@
         """
 
         tarball = pack_framework_dir()
-        msg = 'Framework packed to {0}'.format(tarball)
+        msg = f"Framework packed to {tarball}"
         logger.console(msg)
         logger.trace(msg)
-        remote_tarball = "/tmp/{0}".format(basename(tarball))
+        remote_tarball = f"{tarball}"
 
-        # Turn off logging since we use multiprocessing
-        log_level = BuiltIn().set_log_level('NONE')
-        params = ((tarball, remote_tarball, node) for node in nodes.values())
-        pool = Pool(processes=len(nodes))
-        result = pool.map_async(setup_node, params)
-        pool.close()
-        pool.join()
+        results = list()
+        logs = list()
+        threads = list()
 
-        # Turn on logging
-        BuiltIn().set_log_level(log_level)
+        for node in nodes.values():
+            args = node, tarball, remote_tarball, results, logs
+            thread = threading.Thread(target=setup_node, args=args)
+            thread.start()
+            threads.append(thread)
 
         logger.info(
-            'Executing node setups in parallel, waiting for processes to end')
-        result.wait()
+            u"Executing node setups in parallel, waiting for threads to end."
+        )
+
+        for thread in threads:
+            thread.join()
 
-        results = result.get()
-        node_success = all(results)
-        logger.info('Results: {0}'.format(results))
+        logger.info(f"Results: {results}")
+
+        for log in logs:
+            logger.trace(log)
 
         delete_local_tarball(tarball)
-        if node_success:
-            logger.console('All nodes are ready')
+
+        if all(results):
+            logger.console(u"All nodes are ready.")
+            for node in nodes.values():
+                logger.info(
+                    f"Setup of node {node[u'type']} host {node[u'host']}, "
+                    f"port {node[u'port']} done."
+                )
         else:
-            raise RuntimeError('Failed to setup framework')
+            raise RuntimeError(u"Failed to setup framework.")
 
 
-class CleanupFramework(object):
+class CleanupFramework:
     """Clean up suite run on topology nodes."""
 
     @staticmethod
     def cleanup_framework(nodes):
-        """Perform cleaning on each node.
+        """Perform cleanup on each node.
 
         :param nodes: Topology nodes.
         :type nodes: dict
         :raises RuntimeError: If cleanup framework failed.
         """
-        # Turn off logging since we use multiprocessing
-        log_level = BuiltIn().set_log_level('NONE')
-        params = (node for node in nodes.values())
-        pool = Pool(processes=len(nodes))
-        result = pool.map_async(cleanup_node, params)
-        pool.close()
-        pool.join()
-
-        # Turn on logging
-        BuiltIn().set_log_level(log_level)
+        results = list()
+        logs = list()
+        threads = list()
+
+        for node in nodes.values():
+            thread = threading.Thread(target=cleanup_node,
+                                      args=(node, results, logs))
+            thread.start()
+            threads.append(thread)
 
         logger.info(
-            'Executing node cleanups in parallel, waiting for processes to end')
-        result.wait()
+            u"Executing node cleanups in parallel, waiting for threads to end."
+        )
+
+        for thread in threads:
+            thread.join()
+
+        logger.info(f"Results: {results}")
 
-        results = result.get()
-        node_success = all(results)
-        logger.info('Results: {0}'.format(results))
+        for log in logs:
+            logger.trace(log)
 
-        if node_success:
-            logger.console('All nodes cleaned up')
+        if all(results):
+            logger.console(u"All nodes cleaned up.")
         else:
-            raise RuntimeError('Failed to cleaned up framework')
+            raise RuntimeError(u"Failed to clean up framework.")
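
Note for reviewers: below is a minimal, self-contained sketch of the fan-out pattern this change adopts in setup_framework and cleanup_framework: one worker thread per node, with shared results/logs lists that the parent reads only after join(), replacing the multiprocessing Pool. The do_work function and the fake nodes list are illustrative stand-ins, not part of this library.

    import threading

    def do_work(node, results, logs):
        """Illustrative stand-in for setup_node/cleanup_node."""
        try:
            # The real workers copy and extract the tarball here
            # (or delete the framework directory on cleanup).
            logs.append(f"processed host {node[u'host']}")
            result = True
        except Exception as exc:
            # An exception cannot propagate out of a thread, so the
            # worker records a failure flag instead of raising.
            logs.append(f"host {node[u'host']} failed: {exc!r}")
            result = False
        results.append(result)

    nodes = [{u"host": u"10.0.0.1"}, {u"host": u"10.0.0.2"}]
    results, logs, threads = list(), list(), list()
    for node in nodes:
        thread = threading.Thread(target=do_work, args=(node, results, logs))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()  # Read the shared lists only after all workers finish.
    if not all(results):
        raise RuntimeError(u"Failed to set up all nodes.")
    for log in logs:
        print(log)

Appending to a list is atomic under CPython's GIL, so the shared results and logs lists need no explicit lock; the commit likewise has the workers collect log text and leaves the logger.trace calls to the parent thread after all joins complete.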