-# Copyright (c) 2016 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
supposed to end up here.
"""
-from shlex import split
-from subprocess import Popen, PIPE, call
-from multiprocessing import Pool
-from tempfile import NamedTemporaryFile
+from os import environ, remove
from os.path import basename
-from os import environ
+from tempfile import NamedTemporaryFile
+import threading
from robot.api import logger
-from robot.libraries.BuiltIn import BuiltIn
-from resources.libraries.python.ssh import SSH
-from resources.libraries.python.constants import Constants as con
+from resources.libraries.python.Constants import Constants as con
+from resources.libraries.python.ssh import exec_cmd_no_error, scp_node
+from resources.libraries.python.LocalExecution import run
from resources.libraries.python.topology import NodeType
__all__ = ["SetupFramework"]
directory = None
if directory is not None:
- tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-",
+ tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-",
dir="{0}".format(directory))
else:
- tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="openvpp-testing-")
+ tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="csit-testing-")
file_name = tmpfile.name
tmpfile.close()
- proc = Popen(
- split("tar --sparse --exclude-vcs "
- "--exclude=./tmp --exclude=*.deb -zcf {0} .".
- format(file_name)), stdout=PIPE, stderr=PIPE)
- (stdout, stderr) = proc.communicate()
-
- logger.debug(stdout)
- logger.debug(stderr)
-
- return_code = proc.wait()
- if return_code != 0:
- raise Exception("Could not pack testing framework.")
+ run(["tar", "--sparse", "--exclude-vcs", "--exclude=output*.xml",
+ "--exclude=./tmp", "-zcf", file_name, "."],
+ msg="Could not pack testing framework")
return file_name
:type node: dict
:returns: nothing
"""
- logger.console('Copying tarball to {0}'.format(node['host']))
- ssh = SSH()
- ssh.connect(node)
-
- ssh.scp(tarball, "/tmp/")
+ host = node['host']
+ logger.console('Copying tarball to {0} starts.'.format(host))
+ scp_node(node, tarball, "/tmp/")
+ logger.console('Copying tarball to {0} done.'.format(host))
def extract_tarball_at_node(tarball, node):
:type tarball: str
:type node: dict
:returns: nothing
- :raises Excpetion: When failed to unpack tarball.
+ :raises RuntimeError: When failed to unpack tarball.
"""
- logger.console('Extracting tarball to {0} on {1}'.format(
- con.REMOTE_FW_DIR, node['host']))
- ssh = SSH()
- ssh.connect(node)
-
- cmd = 'sudo rm -rf {1}; mkdir {1} ; tar -zxf {0} -C {1}; ' \
- 'rm -f {0}'.format(tarball, con.REMOTE_FW_DIR)
- (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=30)
- if ret_code != 0:
- logger.error('Unpack error: {0}'.format(stderr))
- raise Exception('Failed to unpack {0} at node {1}'.format(
- tarball, node['host']))
+ host = node['host']
+ logger.console('Extracting tarball to {0} on {1} starts.'
+ .format(con.REMOTE_FW_DIR, host))
+ exec_cmd_no_error(
+ node, "sudo rm -rf {1}; mkdir {1}; tar -zxf {0} -C {1};"
+ " rm -f {0}".format(tarball, con.REMOTE_FW_DIR),
+ message='Failed to extract {0} at node {1}'.format(tarball, host),
+ timeout=30, include_reason=True)
+ logger.console('Extracting tarball to {0} on {1} done.'
+ .format(con.REMOTE_FW_DIR, host))
def create_env_directory_at_node(node):
:param node: Node to create virtualenv on.
:type node: dict
:returns: nothing
- :raises Exception: When failed to setup virtualenv.
+ :raises RuntimeError: When failed to setup virtualenv.
"""
- logger.console('Extracting virtualenv, installing requirements.txt '
- 'on {0}'.format(node['host']))
- ssh = SSH()
- ssh.connect(node)
- (ret_code, stdout, stderr) = ssh.exec_command(
- 'cd {0} && rm -rf env && '
- 'virtualenv --system-site-packages --never-download env && '
- '. env/bin/activate && '
- 'pip install -r requirements.txt'
- .format(con.REMOTE_FW_DIR), timeout=100)
- if ret_code != 0:
- logger.error('Virtualenv creation error: {0}'.format(stdout + stderr))
- raise Exception('Virtualenv setup failed')
- else:
- logger.console('Virtualenv created on {0}'.format(node['host']))
-
-
-# pylint: disable=broad-except
-def setup_node(args):
- """Run all set-up methods for a node.
-
- This method is used as map_async parameter. It receives tuple with all
- parameters as passed to map_async function.
-
- :param args: All parameters needed to setup one node.
- :type args: tuple
+ host = node['host']
+ logger.console('Virtualenv setup including requirements.txt on {0} starts.'
+ .format(host))
+ exec_cmd_no_error(
+ node, 'cd {0} && rm -rf env'
+ ' && virtualenv --system-site-packages --never-download env'
+ ' && source env/bin/activate && pip install -r requirements.txt'
+ .format(con.REMOTE_FW_DIR), timeout=100, include_reason=True,
+ message="Failed to install at node {host}".format(host=host))
+ logger.console('Virtualenv setup on {0} done.'.format(host))
+
+
+def setup_node(node, tarball, remote_tarball, results=None):
+ """Copy a tarball to a node and extract it.
+
+ :param node: A node where the tarball will be copied and extracted.
+ :param tarball: Local path of tarball to be copied.
+ :param remote_tarball: Remote path of the tarball.
+ :param results: A list where to store the result of node setup, optional.
+ :type node: dict
+ :type tarball: str
+ :type remote_tarball: str
+ :type results: list
:returns: True - success, False - error
:rtype: bool
"""
- tarball, remote_tarball, node = args
+ host = node['host']
try:
copy_tarball_to_node(tarball, node)
extract_tarball_at_node(remote_tarball, node)
if node['type'] == NodeType.TG:
create_env_directory_at_node(node)
- except Exception as exc:
- logger.error("Node {0} setup failed, error:'{1}'".format(node['host'],
- exc.message))
- return False
+ except RuntimeError as exc:
+ logger.console("Node {node} setup failed, error: {err!r}".format(
+ node=host, err=exc))
+ result = False
else:
- logger.console('Setup of node {0} done'.format(node['host']))
- return True
+ logger.console('Setup of node {ip} done.'.format(ip=host))
+ result = True
+
+ if isinstance(results, list):
+ results.append(result)
+ return result
def delete_local_tarball(tarball):
"""Delete local tarball to prevent disk pollution.
- :param tarball: Path to tarball to upload.
+ :param tarball: Path of local tarball to delete.
:type tarball: str
:returns: nothing
"""
- call(split('sh -c "rm {0} > /dev/null 2>&1"'.format(tarball)))
+ remove(tarball)
-def delete_openvpp_testing_stuff(node):
- """Delete openvpp-testing directory and tarball in /tmp/ on given node.
+def delete_framework_dir(node):
+ """Delete framework directory in /tmp/ on given node.
- :param node: Node to delete openvpp-testing stuff on.
+ :param node: Node to delete framework directory on.
:type node: dict
"""
- logger.console('Deleting openvpp-testing directory and tarball on {0}'
- .format(node['host']))
- ssh = SSH()
- ssh.connect(node)
- (ret_code, stdout, stderr) = ssh.exec_command(
- 'cd {0} && sudo rm -rf openvpp-testing*'.format(
- con.REMOTE_FW_DIR), timeout=100)
- if ret_code != 0:
- logger.console('Deleting opvenvpp-testing stuff failed on node {0}: {1}'
- .format(node, stdout + stderr))
-
-
-def remove_env_directory_at_node(node):
- """Remove virtualenv directory on given node.
-
- :param node: Node to remove virtualenv on.
- :type node: dict
- """
- logger.console('Removing virtualenv directory on {0}'.format(node['host']))
- ssh = SSH()
- ssh.connect(node)
- (ret_code, stdout, stderr) = ssh.exec_command(
- 'cd {0} && sudo rm -rf openvpp-testing*'
- .format(con.REMOTE_FW_DIR), timeout=100)
- if ret_code != 0:
- logger.console('Virtualenv removing failed on node {0}: {1}'.format(
- node, stdout + stderr))
-
-
-# pylint: disable=broad-except
-def cleanup_node(node):
- """Run all clean-up methods for a node.
-
- This method is used as map_async parameter. It receives tuple with all
- parameters as passed to map_async function.
-
- :param node: Node to do cleanup on.
+ host = node['host']
+ logger.console(
+ 'Deleting framework directory on {0} starts.'.format(host))
+ exec_cmd_no_error(
+ node, 'sudo rm -rf {0}'.format(con.REMOTE_FW_DIR),
+ message="Framework delete failed at node {host}".format(host=host),
+ timeout=100, include_reason=True)
+ logger.console(
+ 'Deleting framework directory on {0} done.'.format(host))
+
+
+def cleanup_node(node, results=None):
+ """Delete the framework directory from a node.
+
+ :param node: A node where the framework directory will be deleted.
+ :param results: A list where to store the result of node cleanup, optional.
:type node: dict
+ :type results: list
:returns: True - success, False - error
:rtype: bool
"""
+ host = node['host']
try:
- delete_openvpp_testing_stuff(node)
- if node['type'] == NodeType.TG:
- remove_env_directory_at_node(node)
- except Exception as exc:
- logger.error("Node {0} cleanup failed, error:'{1}'".format(
- node['host'], exc.message))
- return False
+ delete_framework_dir(node)
+ except RuntimeError:
+ logger.error("Cleanup of node {0} failed.".format(host))
+ result = False
else:
- logger.console('Cleanup of node {0} done'.format(node['host']))
- return True
+ logger.console('Cleanup of node {0} done.'.format(host))
+ result = True
+
+ if isinstance(results, list):
+ results.append(result)
+ return result
class SetupFramework(object):
:param nodes: Topology nodes.
:type nodes: dict
+ :raises RuntimeError: If framework setup fails.
"""
tarball = pack_framework_dir()
logger.trace(msg)
remote_tarball = "/tmp/{0}".format(basename(tarball))
- # Turn off logging since we use multiprocessing
- log_level = BuiltIn().set_log_level('NONE')
- params = ((tarball, remote_tarball, node) for node in nodes.values())
- pool = Pool(processes=len(nodes))
- result = pool.map_async(setup_node, params)
- pool.close()
- pool.join()
+ results = []
+ threads = []
- # Turn on logging
- BuiltIn().set_log_level(log_level)
+ for node in nodes.values():
+ args = node, tarball, remote_tarball, results
+ thread = threading.Thread(target=setup_node, args=args)
+ thread.start()
+ threads.append(thread)
logger.info(
- 'Executing node setups in parallel, waiting for processes to end')
- result.wait()
+ 'Executing node setups in parallel, waiting for threads to end')
- logger.info('Results: {0}'.format(result.get()))
+ for thread in threads:
+ thread.join()
+
+ logger.info('Results: {0}'.format(results))
- logger.trace('Test framework copied to all topology nodes')
delete_local_tarball(tarball)
- logger.console('All nodes are ready')
+ if all(results):
+ logger.console('All nodes are ready.')
+ for node in nodes.values():
+ logger.info('Setup of {type} node {ip} done.'.
+ format(type=node['type'], ip=node['host']))
+ else:
+ raise RuntimeError('Failed to setup framework.')
class CleanupFramework(object):
@staticmethod
def cleanup_framework(nodes):
- """Perform cleaning on each node.
+ """Perform cleanup on each node.
:param nodes: Topology nodes.
:type nodes: dict
+ :raises RuntimeError: If framework cleanup fails.
"""
- # Turn off logging since we use multiprocessing
- log_level = BuiltIn().set_log_level('NONE')
- params = (node for node in nodes.values())
- pool = Pool(processes=len(nodes))
- result = pool.map_async(cleanup_node, params)
- pool.close()
- pool.join()
- # Turn on logging
- BuiltIn().set_log_level(log_level)
+ results = []
+ threads = []
+
+ for node in nodes.values():
+ thread = threading.Thread(target=cleanup_node,
+ args=(node, results))
+ thread.start()
+ threads.append(thread)
logger.info(
- 'Executing node cleanups in parallel, waiting for processes to end')
- result.wait()
+ 'Executing node cleanups in parallel, waiting for threads to end.')
+
+ for thread in threads:
+ thread.join()
- logger.info('Results: {0}'.format(result.get()))
+ logger.info('Results: {0}'.format(results))
- logger.trace('All topology nodes cleaned up')
- logger.console('All nodes cleaned up')
+ if all(results):
+ logger.console('All nodes cleaned up.')
+ else:
+ raise RuntimeError('Failed to clean up framework.')