cat /etc/hostname
cat /etc/hosts
+JOB_ARCHIVE_ARTIFACTS=(log.html output.xml report.html *.log)
+LOG_ARCHIVE_ARTIFACTS=(log.html output.xml report.html *.log)
+JOB_ARCHIVE_DIR="archive"
+LOG_ARCHIVE_DIR="$WORKSPACE/archives"
+mkdir -p ${JOB_ARCHIVE_DIR}
+mkdir -p ${LOG_ARCHIVE_DIR}
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PYTHONPATH=${SCRIPT_DIR}
# Remove unnecessary log files
rm -f ${partial_logs}
+# Archive JOB artifacts in jenkins
+for i in ${JOB_ARCHIVE_ARTIFACTS[@]}; do
+ cp $( readlink -f ${i} | tr '\n' ' ' ) ${JOB_ARCHIVE_DIR}/
+done
+# Archive JOB artifacts to logs.fd.io
+for i in ${LOG_ARCHIVE_ARTIFACTS[@]}; do
+ cp $( readlink -f ${i} | tr '\n' ' ' ) ${LOG_ARCHIVE_DIR}/
+done
+
echo Post-processing finished.
if [ ${RC} -eq 0 ]; then
import os
import glob
+from resources.libraries.python.ssh import SSH
from resources.libraries.python.ssh import exec_cmd, exec_cmd_no_error
from resources.libraries.python.DMM.DMMConstants import DMMConstants as con
from resources.libraries.python.topology import Topology
cmd = 'cd {0}/{1} && ./{2} log 1'\
.format(con.REMOTE_FW_DIR, con.DMM_RUN_SCRIPTS, script_name)
exec_cmd(dut2_node, cmd)
+ cmd = 'mv /var/log/nStack/running.log /var/log/nStack/{0}_ser.log'\
+ .format(script_name)
+ exec_cmd(dut1_node, cmd, sudo=True)
+ cmd = 'mv /var/log/nStack/running.log /var/log/nStack/{0}_cli.log'\
+ .format(script_name)
+ exec_cmd(dut2_node, cmd, sudo=True)
@staticmethod
def cleanup_dmm_dut(dut1_node, dut2_node, script_name):
(stdout, _) = exec_cmd_no_error(dut_node, cmd)
interface_name = stdout.split(' ', 1)[0]
return interface_name
+
+    @staticmethod
+    def get_logs_from_node(dut_node):
+        """
+        Copy nStack and app log files from the node to the test executor
+        machine, then remove them from the node.
+        :param dut_node: Node to retrieve the logs from.
+        :type dut_node: dict
+        """
+        ssh = SSH()
+        ssh.connect(dut_node)
+        ssh.scp(".", '/var/log/nStack/*.log',
+                get=True, timeout=60, wildcard=True)
+
+        (ret, _, _) = exec_cmd(dut_node, 'ls -l /var/log/app*.log')
+        if ret == 0:
+            ssh.scp(".", '/var/log/app*.log',
+                    get=True, timeout=60, wildcard=True)
+
+        exec_cmd(dut_node, 'rm -rf /var/log/nStack/*.log', sudo=True)
+        exec_cmd(dut_node, 'rm -rf /var/log/app*.log', sudo=True)
+
+    @staticmethod
+    def archive_dmm_logs(dut1_node, dut2_node):
+        """
+        Get logs from both DUT nodes to the test executor machine.
+
+        :param dut1_node: DUT1 node.
+        :param dut2_node: DUT2 node.
+        :type dut1_node: dict
+        :type dut2_node: dict
+        """
+        SingleCliSer.get_logs_from_node(dut1_node)
+        SingleCliSer.get_logs_from_node(dut2_node)
"""
chan.close()
- def scp(self, local_path, remote_path, get=False, timeout=30):
+ def scp(self, local_path, remote_path, get=False, timeout=30,
+ wildcard=False):
"""Copy files from local_path to remote_path or vice versa.
connect() method has to be called first!
path to remote file which should be downloaded.
:param get: scp operation to perform. Default is put.
:param timeout: Timeout value in seconds.
+        :param wildcard: Whether the path contains wildcard characters. Default is False.
:type local_path: str
:type remote_path: str
:type get: bool
:type timeout: int
+ :type wildcard: bool
"""
if not get:
logger.trace('SCP {0} to {1}:{2}'.format(
self._ssh.get_transport().getpeername(), remote_path,
local_path))
# SCPCLient takes a paramiko transport as its only argument
- scp = SCPClient(self._ssh.get_transport(), socket_timeout=timeout)
+ if not wildcard:
+ scp = SCPClient(self._ssh.get_transport(), socket_timeout=timeout)
+ else:
+ scp = SCPClient(self._ssh.get_transport(), sanitize=lambda x: x,
+ socket_timeout=timeout)
start = time()
if not get:
scp.put(local_path, remote_path)
| Resource | resources/libraries/robot/shared/default.robot
| Resource | resources/libraries/robot/shared/interfaces.robot
| Library | resources.libraries.python.DMM.SetupDMMTest
+| Library | resources.libraries.python.DMM.SingleCliSer
| Suite Setup | Setup DMM Test | ${nodes}
+| Suite Teardown | Archive DMM logs | ${nodes['DUT1']} | ${nodes['DUT2']}
\ No newline at end of file