From 01d8f262afc567c3d49a23c3cb2cdeaced8a6887 Mon Sep 17 00:00:00 2001 From: Vratko Polak Date: Wed, 15 Dec 2021 17:14:36 +0100 Subject: [PATCH] UTI: Export results + Model version 1.0.0. - Only some result types are exported. + MRR, NDRPDR and SOAK. - Other result types to be added later. + In contrast, all test types are detected. + Convert custom classes to JSON-serializable equivalents. + Sort dict keys before converting to JSON. + Override the order for some known keys. + Export sets as sorted arrays. + Convert to info content from serialized raw content. + Also export outputs for suite setups and teardowns. + Info files for setup/teardown exist only temporarily. + The data is merged into suite.info.json file. + This simplifies presentation of total suite duration. + Define model via JSON schema: - Just test case, suite setup/teardown/suite to be added later. - Just info, raw to be added later. + Proper descriptions. + Json is generated from yaml. + This is a convenience for maintainers. + The officially used schema is the .json one. + TODOs written into a separate .txt file. + Validate exported instance against the schema. + Include format checking. + Update CSIT requirements for validation dependencies. + This needs python-dateutil==2.8.2, only a patch bump. + Compute bandwidth also for soak tests. + This unifies with NDRPDR to simplify schema definition. - PAL may need an update for parsing soak test message. + Include SSH log items, raw output only. + Generate all outputs in a single filesystem tree. + Move raw outputs into test_output_raw.tar.xz. + Rename existing tar with suites to generated_robot_files.tar.xz. Change-Id: I69ff7b330ed1a14dc435fd0ef008e753c0d7f78c Signed-off-by: Vratko Polak --- .../current/schema/test_case.info.schema.json | 571 ++++++++++++++++++ .../current/schema/test_case.info.schema.yaml | 637 +++++++++++++++++++++ docs/model/current/schema/todos.txt | 16 + docs/model/current/schema/yaml2json.py | 29 + docs/model/current/top.rst | 43 +- .../roles/csit_sut_image/files/Dockerfile | 2 +- .../roles/python_env/tasks/main.yaml | 2 +- requirements.txt | 15 +- resources/libraries/bash/function/common.sh | 29 +- resources/libraries/bash/function/per_patch.sh | 4 +- resources/libraries/python/Constants.py | 17 +- resources/libraries/python/DPDK/DPDKTools.py | 26 +- resources/libraries/python/SetupFramework.py | 8 +- resources/libraries/python/VPPUtil.py | 8 +- resources/libraries/python/model/ExportLog.py | 148 +++++ resources/libraries/python/model/ExportResult.py | 179 ++++++ resources/libraries/python/model/export_json.py | 238 ++++++++ resources/libraries/python/model/mem2raw.py | 145 +++++ resources/libraries/python/model/raw2info.py | 294 ++++++++++ resources/libraries/python/model/util.py | 69 +++ resources/libraries/python/model/validate.py | 73 +++ resources/libraries/python/ssh.py | 78 ++- .../robot/performance/performance_display.robot | 32 +- .../robot/performance/performance_utils.robot | 19 +- resources/libraries/robot/shared/default.robot | 11 + resources/libraries/robot/shared/suite_setup.robot | 10 + .../libraries/robot/shared/suite_teardown.robot | 2 + resources/libraries/robot/shared/test_setup.robot | 3 +- .../libraries/robot/shared/test_teardown.robot | 4 +- resources/tools/scripts/topo_reservation.py | 19 +- tests/__init__.robot | 37 ++ .../10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot | 2 + .../10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot | 2 + ...l-10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot | 2 + 
...l-10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot | 2 +
 tests/dpdk/perf/__init__.robot | 8 +-
 tests/trex/perf/__init__.robot | 8 +-
 ...1n1l-10ge2p1x710-ethip4-ip4base-tg-ndrpdr.robot | 1 +
 ...l-10ge2p1x710-ethip4-ip4scale2m-tg-ndrpdr.robot | 1 +
 ...cp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot | 1 +
 ...cp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot | 1 +
 ...4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot | 1 +
 ...4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot | 1 +
 ...dp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot | 1 +
 ...dp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot | 1 +
 ...4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot | 3 +-
 ...4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot | 1 +
 ...1n1l-10ge2p1x710-ethip6-ip6base-tg-ndrpdr.robot | 1 +
 ...l-10ge2p1x710-ethip6-ip6scale2m-tg-ndrpdr.robot | 1 +
 ...ge2p1x710-eth-l2bdscale1mmaclrn-tg-ndrpdr.robot | 1 +
 tests/vpp/device/__init__.robot | 8 +-
 tests/vpp/perf/__init__.robot | 8 +-
 52 files changed, 2764 insertions(+), 59 deletions(-)
 create mode 100644 docs/model/current/schema/test_case.info.schema.json
 create mode 100644 docs/model/current/schema/test_case.info.schema.yaml
 create mode 100644 docs/model/current/schema/todos.txt
 create mode 100644 docs/model/current/schema/yaml2json.py
 create mode 100644 resources/libraries/python/model/ExportLog.py
 create mode 100644 resources/libraries/python/model/ExportResult.py
 create mode 100644 resources/libraries/python/model/export_json.py
 create mode 100644 resources/libraries/python/model/mem2raw.py
 create mode 100644 resources/libraries/python/model/raw2info.py
 create mode 100644 resources/libraries/python/model/util.py
 create mode 100644 resources/libraries/python/model/validate.py
 create mode 100644 tests/__init__.robot

diff --git a/docs/model/current/schema/test_case.info.schema.json b/docs/model/current/schema/test_case.info.schema.json
new file mode 100644
index 0000000000..358a3e7582
--- /dev/null
+++ b/docs/model/current/schema/test_case.info.schema.json
@@ -0,0 +1,571 @@
+{
+  "$id": "https://fd.io/FIXME/CSIT/UTI/test_case/info/0.2.0",
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "description": "Schema for info output of test case.",
+  "allOf": [
+    {
+      "description": "The main structure, without conditional relations between fields yet.",
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "duration": {
+          "description": "A derived quantity, present only in info output files. Difference between start_time and end_time, in seconds.",
+          "$ref": "#/$defs/types/nonnegative_number"
+        },
+        "dut_type": {
+          "description": "DUT type used, e.g. VPP or DPDK.",
+          "type": "string"
+        },
+        "dut_version": {
+          "description": "Version string appropriate to DUT type used.",
+          "type": "string"
+        },
+        "end_time": {
+          "description": "UTC date and time in RFC 3339 format, specifying calendar time just before test case ended (at the end of test case teardown).",
+          "$ref": "#/$defs/types/date_time"
+        },
+        "hosts": {
+          "description": "Array of hosts this test interacted with. This can be used for identifying testbed number. Valid tests should interact with at least one DUT or TG. The array is usually sorted, but that is not a requirement.",
+          "type": "array",
+          "minItems": 1,
+          "items": {
+            "description": "Host identifier, usually numeric IPv4 address.",
+            "type": "string"
+          }
+        },
+        "log": {
+          "description": "No log items are implemented in the current version, but the (empty) list is present to simplify logic in multi-version importers.",
+          "$ref": "#/$defs/types/empty_array"
+        },
+        "message": {
+          "description": "If passed is true, this value is empty. Otherwise, value taken directly from TEST_MESSAGE Robot variable, read at the end of test case (in test teardown, before export and validation). It contains information from the exception that caused the failure, probably with additional exceptions from teardown keywords.",
+          "type": "string"
+        },
+        "passed": {
+          "description": "Value set according to TEST_STATUS Robot variable, true if and only if the status is \"PASS\". The status is read at the end of test case (in test teardown, before export and validation).",
+          "type": "boolean"
+        },
+        "result": {
+          "type": "object",
+          "allOf": [
+            {
+              "description": "Sub-schema common for all cases, only result type identifier defined here.",
+              "properties": {
+                "type": {
+                  "description": "Identifier of which result type case is applied.",
+                  "type": "string"
+                }
+              },
+              "required": [
+                "type"
+              ]
+            },
+            {
+              "oneOf": [
+                {
+                  "description": "Result type for unknown case. This case represents a test with no specific result (outside message), e.g. device test; or a test with result not parsed into this version of model yet, e.g. GSO test.",
+                  "additionalProperties": false,
+                  "properties": {
+                    "type": {
+                      "const": "unknown"
+                    }
+                  }
+                },
+                {
+                  "description": "Result type MRR case.",
+                  "additionalProperties": false,
+                  "properties": {
+                    "type": {
+                      "const": "mrr"
+                    },
+                    "receive_rate": {
+                      "description": "The results refer to receive rates for multiple MRR trials. For PPS, these are aggregate (bidirectional) rates. Currently, the tests are exporting approximated receive rates. That means the actual trial duration is measured (as opposed to trusting traffic generator to honor its target duration), so the resulting values contain noise from time measurement, and can be lower than the real performance (due to various time overheads). Bandwidth values are supported, but currently Robot does not export them.",
+                      "$ref": "#/$defs/types/rate_list_with_bandwidth"
+                    }
+                  },
+                  "required": [
+                    "type",
+                    "receive_rate"
+                  ]
+                },
+                {
+                  "description": "Result type NDRPDR case.",
+                  "additionalProperties": false,
+                  "properties": {
+                    "type": {
+                      "const": "ndrpdr"
+                    },
+                    "ndr": {
+                      "description": "The results refer to search for NDR (Non Drop Rate). For PPS, this is aggregate (bidirectional) rate. Each bound was used as the target load value in a full-duration trial measurement. The accepted loss ratio for NDR is exactly zero. Note that packets the Traffic Generator did not send are also counted as lost packets.",
+                      "$ref": "#/$defs/macros/lower_and_upper_rate"
+                    },
+                    "pdr": {
+                      "description": "The results refer to search for PDR (Partial Drop Rate). For PPS, this is aggregate (bidirectional) rate. Each bound was used as the target load value in a full-duration trial measurement. The accepted loss ratio for PDR is 0.5%. Note that packets the Traffic Generator did not send are also counted as lost packets.",
+                      "$ref": "#/$defs/macros/lower_and_upper_rate"
+                    },
+                    "latency_forward": {
+                      "description": "Object with results related to latency part of NDRPDR test, for forward traffic direction. It is the direction used in unidirectional traffic profiles. ASTF profiles and IMIX STL profiles do not support latency information, so for those tests this object is missing. It is also missing if Traffic Generator fails to return valid latency results for any other reasons, e.g. latency rate is too high for CPU/NIC used.",
+                      "$ref": "#/$defs/macros/latency_for_loads"
+                    },
+                    "latency_reverse": {
+                      "description": "Object with results related to latency part of NDRPDR test, for reverse traffic direction. This object is not present when unidirectional traffic profiles are used. ASTF profiles and IMIX STL profiles do not support latency information, so for those tests this object is missing. It is also missing if Traffic Generator fails to return valid latency results for any other reasons, e.g. latency rate is too high for CPU/NIC used.",
+                      "$ref": "#/$defs/macros/latency_for_loads"
+                    }
+                  },
+                  "required": [
+                    "type",
+                    "ndr",
+                    "pdr"
+                  ]
+                },
+                {
+                  "description": "Result type SOAK case.",
+                  "additionalProperties": false,
+                  "properties": {
+                    "type": {
+                      "const": "soak"
+                    },
+                    "critical_rate": {
+                      "description": "The results refer to Bayesian estimate of critical rate corresponding to average loss ratio of 10^-7. For PPS, this is aggregate (bidirectional) rate. The bounds are computed from trial measurement results, but are not equal to any target load used. Note that packets the Traffic Generator did not send are also counted as lost packets.",
+                      "$ref": "#/$defs/macros/lower_and_upper_rate"
+                    }
+                  },
+                  "required": [
+                    "type",
+                    "critical_rate"
+                  ]
+                }
+              ]
+            }
+          ]
+        },
+        "start_time": {
+          "description": "UTC date and time in RFC 3339 format, specifying calendar time just after test case started (at the start of test setup).",
+          "$ref": "#/$defs/types/date_time"
+        },
+        "tags": {
+          "description": "The list of strings comes directly from Robot variable TEST_TAGS. The content should include both static and dynamic tags at the end of test case (teardown).",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "test_documentation": {
+          "description": "Value taken directly from TEST_DOCUMENTATION Robot variable. The content is what you see in suite file at test case definition, which is usually empty as CSIT uses data driven test cases.",
+          "type": "string"
+        },
+        "test_id": {
+          "description": "A derived quantity, present only in info output files. It is the most complete and unique identifier for a test case. This property has a value of the following form: {suite_name}.{test_name} Here, suite name comes from SUITE_NAME robot variable, test name comes from TEST_NAME robot variable, but both are converted to lower case, and spaces are replaced by underscores.",
+          "type": "string",
+          "minLength": 3
+        },
+        "test_name_long": {
+          "description": "A derived quantity, present only in info output files. This property has a value of the following form: {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part} Here, suite part is very similar to suite tag, but additionally may contain a prefix describing NIC driver used (if it is not the default one, drv_vfio_pci for VPP tests). Any space is replaced by underscore and letters are lower case.",
+          "type": "string",
+          "minLength": 3
+        },
+        "test_name_short": {
+          "description": "A derived quantity, present only in info output files. This property has a value very similar to suite tag, but additionally may contain a prefix describing NIC driver used (if it is not the default one, drv_vfio_pci for VPP tests). Any space is replaced by underscore and letters are lower case.",
+          "type": "string",
+          "minLength": 3
+        },
+        "test_type": {
+          "description": "A derived quantity, present only in info output files. Test type identifier, PAL uses it to group similar tests, e.g. for comparison tables. Ideally, this information should be parseable from test name, but the current naming scheme is not simple/consistent enough. The current implementation queries the robot test tags. The resulting value is frequently identical to result type, but this schema version does not require any relation there, as PAL may want to group tests differently.",
+          "type": "string",
+          "enum": [
+            "device",
+            "gso",
+            "hoststack",
+            "mrr",
+            "ndrpdr",
+            "reconf",
+            "soak",
+            "vsap"
+          ]
+        },
+        "version": {
+          "description": "CSIT model version (semver format) the exporting code adhered to.",
+          "type": "string",
+          "const": "1.0.0"
+        }
+      },
+      "required": [
+        "duration",
+        "dut_type",
+        "dut_version",
+        "end_time",
+        "hosts",
+        "log",
+        "message",
+        "passed",
+        "result",
+        "start_time",
+        "tags",
+        "test_documentation",
+        "test_id",
+        "test_name_long",
+        "test_name_short",
+        "test_type",
+        "version"
+      ]
+    },
+    {
+      "description": "Subschema validating relation between status and message.",
+      "oneOf": [
+        {
+          "description": "Subschema for passing tests, message has to be empty.",
+          "type": "object",
+          "properties": {
+            "passed": {
+              "const": true
+            },
+            "message": {
+              "const": ""
+            }
+          }
+        },
+        {
+          "description": "Subschema for failing tests, message cannot be empty.",
+          "type": "object",
+          "properties": {
+            "passed": {
+              "const": false
+            },
+            "message": {
+              "minLength": 1
+            }
+          }
+        }
+      ]
+    },
+    {
+      "description": "Subschema validating relation between dut_type and dut_version.",
+      "oneOf": [
+        {
+          "description": "Subschema for tests with no DUT, e.g. TRex self-test.",
+          "type": "object",
+          "properties": {
+            "dut_type": {
+              "const": "none"
+            },
+            "dut_version": {
+              "const": ""
+            }
+          }
+        },
+        {
+          "description": "Subschema for DUT type VPP.",
+          "type": "object",
+          "properties": {
+            "dut_type": {
+              "const": "VPP"
+            },
+            "dut_version": {
+              "minLength": 1
+            }
+          }
+        },
+        {
+          "description": "Subschema for DUT type DPDK.",
+          "type": "object",
+          "properties": {
+            "dut_type": {
+              "const": "DPDK"
+            },
+            "dut_version": {
+              "minLength": 1
+            }
+          }
+        }
+      ]
+    }
+  ],
+  "$defs": {
+    "types": {
+      "nonnegative_number": {
+        "type": "number",
+        "minimum": 0
+      },
+      "positive_number": {
+        "type": "number",
+        "minimum": 1
+      },
+      "nonnegative_integer": {
+        "type": "integer",
+        "minimum": 0
+      },
+      "positive_integer": {
+        "type": "integer",
+        "minimum": 1
+      },
+      "date_time": {
+        "type": "string",
+        "format": "date-time"
+      },
+      "empty_array": {
+        "type": "array",
+        "maxItems": 0
+      },
+      "rate_unit": {
+        "description": "Packets per second (pps) or connections per second (cps).",
+        "type": "string",
+        "enum": [
+          "pps",
+          "cps"
+        ]
+      },
+      "bandwidth_unit": {
+        "description": "Unit of measurement for bandwidth values. Currently a constant, but later versions of model may allow more units.",
+        "enum": [
+          "bps"
+        ]
+      },
+      "value_with_unit": {
+        "description": "Reusable composite type, value together with its unit of measurement.",
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "value": {
+            "description": "Numeric value, context specified elsewhere. The only assumption is that value is not negative.",
+            "$ref": "#/$defs/types/nonnegative_number"
+          },
+          "unit": {
+            "description": "Unit of measurement for the value. Context and allowed values are specified elsewhere.",
+            "type": "string"
+          }
+        },
+        "required": [
+          "value",
+          "unit"
+        ]
+      },
+      "rate_without_bandwidth": {
+        "description": "Reusable type, for various rate quantities.",
+        "allOf": [
+          {
+            "$ref": "#/$defs/types/value_with_unit"
+          },
+          {
+            "properties": {
+              "value": {
+                "description": "Unless specified otherwise, this is the aggregated rate (sum of both traffic directions). Depending on the usage, the value can express intended load, offered load, receive rate, and various approximations or estimated bounds thereof."
+              },
+              "unit": {
+                "description": "A transaction rate unit the value is expressed in.",
+                "$ref": "#/$defs/types/rate_unit"
+              }
+            }
+          }
+        ]
+      },
+      "bandwidth": {
+        "description": "Reusable type, for various bandwidth quantities.",
+        "allOf": [
+          {
+            "$ref": "#/$defs/types/value_with_unit"
+          },
+          {
+            "properties": {
+              "value": {
+                "description": "Bandwidth value computed from the corresponding rate."
+              },
+              "unit": {
+                "$ref": "#/$defs/types/bandwidth_unit"
+              }
+            }
+          }
+        ]
+      },
+      "rate_with_bandwidth": {
+        "description": "Reusable composite type, joining primary rate with optional derived bandwidth. Not all test types currently compute bandwidth, even if rate unit is pps.",
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "rate": {
+            "$ref": "#/$defs/types/rate_without_bandwidth"
+          },
+          "bandwidth": {
+            "$ref": "#/$defs/types/bandwidth"
+          }
+        },
+        "required": [
+          "rate"
+        ]
+      },
+      "value_list_with_unit_and_stats": {
+        "description": "Reusable composite type, multiple values together with their unit of measurement and derived statistics.",
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "values": {
+            "description": "List of values of the same unit, useful for MRR.",
+            "type": "array",
+            "minItems": 1,
+            "items": {
+              "description": "Numeric value, context specified elsewhere. The only assumption is that the value is nonnegative.",
+              "$ref": "#/$defs/types/nonnegative_number"
+            }
+          },
+          "avg": {
+            "description": "A derived quantity, present only in info output files. It is the arithmetic average of the values list.",
+            "$ref": "#/$defs/types/nonnegative_number"
+          },
+          "stdev": {
+            "description": "A derived quantity, present only in info output files. It is the standard deviation for the values list, as computed by jumpavg library.",
+            "$ref": "#/$defs/types/nonnegative_number"
+          },
+          "unit": {
+            "description": "Unit of measurement for the values. Context and allowed values are specified elsewhere.",
+            "type": "string"
+          }
+        },
+        "required": [
+          "values",
+          "avg",
+          "stdev",
+          "unit"
+        ]
+      },
+      "rate_list_without_bandwidth": {
+        "description": "Reusable composite type, multiple rate values.",
+        "allOf": [
+          {
+            "$ref": "#/$defs/types/value_list_with_unit_and_stats"
+          },
+          {
+            "properties": {
+              "values": {
+                "items": {
+                  "description": "Unless specified otherwise, this is the aggregated rate (sum of both traffic directions). Depending on the usage, the value can express intended load, offered load, receive rate, and various approximations or estimated bounds thereof."
+                }
+              },
+              "unit": {
+                "$ref": "#/$defs/types/rate_unit"
+              }
+            }
+          }
+        ]
+      },
+      "bandwidth_list": {
+        "description": "Reusable composite type, multiple bandwidth values. This is a derived entity, thus it only appears in info output, and only if rate unit is pps.",
+        "allOf": [
+          {
+            "$ref": "#/$defs/types/value_list_with_unit_and_stats"
+          },
+          {
+            "properties": {
+              "values": {
+                "items": {
+                  "description": "Unless specified otherwise, this is the aggregated bandwidth (sum of both traffic directions). Depending on the usage, the value can express intended load, offered load, receive rate, and various approximations or estimated bounds thereof."
+                }
+              },
+              "unit": {
+                "$ref": "#/$defs/types/bandwidth_unit"
+              }
+            }
+          }
+        ]
+      },
+      "rate_list_with_bandwidth": {
+        "description": "Reusable composite type, joining primary rates with optional derived bandwidths (and stats). No test type currently computes the bandwidth part.",
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "rate": {
+            "$ref": "#/$defs/types/rate_list_without_bandwidth"
+          },
+          "bandwidth": {
+            "$ref": "#/$defs/types/bandwidth_list"
+          }
+        },
+        "required": [
+          "rate"
+        ]
+      }
+    },
+    "macros": {
+      "lower_and_upper_rate": {
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "lower": {
+            "description": "The lower bound (or min_rate) for the estimate of a particular searched value.",
+            "$ref": "#/$defs/types/rate_with_bandwidth"
+          },
+          "upper": {
+            "description": "The upper bound (or max_rate) for the estimate of a particular searched value.",
+            "$ref": "#/$defs/types/rate_with_bandwidth"
+          }
+        },
+        "required": [
+          "lower",
+          "upper"
+        ]
+      },
+      "latency_numbers": {
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "min": {
+            "description": "Rounded minimal latency time measured in this trial. See unit property for the unit of measurement.",
+            "$ref": "#/$defs/types/nonnegative_integer"
+          },
+          "max": {
+            "description": "Rounded maximal latency time measured in this trial. See unit property for the unit of measurement. Zero value is not allowed, as that is one of the symptoms of Traffic Generator failing to get proper latency.",
+            "$ref": "#/$defs/types/positive_integer"
+          },
+          "avg": {
+            "description": "Rounded average latency time measured in this trial. See unit property for the unit of measurement.",
+            "$ref": "#/$defs/types/nonnegative_integer"
+          },
+          "hdrh": {
+            "description": "Base64-encoded compressed representation of HDRHistogram of all latency sample times encountered in this latency trial. See unit property for the unit of measurement. Note that some bins can be several units wide.",
+            "type": "string"
+          },
+          "unit": {
+            "description": "Unit of measurement for latency times. Currently a constant, but later versions of the model may allow more values.",
+            "type": "string",
+            "enum": [
+              "us"
+            ]
+          }
+        },
+        "required": [
+          "avg",
+          "hdrh",
+          "max",
+          "min",
+          "unit"
+        ]
+      },
+      "latency_for_loads": {
+        "type": "object",
+        "additionalProperties": false,
+        "properties": {
+          "pdr_0": {
+            "description": "Object related to latency measurement performed at minimal rate (currently 9000 pps per direction).",
+            "$ref": "#/$defs/macros/latency_numbers"
+          },
+          "pdr_10": {
+            "description": "Object related to latency measurement performed at 10% of PDR lower bound, if needed rounded up to minimal rate (currently 9000 pps per direction).",
+            "$ref": "#/$defs/macros/latency_numbers"
+          },
+          "pdr_50": {
+            "description": "Object related to latency measurement performed at 50% of PDR lower bound, if needed rounded up to minimal rate (currently 9000 pps per direction).",
+            "$ref": "#/$defs/macros/latency_numbers"
+          },
+          "pdr_90": {
+            "description": "Object related to latency measurement performed at 90% of PDR lower bound, if needed rounded up to minimal rate (currently 9000 pps per direction).",
+            "$ref": "#/$defs/macros/latency_numbers"
+          }
+        },
+        "required": [
+          "pdr_0",
+          "pdr_10",
+          "pdr_50",
+          "pdr_90"
+        ]
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/docs/model/current/schema/test_case.info.schema.yaml b/docs/model/current/schema/test_case.info.schema.yaml
new file mode 100644
index 0000000000..7771d7afec
--- /dev/null
+++ b/docs/model/current/schema/test_case.info.schema.yaml
@@ -0,0 +1,637 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+$id: https://fd.io/FIXME/CSIT/UTI/test_case/info/0.2.0
+$schema: https://json-schema.org/draft/2020-12/schema
+description: >-
+  Schema for info output of test case.
+allOf:
+- description: >-
+    The main structure, without conditional relations between fields yet.
+  type: object
+  additionalProperties: false
+  properties:
+    duration:
+      description: >-
+        A derived quantity, present only in info output files.
+        Difference between start_time and end_time, in seconds.
+      $ref: "#/$defs/types/nonnegative_number"
+    dut_type:
+      description: >-
+        DUT type used, e.g. VPP or DPDK.
+      type: string
+    dut_version:
+      description: >-
+        Version string appropriate to DUT type used.
+      type: string
+    end_time:
+      description: >-
+        UTC date and time in RFC 3339 format, specifying calendar time
+        just before test case ended (at the end of test case teardown).
+      $ref: "#/$defs/types/date_time"
+    hosts:
+      description: >-
+        Array of hosts this test interacted with.
+        This can be used for identifying testbed number.
+        Valid tests should interact with at least one DUT or TG.
+        The array is usually sorted, but that is not a requirement.
+      type: array
+      minItems: 1
+      items:
+        description: >-
+          Host identifier, usually numeric IPv4 address.
+        type: string
+    log:
+      description: >-
+        No log items are implemented in the current version,
+        but the (empty) list is present to simplify logic
+        in multi-version importers.
+      $ref: "#/$defs/types/empty_array"
+    message:
+      description: >-
+        If passed is true, this value is empty.
+        Otherwise, value taken directly from TEST_MESSAGE
+        Robot variable, read at the end of test case
+        (in test teardown, before export and validation).
+        It contains information from the exception
+        that caused the failure, probably with additional
+        exceptions from teardown keywords.
+      type: string
+    passed:
+      description: >-
+        Value set according to TEST_STATUS Robot variable,
+        true if and only if the status is "PASS".
+        The status is read at the end of test case
+        (in test teardown, before export and validation).
+      type: boolean
+    result:
+      type: object
+      allOf:
+      - description: >-
+          Sub-schema common for all cases,
+          only result type identifier defined here.
+        properties:
+          type:
+            description: >-
+              Identifier of which result type case is applied.
+            type: string
+        required:
+        - type
+      - oneOf:
+        - description: >-
+            Result type for unknown case.
+            This case represents a test with no specific result
+            (outside message), e.g. device test;
+            or a test with result not parsed into
+            this version of model yet, e.g. GSO test.
+          additionalProperties: false
+          properties:
+            type:
+              const: unknown
+        - description: >-
+            Result type MRR case.
+          additionalProperties: false
+          properties:
+            type:
+              const: mrr
+            receive_rate:
+              description: >-
+                The results refer to receive rates for multiple
+                MRR trials. For PPS, these are aggregate
+                (bidirectional) rates.
+                Currently, the tests are exporting
+                approximated receive rates.
+                That means the actual trial duration
+                is measured (as opposed to trusting traffic
+                generator to honor its target duration),
+                so the resulting values contain noise
+                from time measurement, and can be lower
+                than the real performance
+                (due to various time overheads).
+                Bandwidth values are supported, but currently
+                Robot does not export them.
+              $ref: "#/$defs/types/rate_list_with_bandwidth"
+          required:
+          - type
+          - receive_rate
+        - description: >-
+            Result type NDRPDR case.
+          additionalProperties: false
+          properties:
+            type:
+              const: ndrpdr
+            ndr:
+              description: >-
+                The results refer to search for NDR
+                (Non Drop Rate). For PPS, this is aggregate
+                (bidirectional) rate.
+                Each bound was used as the target load value
+                in a full-duration trial measurement.
+                The accepted loss ratio for NDR is exactly zero.
+                Note that packets the Traffic Generator
+                did not send are also counted as lost packets.
+              $ref: "#/$defs/macros/lower_and_upper_rate"
+            pdr:
+              description: >-
+                The results refer to search for PDR
+                (Partial Drop Rate). For PPS, this is aggregate
+                (bidirectional) rate.
+                Each bound was used as the target load value
+                in a full-duration trial measurement.
+                The accepted loss ratio for PDR is 0.5%.
+                Note that packets the Traffic Generator
+                did not send are also counted as lost packets.
+              $ref: "#/$defs/macros/lower_and_upper_rate"
+            latency_forward:
+              description: >-
+                Object with results related to latency part
+                of NDRPDR test, for forward traffic direction.
+                It is the direction used in unidirectional
+                traffic profiles.
+                ASTF profiles and IMIX STL profiles
+                do not support latency information,
+                so for those tests this object is missing.
+                It is also missing if Traffic Generator
+                fails to return valid latency results
+                for any other reasons,
+                e.g. latency rate is too high for CPU/NIC used.
+              $ref: "#/$defs/macros/latency_for_loads"
+            latency_reverse:
+              description: >-
+                Object with results related to latency part
+                of NDRPDR test, for reverse traffic direction.
+                This object is not present
+                when unidirectional traffic profiles are used.
+                ASTF profiles and IMIX STL profiles
+                do not support latency information,
+                so for those tests this object is missing.
+                It is also missing if Traffic Generator
+                fails to return valid latency results
+                for any other reasons,
+                e.g. latency rate is too high for CPU/NIC used.
+              $ref: "#/$defs/macros/latency_for_loads"
+          required:
+          - type
+          - ndr
+          - pdr
+        - description: >-
+            Result type SOAK case.
+          additionalProperties: false
+          properties:
+            type:
+              const: soak
+            critical_rate:
+              description: >-
+                The results refer to Bayesian estimate
+                of critical rate corresponding to
+                average loss ratio of 10^-7.
+                For PPS, this is aggregate (bidirectional) rate.
+                The bounds are computed from
+                trial measurement results,
+                but are not equal to any target load used.
+                Note that packets the Traffic Generator
+                did not send are also counted as lost packets.
+              $ref: "#/$defs/macros/lower_and_upper_rate"
+          required:
+          - type
+          - critical_rate
+    start_time:
+      description: >-
+        UTC date and time in RFC 3339 format, specifying calendar time
+        just after test case started (at the start of test setup).
+      $ref: "#/$defs/types/date_time"
+    tags:
+      description: >-
+        The list of strings comes directly
+        from Robot variable TEST_TAGS.
+        The content should include both static and dynamic tags
+        at the end of test case (teardown).
+      type: array
+      items:
+        type: string
+    test_documentation:
+      description: >-
+        Value taken directly from TEST_DOCUMENTATION Robot variable.
+        The content is what you see in suite file
+        at test case definition, which is usually empty
+        as CSIT uses data driven test cases.
+      type: string
+    test_id:
+      description: >-
+        A derived quantity, present only in info output files.
+        It is the most complete and unique identifier for a test case.
+        This property has a value of the following form:
+        {suite_name}.{test_name}
+        Here, suite name comes from SUITE_NAME robot variable,
+        test name comes from TEST_NAME robot variable,
+        but both are converted to lower case,
+        and spaces are replaced by underscores.
+      type: string
+      minLength: 3
+    test_name_long:
+      description: >-
+        A derived quantity, present only in info output files.
+        This property has a value of the following form:
+        {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+        Here, suite part is very similar to suite tag,
+        but additionally may contain a prefix describing NIC driver used
+        (if it is not the default one, drv_vfio_pci for VPP tests).
+        Any space is replaced by underscore and letters are lower case.
+      type: string
+      minLength: 3
+    test_name_short:
+      description: >-
+        A derived quantity, present only in info output files.
+        This property has a value very similar to suite tag,
+        but additionally may contain a prefix describing NIC driver used
+        (if it is not the default one, drv_vfio_pci for VPP tests).
+        Any space is replaced by underscore and letters are lower case.
+      type: string
+      minLength: 3
+    test_type:
+      description: >-
+        A derived quantity, present only in info output files.
+        Test type identifier, PAL uses it to group similar tests,
+        e.g. for comparison tables.
+        Ideally, this information should be parseable from test name,
+        but the current naming scheme is not simple/consistent enough.
+        The current implementation queries the robot test tags.
+        The resulting value is frequently identical to result type,
+        but this schema version does not require any relation there,
+        as PAL may want to group tests differently.
+      type: string
+      enum:
+      - device
+      - gso
+      - hoststack
+      - mrr
+      - ndrpdr
+      - reconf
+      - soak
+      - vsap
+    version:
+      description: >-
+        CSIT model version (semver format)
+        the exporting code adhered to.
+      type: string
+      const: 1.0.0
+  required:
+  - duration
+  - dut_type
+  - dut_version
+  - end_time
+  - hosts
+  - log
+  - message
+  - passed
+  - result
+  - start_time
+  - tags
+  - test_documentation
+  - test_id
+  - test_name_long
+  - test_name_short
+  - test_type
+  - version
+- description: >-
+    Subschema validating relation between status and message.
+  oneOf:
+  - description: >-
+      Subschema for passing tests, message has to be empty.
+    type: object
+    properties:
+      passed:
+        const: true
+      message:
+        const: ""
+  - description: >-
+      Subschema for failing tests, message cannot be empty.
+    type: object
+    properties:
+      passed:
+        const: false
+      message:
+        minLength: 1
+- description: >-
+    Subschema validating relation between dut_type and dut_version.
+  oneOf:
+  - description: >-
+      Subschema for tests with no DUT, e.g. TRex self-test.
+    type: object
+    properties:
+      dut_type:
+        const: none
+      dut_version:
+        const: ""
+  - description: >-
+      Subschema for DUT type VPP.
+    type: object
+    properties:
+      dut_type:
+        const: VPP
+      dut_version:
+        minLength: 1
+  - description: >-
+      Subschema for DUT type DPDK.
+    type: object
+    properties:
+      dut_type:
+        const: DPDK
+      dut_version:
+        minLength: 1
+
+$defs:
+  types:
+    nonnegative_number:
+      type: number
+      minimum: 0
+    positive_number:
+      type: number
+      minimum: 1
+    nonnegative_integer:
+      type: integer
+      minimum: 0
+    positive_integer:
+      type: integer
+      minimum: 1
+    date_time:
+      type: string
+      format: date-time
+    empty_array:
+      type: array
+      maxItems: 0
+    rate_unit:
+      description: >-
+        Packets per second (pps) or connections per second (cps).
+      type: string
+      enum:
+      - pps
+      - cps
+    bandwidth_unit:
+      description: >-
+        Unit of measurement for bandwidth values.
+        Currently a constant, but later versions of model
+        may allow more units.
+      enum:
+      - bps
+    value_with_unit:
+      description: >-
+        Reusable composite type, value together with its
+        unit of measurement.
+      type: object
+      additionalProperties: false
+      properties:
+        value:
+          description: >-
+            Numeric value, context specified elsewhere.
+            The only assumption is that value is not negative.
+          $ref: "#/$defs/types/nonnegative_number"
+        unit:
+          description: >-
+            Unit of measurement for the value.
+            Context and allowed values are specified elsewhere.
+          type: string
+      required:
+      - value
+      - unit
+    rate_without_bandwidth:
+      description: >-
+        Reusable type, for various rate quantities.
+      allOf:
+      - $ref: "#/$defs/types/value_with_unit"
+      - properties:
+          value:
+            description: >-
+              Unless specified otherwise,
+              this is the aggregated rate
+              (sum of both traffic directions).
+              Depending on the usage, the value can express
+              intended load, offered load, receive rate,
+              and various approximations
+              or estimated bounds thereof.
+          unit:
+            description: >-
+              A transaction rate unit the value is expressed in.
+            $ref: "#/$defs/types/rate_unit"
+    bandwidth:
+      description: >-
+        Reusable type, for various bandwidth quantities.
+      allOf:
+      - $ref: "#/$defs/types/value_with_unit"
+      - properties:
+          value:
+            description: >-
+              Bandwidth value computed
+              from the corresponding rate.
+          unit:
+            $ref: "#/$defs/types/bandwidth_unit"
+    rate_with_bandwidth:
+      description: >-
+        Reusable composite type, joining primary rate
+        with optional derived bandwidth.
+        Not all test types currently compute bandwidth,
+        even if rate unit is pps.
+      type: object
+      additionalProperties: false
+      properties:
+        rate:
+          $ref: "#/$defs/types/rate_without_bandwidth"
+        bandwidth:
+          $ref: "#/$defs/types/bandwidth"
+      required:
+      - rate
+    value_list_with_unit_and_stats:
+      description: >-
+        Reusable composite type, multiple values together with their
+        unit of measurement and derived statistics.
+      type: object
+      additionalProperties: false
+      properties:
+        values:
+          description: >-
+            List of values of the same unit, useful for MRR.
+          type: array
+          minItems: 1
+          items:
+            description: >-
+              Numeric value, context specified elsewhere.
+              The only assumption is that the value is nonnegative.
+            $ref: "#/$defs/types/nonnegative_number"
+        avg:
+          description: >-
+            A derived quantity, present only in info output files.
+            It is the arithmetic average of the values list.
+          $ref: "#/$defs/types/nonnegative_number"
+        stdev:
+          description: >-
+            A derived quantity, present only in info output files.
+            It is the standard deviation for the values list,
+            as computed by jumpavg library.
+          $ref: "#/$defs/types/nonnegative_number"
+        unit:
+          description: >-
+            Unit of measurement for the values.
+            Context and allowed values are specified elsewhere.
+          type: string
+      required:
+      - values
+      - avg
+      - stdev
+      - unit
+    rate_list_without_bandwidth:
+      description: >-
+        Reusable composite type, multiple rate values.
+      allOf:
+      - $ref: "#/$defs/types/value_list_with_unit_and_stats"
+      - properties:
+          values:
+            items:
+              description: >-
+                Unless specified otherwise,
+                this is the aggregated rate
+                (sum of both traffic directions).
+                Depending on the usage, the value can express
+                intended load, offered load, receive rate,
+                and various approximations or estimated bounds
+                thereof.
+          unit:
+            $ref: "#/$defs/types/rate_unit"
+    bandwidth_list:
+      description: >-
+        Reusable composite type, multiple bandwidth values.
+        This is a derived entity, thus it only appears in info output,
+        and only if rate unit is pps.
+      allOf:
+      - $ref: "#/$defs/types/value_list_with_unit_and_stats"
+      - properties:
+          values:
+            items:
+              description: >-
+                Unless specified otherwise,
+                this is the aggregated bandwidth
+                (sum of both traffic directions).
+                Depending on the usage, the value can express
+                intended load, offered load, receive rate,
+                and various approximations or estimated bounds
+                thereof.
+          unit:
+            $ref: "#/$defs/types/bandwidth_unit"
+    rate_list_with_bandwidth:
+      description: >-
+        Reusable composite type, joining primary rates
+        with optional derived bandwidths (and stats).
+        No test type currently computes the bandwidth part.
+      type: object
+      additionalProperties: false
+      properties:
+        rate:
+          $ref: "#/$defs/types/rate_list_without_bandwidth"
+        bandwidth:
+          $ref: "#/$defs/types/bandwidth_list"
+      required:
+      - rate
+  macros:
+    lower_and_upper_rate:
+      type: object
+      additionalProperties: false
+      properties:
+        lower:
+          description: >-
+            The lower bound (or min_rate) for the estimate
+            of a particular searched value.
+          $ref: "#/$defs/types/rate_with_bandwidth"
+        upper:
+          description: >-
+            The upper bound (or max_rate) for the estimate
+            of a particular searched value.
+ $ref: "#/$defs/types/rate_with_bandwidth" + required: + - lower + - upper + latency_numbers: + type: object + additionalProperties: false + properties: + min: + description: >- + Rounded minimal latency time measured in this trial. + See unit property for the unit of measurement. + $ref: "#/$defs/types/nonnegative_integer" + max: + description: >- + Rounded maximal latency time measured in this trial. + See unit property for the unit of measurement. + Zero value is not allowed, as that is one of symptoms + of Traffic Generator failing to get proper latency. + $ref: "#/$defs/types/positive_integer" + avg: + description: >- + Rounded average latency time measured in this trial. + See unit property for the unit of measurement. + $ref: "#/$defs/types/nonnegative_integer" + hdrh: + description: >- + Base64-encoded compressed representation of HDRHistogram + of all latency sample times encountered + in this latency trial. + See unit property for the unit of measurement. + Note that some bins can be several units wide. + type: string + unit: + description: >- + Unit of measurement for latency times. + Currently a constant, but later versions + of the model may allow more values. + type: string + enum: + - us + required: + - avg + - hdrh + - max + - min + - unit + latency_for_loads: + type: object + additionalProperties: false + properties: + pdr_0: + description: >- + Object related to latency measurement performed + at minimal rate (currently 9000 pps per direction). + $ref: "#/$defs/macros/latency_numbers" + pdr_10: + description: >- + Object related to latency measurement performed + at 10% of PDR lower bound, if needed rounded up + to minimal rate (currently 9000 pps per direction). + $ref: "#/$defs/macros/latency_numbers" + pdr_50: + description: >- + Object related to latency measurement performed + at 50% of PDR lower bound, if needed rounded up + to minimal rate (currently 9000 pps per direction). + $ref: "#/$defs/macros/latency_numbers" + pdr_90: + description: >- + Object related to latency measurement performed + at 90% of PDR lower bound, if needed rounded up + to minimal rate (currently 9000 pps per direction). + $ref: "#/$defs/macros/latency_numbers" + required: + - pdr_0 + - pdr_10 + - pdr_50 + - pdr_90 diff --git a/docs/model/current/schema/todos.txt b/docs/model/current/schema/todos.txt new file mode 100644 index 0000000000..8e63b21a19 --- /dev/null +++ b/docs/model/current/schema/todos.txt @@ -0,0 +1,16 @@ + +Export also tg_type and tg_version properties. + +Add description with link to methodology for MRR, NDRPDR and SOAK. + +Add multiplicity field to MRR result, so PAL can detect incomplete samples. + +Add link explaining our L1 bandwidth calculation. + +Add a link to URL explaining how to decode the hdrh data. + +Do we want to require more structure for dut_version, e.g. at least two dots? + +Should we parse version strings? +E.g.: Turn dut_version from "21.11.0" +into {"major": 21, "minor": 11, "patch": 0}. diff --git a/docs/model/current/schema/yaml2json.py b/docs/model/current/schema/yaml2json.py new file mode 100644 index 0000000000..6927bc0172 --- /dev/null +++ b/docs/model/current/schema/yaml2json.py @@ -0,0 +1,29 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility to convert from .schema.yaml to .schema.json.
+
+TODO: Read the input file name from command line argument.
+"""
+
+import glob
+import json
+import yaml
+
+
+for filename in glob.glob(u"*.schema.yaml"):
+    # Strip the ".yaml" suffix, keeping the ".schema" part of the name.
+    name = filename[:-5]
+    with open(f"{name}.yaml", u"rt") as fin:
+        # Use safe_load, as plain load without a Loader is deprecated.
+        data = yaml.safe_load(fin.read())
+    with open(f"{name}.json", u"wt") as fout:
+        json.dump(data, fout, indent=2)
diff --git a/docs/model/current/top.rst b/docs/model/current/top.rst
index 640abe2343..d86e3fde4c 100644
--- a/docs/model/current/top.rst
+++ b/docs/model/current/top.rst
@@ -22,7 +22,7 @@ especially the export side (UTI), not import side (PAL).
 Version
 ~~~~~~~
 
-This document is valid for CSIT model version 0.1.0.
+This document is valid for CSIT model version 1.0.0.
 
 It is recommended to use semantic versioning: https://semver.org/
 That means, if the new model misses a field present in the old model,
@@ -42,7 +42,42 @@ UTI stands for Unified Test Interface.
 It mainly focuses on exporting information gathered during test run
 into JSON output files.
 
-Files
------
+Output Structure
+----------------
 
-No files are exported yet in this version.
+UTI outputs form a single filesystem tree, where directories
+correspond to suite levels, and files correspond to suite setup,
+suite teardown, or any test case at that level of the suite.
+The directory name comes from SUITE_NAME Robot variable (only its last part,
+as the previous parts name higher level suites), converted to lowercase.
+If the suite name contains spaces (Robot converts underscores to spaces),
+they are replaced with underscores.
+
+The filesystem tree is rooted under tests/ (as suites in git are there),
+and for each component (test case, suite setup, suite teardown)
+two files are generated.
+The "raw" variant is suitable for debugging (can contain lower level logging),
+while the "info" variant is suitable for processing by PAL
+(can contain derivative values so PAL does not need to compute them
+on every download).
+Their structure and content are mostly identical; the model definition
+mentions when a particular subschema differs between the two variants.
+It is possible to convert from raw to info, but not the other way.
+
+Although we expect only ASCII text in the exported files,
+we manipulate files using UTF-8 encoding,
+so if Robot Framework uses a non-ASCII character, it will be handled.
+
+JSON schemas
+------------
+
+The CSIT model is formally defined as a collection of JSON schema documents,
+one for each output file type.
+
+The current version specifies only one output file type:
+Info output for test case.
+
+The authoritative JSON schema documents are in JSON format.
+The Git repository also contains a YAML formatted document
+and a conversion utility, which simplify maintenance of the JSON document
+(no need to track brackets and commas), but these are not authoritative.
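As the commit message notes ("Validate exported instance against the schema", "Include format checking"), each exported info file is checked against this schema; the actual keyword lives in resources/libraries/python/model/validate.py, whose body is not shown in this excerpt. The following is a minimal sketch of that validation flow, assuming the jsonschema, rfc3339-validator and rfc3987 packages pinned in requirements.txt; the file paths are hypothetical placeholders:

    import json

    from jsonschema import Draft202012Validator, FormatChecker

    # Hypothetical paths, for illustration only.
    with open(u"test_case.info.schema.json", u"rt") as fin:
        schema = json.load(fin)
    with open(u"some_test.info.json", u"rt") as fin:
        instance = json.load(fin)

    # FormatChecker turns "format": "date-time" into an enforced check;
    # that is why rfc3339-validator is among the new requirements.
    validator = Draft202012Validator(schema, format_checker=FormatChecker())
    validator.validate(instance)  # Raises ValidationError on mismatch.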
diff --git a/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile b/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile
index 73ff5c5e86..a955799b63 100644
--- a/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile
+++ b/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile
@@ -135,7 +135,7 @@ RUN pip3 install \
     Pygments==2.4.2 \
     PyNaCl==1.3.0 \
     pyparsing==2.4.4 \
-    python-dateutil==2.8.1 \
+    python-dateutil==2.8.2 \
    pytz==2019.3 \
     retrying==1.3.3 \
     six==1.13.0 \
diff --git a/fdio.infra.ansible/roles/python_env/tasks/main.yaml b/fdio.infra.ansible/roles/python_env/tasks/main.yaml
index 7df8008a27..4e4cfb447c 100644
--- a/fdio.infra.ansible/roles/python_env/tasks/main.yaml
+++ b/fdio.infra.ansible/roles/python_env/tasks/main.yaml
@@ -59,7 +59,7 @@
       - "Pygments==2.8.1"
       - "PyNaCl==1.3.0"
       - "pyparsing==2.4.7"
-      - "python-dateutil==2.8.1"
+      - "python-dateutil==2.8.2"
       - "pytz==2021.1"
       - "retrying==1.3.3"
       - "six==1.15.0"
diff --git a/requirements.txt b/requirements.txt
index 244c7649ad..983cc79412 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -50,8 +50,14 @@ sphinxcontrib-robotdoc==0.11.0
 # VPP requirements
 ply==3.11
 
-# PIP freeze dependencies
+# JSON schema validation
+jsonschema==4.1.0
+rfc3339-validator==0.1.4
+rfc3987==1.3.8
+
+# Other PIP freeze dependencies.
 alabaster==0.7.12
+arrow==1.2.1
 Babel==2.9.0
 bcrypt==3.1.7
 certifi==2020.12.5
@@ -60,9 +66,12 @@ chardet==4.0.0
 cryptography==2.8
 docutils==0.16
 future==0.18.2
+fqdn==1.5.1
 idna==2.10
 imagesize==1.2.0
+isoduration==20.11.0
 Jinja2==2.11.3
+jsonpointer==2.1
 MarkupSafe==1.1.1
 packaging==20.9
 pbr==5.5.1
@@ -70,7 +79,7 @@ pycparser==2.19
 Pygments==2.8.1
 PyNaCl==1.3.0
 pyparsing==2.4.7
-python-dateutil==2.8.1
+python-dateutil==2.8.2
 pytz==2021.1
 retrying==1.3.3
 six==1.15.0
@@ -81,4 +90,6 @@ sphinxcontrib-htmlhelp==1.0.3
 sphinxcontrib-jsmath==1.0.1
 sphinxcontrib-qthelp==1.0.3
 sphinxcontrib-serializinghtml==1.1.4
+uri-template==1.1.0
 urllib3==1.25.6
+webcolors==1.11.1
diff --git a/resources/libraries/bash/function/common.sh b/resources/libraries/bash/function/common.sh
index 7348f25c47..0a4c94969d 100644
--- a/resources/libraries/bash/function/common.sh
+++ b/resources/libraries/bash/function/common.sh
@@ -147,7 +147,8 @@ function archive_tests () {
 
     set -exuo pipefail
 
-    tar c "${GENERATED_DIR}/tests" | xz -3 > "${ARCHIVE_DIR}/tests.tar.xz" || {
+    filename="generated_robot_files.tar.xz"
+    tar c "${GENERATED_DIR}/tests" | xz -3 > "${ARCHIVE_DIR}/${filename}" || {
         die "Error creating archive of generated tests."
     }
 }
@@ -713,6 +714,8 @@ function run_pybot () {
     # Run pybot with options based on input variables. Create output_info.xml
     #
+    # Also, raw .json output files are moved into an archive to speed up PAL.
+    #
     # Variables read:
     # - CSIT_DIR - Path to existing root of local CSIT git repository.
     # - ARCHIVE_DIR - Path to store robot result files in.
@@ -735,7 +738,31 @@ function run_pybot () {
     PYBOT_EXIT_STATUS="$?"
     set -e
 
+    # Compress raw json outputs, if any.
+    pushd "${ARCHIVE_DIR}" || die
+    if [ -d "tests" ]; then
+        # Use deterministic order.
+        options+=("--sort=name")
+        # We are keeping info outputs where they are.
+        # Assuming we want to move anything but info files (and dirs).
+        options+=("--exclude=*.info.json")
+        # There may be other unforeseen errors,
+        # we still want to execute subsequent commands, so disable set -e.
+        set +e
+        tar cvf "tests_output_raw.tar" "${options[@]}" "tests"
+        # If compression fails, it leaves an uncompressed .tar,
+        # we still want to archive that to investigate why compression failed.
+ time xz -9e "tests_output_raw.tar" + # Tar can remove when archiving, but chokes (not deterministically) + # on attempting to remove dirs (not empty as info files are there). + # So we need to delete the raw files manually. + find "tests" -type f -name "*.raw.json" -delete + set -e + fi + popd || die + # Generate INFO level output_info.xml for post-processing. + # This comes last, as it is slowest, and sometimes users abort here. all_options=("--loglevel" "INFO") all_options+=("--log" "none") all_options+=("--report" "none") diff --git a/resources/libraries/bash/function/per_patch.sh b/resources/libraries/bash/function/per_patch.sh index 4af3302008..b6a572d654 100644 --- a/resources/libraries/bash/function/per_patch.sh +++ b/resources/libraries/bash/function/per_patch.sh @@ -37,7 +37,9 @@ function archive_test_results () { cd "${VPP_DIR}" || die "Change directory command failed." TARGET="$(readlink -f "$1")" mkdir -p "${TARGET}" || die "Directory creation failed." - for filename in "output.xml" "log.html" "report.html"; do + file_list=("output.xml" "log.html" "report.html") + file_list+=("tests" "tests_output_raw.tar.xz") + for filename in "${file_list[@]}"; do mv "${ARCHIVE_DIR}/${filename}" "${TARGET}/${filename}" || { die "Attempt to move '${filename}' failed." } diff --git a/resources/libraries/python/Constants.py b/resources/libraries/python/Constants.py index f44c6ad95a..672ce8716f 100644 --- a/resources/libraries/python/Constants.py +++ b/resources/libraries/python/Constants.py @@ -120,7 +120,10 @@ class Constants: """Constants used in CSIT.""" # Version for CSIT data model. See docs/model/. - MODEL_VERSION = u"0.1.0" + MODEL_VERSION = u"1.0.0" + + # Global off-switch in case JSON export is large or slow. + EXPORT_JSON = get_optimistic_bool_from_env(u"EXPORT_JSON") # OpenVPP testing directory location at topology nodes REMOTE_FW_DIR = u"/tmp/openvpp-testing" @@ -334,6 +337,18 @@ class Constants: u"Mellanox-CX556A": u"100ge2p1cx556a", } + # Shortened lowercase NIC model name, useful for presentation. + NIC_CODE_TO_SHORT_NAME = { + u"10ge2p1x520": u"x520", + u"10ge2p1x553": u"x553", + u"10ge2p1x710": u"x710", + u"40ge2p1xl710": u"xl710", + u"25ge2p1xxv710": u"xxv710", + u"100ge2p1e810cq": u"e810cq", + u"50ge1p1ENA": u"ena", + u"100ge2p1cx556a": u"cx556a", + } + # Not each driver is supported by each NIC. NIC_NAME_TO_DRIVER = { u"Intel-X520-DA2": [u"vfio-pci", u"af_xdp"], diff --git a/resources/libraries/python/DPDK/DPDKTools.py b/resources/libraries/python/DPDK/DPDKTools.py index b403c3a5f9..83ddae8b4a 100644 --- a/resources/libraries/python/DPDK/DPDKTools.py +++ b/resources/libraries/python/DPDK/DPDKTools.py @@ -79,6 +79,25 @@ class DPDKTools: message = u"Cleanup the DPDK failed!" exec_cmd_no_error(node, command, timeout=1200, message=message) + @staticmethod + def get_dpdk_version(node): + """Log and return the installed DPDK version. + + The logged string ends with newline, the returned one is stripped. + + :param node: Node from topology file. + :type node: dict + :returns: Stripped DPDK version string. + :rtype: str + :raises RuntimeError: If command returns nonzero return code. + """ + command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION" + message = u"Get DPDK version failed!" + stdout, _ = exec_cmd_no_error(node, command, message=message) + # TODO: PAL should already tolerate stripped value in the log. 
+ logger.info(f"DPDK Version: {stdout}") + return stdout.strip() + @staticmethod def install_dpdk_framework(node): """ @@ -92,12 +111,7 @@ class DPDKTools: f"/entry/install_dpdk.sh" message = u"Install the DPDK failed!" exec_cmd_no_error(node, command, timeout=3600, message=message) - - command = f"cat {Constants.REMOTE_FW_DIR}/dpdk*/VERSION" - message = u"Get DPDK version failed!" - stdout, _ = exec_cmd_no_error(node, command, message=message) - - logger.info(f"DPDK Version: {stdout}") + DPDKTools.get_dpdk_version(node) @staticmethod def install_dpdk_framework_on_all_duts(nodes): diff --git a/resources/libraries/python/SetupFramework.py b/resources/libraries/python/SetupFramework.py index e0c3a4cb61..810a16d06e 100644 --- a/resources/libraries/python/SetupFramework.py +++ b/resources/libraries/python/SetupFramework.py @@ -14,6 +14,8 @@ """This module exists to provide setup utilities for the framework on topology nodes. All tasks required to be run before the actual tests are started is supposed to end up here. + +TODO: Figure out how to export JSON from SSH outside main Robot thread. """ from os import environ, remove @@ -105,7 +107,7 @@ def extract_tarball_at_node(tarball, node): node, cmd, message=f"Failed to extract {tarball} at node {node[u'type']} " f"host {node[u'host']}, port {node[u'port']}", - timeout=240, include_reason=True + timeout=240, include_reason=True, export=False ) logger.console( f"Extracting tarball to {con.REMOTE_FW_DIR} on {node[u'type']} " @@ -134,7 +136,7 @@ def create_env_directory_at_node(node): f"&& source env/bin/activate && ANSIBLE_SKIP_CONFLICT_CHECK=1 " \ f"pip3 install -r requirements.txt" stdout, stderr = exec_cmd_no_error( - node, cmd, timeout=100, include_reason=True, + node, cmd, timeout=100, include_reason=True, export=False, message=f"Failed install at node {node[u'type']} host {node[u'host']}, " f"port {node[u'port']}" ) @@ -214,7 +216,7 @@ def delete_framework_dir(node): node, f"sudo rm -rf {con.REMOTE_FW_DIR}", message=f"Framework delete failed at node {node[u'type']} " f"host {node[u'host']}, port {node[u'port']}", - timeout=100, include_reason=True + timeout=100, include_reason=True, export=False ) logger.console( f"Deleting framework directory on {node[u'type']} host {node[u'host']}," diff --git a/resources/libraries/python/VPPUtil.py b/resources/libraries/python/VPPUtil.py index e343d38d2f..0d5dea57df 100644 --- a/resources/libraries/python/VPPUtil.py +++ b/resources/libraries/python/VPPUtil.py @@ -18,6 +18,9 @@ from robot.api import logger from resources.libraries.python.Constants import Constants from resources.libraries.python.DUTSetup import DUTSetup from resources.libraries.python.PapiExecutor import PapiSocketExecutor +from resources.libraries.python.model.ExportResult import ( + export_dut_type_and_version +) from resources.libraries.python.ssh import exec_cmd_no_error, exec_cmd from resources.libraries.python.topology import Topology, SocketType, NodeType @@ -197,6 +200,7 @@ class VPPUtil: """Run "show_version" PAPI command. Socket is configurable, so VPP inside container can be accessed. + The result is exported to JSON UTI output as "dut-version". :param node: Node to run command on. :param remote_vpp_socket: Path to remote socket to target VPP. 
@@ -214,7 +218,9 @@ class VPPUtil: reply = papi_exec.add(cmd).get_reply() if log: logger.info(f"VPP version: {reply[u'version']}\n") - return f"{reply[u'version']}" + version = f"{reply[u'version']}" + export_dut_type_and_version(u"VPP", version) + return version @staticmethod def show_vpp_version_on_all_duts(nodes): diff --git a/resources/libraries/python/model/ExportLog.py b/resources/libraries/python/model/ExportLog.py new file mode 100644 index 0000000000..e02eef63c5 --- /dev/null +++ b/resources/libraries/python/model/ExportLog.py @@ -0,0 +1,148 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module with keywords that publish metric and other log events. +""" + +import datetime + +from resources.libraries.python.model.util import get_export_data + + +def export_ssh_command(host, port, command): + """Add a log item about SSH command execution starting. + + The log item is present only in raw output. + Result arrives in a separate log item. + Log level is always DEBUG. + + The command is stored as "data" (not "msg") as in some cases + the command can be too long to act as a message. + + The host is added to the info set of hosts. + + :param host: Node "host" attribute, usually its IPv4 address. + :param port: SSH port number to use when connecting to the host. + :param command: Serialized bash command to execute. + :type host: str + :type port: int + :type command: str + """ + timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ") + data = get_export_data() + ssh_record = dict( + source_type=u"host,port", + source_id=dict(host=host, port=port), + msg_type=u"ssh_command", + log_level=u"DEBUG", + timestamp=timestamp, + msg="", + data=str(command), + ) + data[u"hosts"].add(host) + data[u"log"].append(ssh_record) + + +def export_ssh_result(host, port, code, stdout, stderr, duration): + """Add a log item about ssh execution result. + + Only for raw output log. + + There is no easy way to pair with the corresponding command, + but usually there is only one SSH session for given host and port. + The duration value may give a hint if that is not the case. + + Message is empty, data has fields "rc", "stdout", "stderr" and "duration". + Log level is always DEBUG. + + The host is NOT added to the info set of hosts, as each result + comes after a command. + + TODO: Do not require duration, find preceding ssh command in log. + Reason: Pylint complains about too many arguments. + Alternative: Define type for SSH endopoint (and use that instead host+port). + + :param host: Node "host" attribute, usually its IPv4 address. + :param port: SSH port number to use when connecting to the host. + :param code: Bash return code, e.g. 0 for success. + :param stdout: Captured standard output of the command execution. + :param stderr: Captured error output of the command execution. + :param duration: How long has the command been executing, in seconds. 
+ :type host: str + :type port: int + :type code: int + :type stdout: str + :type stderr: str + :type duration: float + """ + timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ") + data = get_export_data() + ssh_record = dict( + source_type=u"host,port", + source_id=dict(host=host, port=port), + msg_type=u"ssh_result", + log_level=u"DEBUG", + timestamp=timestamp, + msg=u"", + data=dict( + rc=int(code), + stdout=str(stdout), + stderr=str(stderr), + duration=float(duration), + ), + ) + data[u"log"].append(ssh_record) + + +def export_ssh_timeout(host, port, stdout, stderr, duration): + """Add a log item about ssh execution timing out. + + Only for debug log. + + There is no easy way to pair with the corresponding command, + but usually there is only one SSH session for given host and port. + + Message is empty, data has fields "stdout", "stderr" and "duration". + The duration value may give a hint if that is not the case. + Log level is always DEBUG. + + The host is NOT added to the info set of hosts, as each timeout + comes after a command. + + :param host: Node "host" attribute, usually its IPv4 address. + :param port: SSH port number to use when connecting to the host. + :param stdout: Captured standard output of the command execution so far. + :param stderr: Captured error output of the command execution so far. + :param duration: How long has the command been executing, in seconds. + :type host: str + :type port: int + :type stdout: str + :type stderr: str + :type duration: float + """ + timestamp = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ") + data = get_export_data() + ssh_record = dict( + source_type=u"host,port", + source_id=dict(host=host, port=port), + msg_type=u"ssh_timeout", + log_level=u"DEBUG", + timestamp=timestamp, + msg=u"", + data=dict( + stdout=str(stdout), + stderr=str(stderr), + duration=float(duration), + ), + ) + data[u"log"].append(ssh_record) diff --git a/resources/libraries/python/model/ExportResult.py b/resources/libraries/python/model/ExportResult.py new file mode 100644 index 0000000000..d74a6ab5df --- /dev/null +++ b/resources/libraries/python/model/ExportResult.py @@ -0,0 +1,179 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module with keywords that publish parts of result structure.""" + +from robot.libraries.BuiltIn import BuiltIn + +from resources.libraries.python.model.util import descend, get_export_data + + +def export_dut_type_and_version(dut_type=u"unknown", dut_version=u"unknown"): + """Export the arguments as dut type and version. + + Robot tends to convert "none" into None, hence the unusual default values. + + If either argument is missing, the value from robot variable is used. + If argument is present, the value is also stored to robot suite variable. + + :param dut_type: DUT type, e.g. VPP or DPDK. + :param dut_version: DUT version as determined by the caller. 
+ :type dut_type: Optional[str]
+ :type dut_version: Optional[str]
+ :raises RuntimeError: If a value is neither in the argument nor in the Robot variable.
+ """
+ if dut_type == u"unknown":
+ dut_type = BuiltIn().get_variable_value(u"\\${DUT_TYPE}", u"unknown")
+ if dut_type == u"unknown":
+ raise RuntimeError(u"Dut type not provided.")
+ else:
+ # We want to set a variable in higher level suite setup
+ # to be available to test setup several levels lower.
+ # Documentation [0] looks like "children" is a keyword argument,
+ # but code [1] lines 1458 and 1511-1512 show
+ # it is just the last stringy argument.
+ # [0] http://robotframework.org/robotframework/
+ # 3.1.2/libraries/BuiltIn.html#Set%20Suite%20Variable
+ # [1] https://github.com/robotframework/robotframework/blob/
+ # v3.1.2/src/robot/libraries/BuiltIn.py
+ BuiltIn().set_suite_variable(
+ u"\\${DUT_TYPE}", dut_type, u"children=True"
+ )
+ if dut_version == u"unknown":
+ dut_version = BuiltIn().get_variable_value(u"\\${DUT_VERSION}", u"unknown")
+ if dut_version == u"unknown":
+ raise RuntimeError(u"Dut version not provided.")
+ else:
+ BuiltIn().set_suite_variable(
+ u"\\${DUT_VERSION}", dut_version, u"children=True"
+ )
+ data = get_export_data()
+ data[u"dut_type"] = dut_type
+ data[u"dut_version"] = dut_version
+
+
+def append_mrr_value(mrr_value, unit):
+ """Store an MRR value in the proper place, so it is dumped into JSON.
+
+ The value is appended only when the unit is not empty.
+
+ :param mrr_value: Forwarding rate from an MRR trial.
+ :param unit: Unit of measurement for the rate.
+ :type mrr_value: float
+ :type unit: str
+ """
+ if not unit:
+ return
+ data = get_export_data()
+ data[u"result"][u"type"] = u"mrr"
+ rate_node = descend(descend(data[u"result"], u"receive_rate"), "rate")
+ rate_node[u"unit"] = str(unit)
+ values_list = descend(rate_node, u"values", list)
+ values_list.append(float(mrr_value))
+ # TODO: Fill in the bandwidth part for pps?
+
+
+def export_search_bound(text, value, unit, bandwidth=None):
+ """Store bound value and unit.
+
+ This function works for both NDRPDR and SOAK, decided by text.
+
+ If a node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+ Result type is set (overwritten) to ndrpdr (or soak).
+
+ Text is used to determine whether it is ndr or pdr, upper or lower bound,
+ as the Robot caller has the information only there.
+
+ :param text: Info from the Robot caller to determine the bound type.
+ :param value: The bound value in packets (or connections) per second.
+ :param unit: Rate unit the bound is measured (or estimated) in.
+ :param bandwidth: The same value recomputed into L1 bits per second.
+ :type text: str
+ :type value: float
+ :type unit: str
+ :type bandwidth: Optional[float]
+ """
+ value = float(value)
+ text = str(text).lower()
+ result_type = u"soak" if u"plrsearch" in text else u"ndrpdr"
+ upper_or_lower = u"upper" if u"upper" in text else u"lower"
+ ndr_or_pdr = u"ndr" if u"ndr" in text else u"pdr"
+
+ data = get_export_data()
+ result_node = data[u"result"]
+ result_node[u"type"] = result_type
+ rate_item = dict(rate=dict(value=value, unit=unit))
+ if bandwidth:
+ rate_item[u"bandwidth"] = dict(value=float(bandwidth), unit=u"bps")
+ if result_type == u"soak":
+ descend(result_node, u"critical_rate")[upper_or_lower] = rate_item
+ return
+ descend(result_node, ndr_or_pdr)[upper_or_lower] = rate_item
+
+
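For orientation, this is roughly the subtree that export_search_bound grows under the "result" node for an NDRPDR test. The key names follow the code above; the numeric values are invented:

    result = {
        u"type": u"ndrpdr",
        u"ndr": {
            u"lower": {
                u"rate": {u"value": 12345678.0, u"unit": u"pps"},
                u"bandwidth": {u"value": 8.3e9, u"unit": u"bps"},
            },
            # The u"upper" bound is stored the same way.
        },
        # u"pdr" mirrors u"ndr"; soak results use u"critical_rate" instead.
    }
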
+def _add_latency(result_node, percent, whichward, latency_string):
+ """Descend to a corresponding node and add values from the latency string.
+
+ This is an internal block, moved out from export_ndrpdr_latency,
+ as it can be called up to 4 times.
+
+ :param result_node: UTI tree node to descend from.
+ :param percent: Percent value to use in the node key (90, 50, 10, 0).
+ :param whichward: "forward" or "reverse".
+ :param latency_string: Unidirectional output from the TRex utility,
+ in min/avg/max/hdrh form.
+ :type result_node: dict
+ :type percent: int
+ :type whichward: str
+ :type latency_string: str
+ """
+ l_min, l_avg, l_max, l_hdrh = latency_string.split(u"/", 3)
+ whichward_node = descend(result_node, f"latency_{whichward}")
+ percent_node = descend(whichward_node, f"pdr_{percent}")
+ percent_node[u"min"] = int(l_min)
+ percent_node[u"avg"] = int(l_avg)
+ percent_node[u"max"] = int(l_max)
+ percent_node[u"hdrh"] = l_hdrh
+ percent_node[u"unit"] = u"us"
+
+
+def export_ndrpdr_latency(text, latency):
+ """Store NDRPDR hdrh latency data.
+
+ If the "latency" node does not exist, it is created.
+ If a previous value exists, it is overwritten silently.
+
+ Text is used to determine what percentage of PDR is the load,
+ as the Robot caller has the information only there.
+
+ Reverse data may be missing, in which case we assume the test was
+ unidirectional.
+
+ :param text: Info from the Robot caller to determine the load.
+ :param latency: Output from the TRex utility, in min/avg/max/hdrh form.
+ :type text: str
+ :type latency: 1-tuple or 2-tuple of str
+ """
+ data = get_export_data()
+ result_node = data[u"result"]
+ percent = 0
+ if u"90" in text:
+ percent = 90
+ elif u"50" in text:
+ percent = 50
+ elif u"10" in text:
+ percent = 10
+ _add_latency(result_node, percent, u"forward", latency[0])
+ # Else TRex does not support latency measurement for this traffic profile.
+ if len(latency) < 2:
+ return
+ _add_latency(result_node, percent, u"reverse", latency[1])
diff --git a/resources/libraries/python/model/export_json.py b/resources/libraries/python/model/export_json.py
new file mode 100644
index 0000000000..4f1b86dbf4
--- /dev/null
+++ b/resources/libraries/python/model/export_json.py
@@ -0,0 +1,238 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module tracking json in-memory data and saving it to files.
+
+The current implementation tracks data for raw output,
+and info output is created from raw output on disk (see the raw2info module).
+The raw file contains all log items but no derived quantities,
+the info file contains only important log items but also derived quantities.
+The overlap between the two files is big.
+
+Each test case, suite setup (hierarchical) and teardown has its own file pair.
+
+Validation is performed for output files with an available JSON schema.
+Validation is performed on data deserialized from disk,
+as serialization might have introduced subtle errors.
+""" + +import datetime +import os.path + +from robot.api import logger +from robot.libraries.BuiltIn import BuiltIn + +from resources.libraries.python.Constants import Constants +from resources.libraries.python.model.ExportResult import ( + export_dut_type_and_version +) +from resources.libraries.python.model.mem2raw import write_raw_output +from resources.libraries.python.model.raw2info import convert_content_to_info +from resources.libraries.python.model.validate import (get_validators, validate) + + +class export_json(): + """Class handling the json data setting and export.""" + + ROBOT_LIBRARY_SCOPE = u"GLOBAL" + + def __init__(self): + """Declare required fields, cache output dir. + + Also memorize schema validator instances. + """ + self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".") + self.raw_file_path = None + self.raw_data = None + self.validators = get_validators() + + def export_pending_data(self): + """Write the accumulated data to disk. + + Create missing directories. + Reset both file path and data to avoid writing multiple times. + + Functions which finalize content for given file are calling this, + so make sure each test and non-empty suite setup or teardown + is calling this as their last keyword. + + If no file path is set, do not write anything, + as that is the failsafe behavior when caller from unexpected place. + Aso do not write anything when EXPORT_JSON constant is false. + + Regardless of whether data was written, it is cleared. + """ + if not Constants.EXPORT_JSON or not self.raw_file_path: + self.raw_data = None + self.raw_file_path = None + return + write_raw_output(self.raw_file_path, self.raw_data) + # Raw data is going to be cleared (as a sign that raw export succeeded), + # so this is the last chance to detect if it was for a test case. + is_testcase = u"result" in self.raw_data + self.raw_data = None + # Validation for raw output goes here when ready. + info_file_path = convert_content_to_info(self.raw_file_path) + self.raw_file_path = None + # If "result" is missing from info content, + # it could be a bug in conversion from raw test case content, + # so instead of that we use the flag detected earlier. + if is_testcase: + validate(info_file_path, self.validators[u"tc_info"]) + + def warn_on_bad_export(self): + """If bad state is detected, log a warning and clean up state.""" + if self.raw_file_path is not None or self.raw_data is not None: + logger.warn( + f"Previous export not clean, path {self.raw_file_path}\n" + f"data: {self.raw_data}" + ) + self.raw_data = None + self.raw_file_path = None + + def start_suite_setup_export(self): + """Set new file path, initialize data for the suite setup. + + This has to be called explicitly at start of suite setup, + otherwise Robot likes to postpone initialization + until first call by a data-adding keyword. + + File path is set based on suite. 
+ """ + self.warn_on_bad_export() + start_time = datetime.datetime.utcnow().strftime( + u"%Y-%m-%dT%H:%M:%S.%fZ" + ) + suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}") + suite_id = suite_name.lower().replace(u" ", u"_") + suite_path_part = os.path.join(*suite_id.split(u".")) + output_dir = self.output_dir + self.raw_file_path = os.path.join( + output_dir, suite_path_part, u"setup.raw.json" + ) + self.raw_data = dict() + self.raw_data[u"version"] = Constants.MODEL_VERSION + self.raw_data[u"start_time"] = start_time + self.raw_data[u"suite_name"] = suite_name + self.raw_data[u"suite_documentation"] = BuiltIn().get_variable_value( + u"\\${SUITE_DOCUMENTATION}" + ) + # "end_time" and "duration" is added on flush. + self.raw_data[u"hosts"] = set() + self.raw_data[u"log"] = list() + + def start_test_export(self): + """Set new file path, initialize data to minimal tree for the test case. + + It is assumed Robot variables DUT_TYPE and DUT_VERSION + are already set (in suite setup) to correct values. + + This function has to be called explicitly at the start of test setup, + otherwise Robot likes to postpone initialization + until first call by a data-adding keyword. + + File path is set based on suite and test. + """ + self.warn_on_bad_export() + start_time = datetime.datetime.utcnow().strftime( + u"%Y-%m-%dT%H:%M:%S.%fZ" + ) + suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}") + suite_id = suite_name.lower().replace(u" ", u"_") + suite_path_part = os.path.join(*suite_id.split(u".")) + test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}") + self.raw_file_path = os.path.join( + self.output_dir, suite_path_part, + test_name.lower().replace(u" ", u"_") + u".raw.json" + ) + self.raw_data = dict() + self.raw_data[u"version"] = Constants.MODEL_VERSION + self.raw_data[u"start_time"] = start_time + self.raw_data[u"suite_name"] = suite_name + self.raw_data[u"test_name"] = test_name + test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"") + self.raw_data[u"test_documentation"] = test_doc + # "test_type" is added when converting to info. + # "tags" is detected and added on flush. + # "end_time" and "duration" is added on flush. + # Robot status and message are added on flush. + self.raw_data[u"result"] = dict(type=u"unknown") + self.raw_data[u"hosts"] = set() + self.raw_data[u"log"] = list() + export_dut_type_and_version() + + def start_suite_teardown_export(self): + """Set new file path, initialize data for the suite teardown. + + This has to be called explicitly at start of suite teardown, + otherwise Robot likes to postpone initialization + until first call by a data-adding keyword. + + File path is set based on suite. + """ + self.warn_on_bad_export() + start_time = datetime.datetime.utcnow().strftime( + u"%Y-%m-%dT%H:%M:%S.%fZ" + ) + suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}") + suite_id = suite_name.lower().replace(u" ", u"_") + suite_path_part = os.path.join(*suite_id.split(u".")) + self.raw_file_path = os.path.join( + self.output_dir, suite_path_part, u"teardown.raw.json" + ) + self.raw_data = dict() + self.raw_data[u"version"] = Constants.MODEL_VERSION + self.raw_data[u"start_time"] = start_time + self.raw_data[u"suite_name"] = suite_name + # "end_time" and "duration" is added on flush. + self.raw_data[u"hosts"] = set() + self.raw_data[u"log"] = list() + + def finalize_suite_setup_export(self): + """Add the missing fields to data. Do not write yet. + + Should be run at the end of suite setup. 
+ The write is done at the next start (or at the end of the global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.raw_data[u"end_time"] = end_time
+ self.export_pending_data()
+
+ def finalize_test_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of test teardown, as the implementation
+ reads various Robot variables, some of them only available at teardown.
+
+ The write is done at the next start (or at the end of the global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
+ status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
+ test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
+ self.raw_data[u"end_time"] = end_time
+ self.raw_data[u"tags"] = list(test_tags)
+ self.raw_data[u"status"] = status
+ self.raw_data[u"message"] = message
+ self.export_pending_data()
+
+ def finalize_suite_teardown_export(self):
+ """Add the missing fields to data. Do not write yet.
+
+ Should be run at the end of suite teardown
+ (but before the explicit write in the global suite teardown).
+ The write is done at the next start (or explicitly for the global teardown).
+ """
+ end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
+ self.raw_data[u"end_time"] = end_time
+ self.export_pending_data()
diff --git a/resources/libraries/python/model/mem2raw.py b/resources/libraries/python/model/mem2raw.py
new file mode 100644
index 0000000000..c3145b9f31
--- /dev/null
+++ b/resources/libraries/python/model/mem2raw.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for converting in-memory data into raw JSON output.
+
+CSIT and VPP PAPI use custom data types
+that are not directly serializable into JSON.
+
+Thus, before writing the raw output onto disk,
+the data is recursively converted to equivalent serializable types,
+in extreme cases replaced by a string representation.
+
+Validation is outside the scope of this module,
+as it should use the JSON data read from disk.
+"""
+
+import json
+import os
+
+from collections.abc import Iterable, Mapping, Set
+from enum import IntFlag
+
+
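A toy example of what this module does to typical PAPI-flavored values (illustrative only; the exact repr of combined flag values differs slightly across Python versions):

    from enum import IntFlag

    class Flag(IntFlag):
        A = 1
        B = 2

    before = {
        u"mac": bytes.fromhex(u"001122334455"),
        u"flags": Flag.A | Flag.B,
        u"hosts": {u"10.0.0.2", u"10.0.0.1"},
    }
    # After conversion, only plain JSON types remain, keys sorted:
    # {"flags": "<Flag.B|A: 3>", "hosts": ["10.0.0.1", "10.0.0.2"],
    #  "mac": "001122334455"}
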
+def _pre_serialize_recursive(data):
+ """Recursively sort and convert to a more serializable form.
+
+ VPP PAPI code can give data with its own MACAddress type,
+ or various other enum and flag types.
+ The default json.JSONEncoder method raises TypeError on that.
+ The first point of this function is to apply str() or repr()
+ to leaf values that need it.
+
+ Also, PAPI responses are namedtuples, which confuses
+ the json.JSONEncoder method (so it does not recurse).
+ Dictization (see PapiExecutor) helps somewhat, but it turns a namedtuple
+ into a UserDict, which also confuses json.JSONEncoder.
+ Therefore, we recursively convert any Mapping into an ordinary dict.
+
+ We also convert iterables to lists (sorted if the iterable was a set),
+ and prevent numbers from getting converted to strings.
+
+ As we are doing such low level operations,
+ we also convert mapping keys to strings
+ and sort the mapping items by keys alphabetically,
+ except the "data" field, which is moved to the end.
+
+ :param data: Object to make serializable, dictized when applicable.
+ :type data: object
+ :returns: Serializable equivalent of the argument.
+ :rtype: object
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ # Recursion ends at scalar values, first handle irregular ones.
+ if isinstance(data, IntFlag):
+ return repr(data)
+ if isinstance(data, bytes):
+ return data.hex()
+ # The regular ones are good to go.
+ if isinstance(data, (str, int, float, bool)):
+ return data
+ # Recurse over, convert and sort mappings.
+ if isinstance(data, Mapping):
+ # Convert and sort alphabetically.
+ ret = {
+ str(key): _pre_serialize_recursive(data[key])
+ for key in sorted(data.keys())
+ }
+ # If it exists, move the "data" field to the end.
+ if u"data" in ret:
+ data_value = ret.pop(u"data")
+ ret[u"data"] = data_value
+ # If it exists, move the "type" field to the start.
+ if u"type" in ret:
+ type_value = ret.pop(u"type")
+ ret_old = ret
+ ret = dict(type=type_value)
+ ret.update(ret_old)
+ return ret
+ # Recurse over and convert iterables.
+ if isinstance(data, Iterable):
+ list_data = [_pre_serialize_recursive(item) for item in data]
+ # Additionally, sets are exported as sorted lists.
+ if isinstance(data, Set):
+ list_data = sorted(list_data)
+ return list_data
+ # Unknown structure, attempt str().
+ return str(data)
+
+
+def _pre_serialize_root(data):
+ """Recursively convert to a more serializable form, tweak order.
+
+ See _pre_serialize_recursive for most of the changes this does.
+
+ The logic here (outside the recursive function) only affects
+ field ordering in the root mapping,
+ to make it more human friendly.
+ We are moving "version" to the top,
+ followed by start time and end time,
+ and moving various long fields (such as "log") to the bottom.
+
+ Some edits are done in-place, do not trust the argument value after calling.
+
+ :param data: Root data to make serializable, dictized when applicable.
+ :type data: dict
+ :returns: Order-tweaked version of the argument.
+ :rtype: dict
+ :raises KeyError: If the data does not contain required fields.
+ :raises RuntimeError: If the argument is not a dict.
+ :raises ValueError: If the argument does not support string conversion.
+ """
+ if not isinstance(data, dict):
+ raise RuntimeError(f"Root data object needs to be a dict: {data!r}")
+ data = _pre_serialize_recursive(data)
+ log = data.pop(u"log")
+ new_data = dict(version=data.pop(u"version"))
+ new_data[u"start_time"] = data.pop(u"start_time")
+ new_data[u"end_time"] = data.pop(u"end_time")
+ new_data.update(data)
+ new_data[u"log"] = log
+ return new_data
+
+
+def write_raw_output(raw_file_path, raw_data):
+ """Prepare data for serialization and dump it into a file.
+
+ Ancestor directories are created if needed.
+
+ :param raw_file_path: Local filesystem path, including the file name.
+ :param raw_data: Root data structure to serialize and write.
+ :type raw_file_path: str
+ :type raw_data: dict
+ """
+ raw_data = _pre_serialize_root(raw_data)
+ os.makedirs(os.path.dirname(raw_file_path), exist_ok=True)
+ with open(raw_file_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(raw_data, file_out, indent=1)
diff --git a/resources/libraries/python/model/raw2info.py b/resources/libraries/python/model/raw2info.py
new file mode 100644
index 0000000000..7a3647d857
--- /dev/null
+++ b/resources/libraries/python/model/raw2info.py
@@ -0,0 +1,294 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module facilitating conversion from raw outputs into info outputs."""
+
+import copy
+import json
+import os
+
+import dateutil.parser
+
+from resources.libraries.python.Constants import Constants
+from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
+
+
+def _raw_to_info_path(raw_path):
+ """Compute the path for the info output corresponding to given raw output.
+
+ :param raw_path: Local filesystem path to read raw JSON data from.
+ :type raw_path: str
+ :returns: Local filesystem path to write info JSON content to.
+ :rtype: str
+ :raises RuntimeError: If the input path does not meet all expectations.
+ """
+ raw_extension = u".raw.json"
+ tmp_parts = raw_path.split(raw_extension)
+ if len(tmp_parts) != 2 or tmp_parts[1] != u"":
+ raise RuntimeError(f"Wrong extension, expected {raw_extension}: {raw_path}")
+ info_path = tmp_parts[0] + u".info.json"
+ return info_path
+
+
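For example (hypothetical paths), the mapping this helper performs:

    _raw_to_info_path(u"output/suites/setup.raw.json")
    # -> u"output/suites/setup.info.json"
    _raw_to_info_path(u"output/suites/log.txt")  # raises RuntimeError
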
+def _process_test_name(data):
+ """Replace raw test name with short and long test names, set test_type.
+
+ Perform in-place edits on the data dictionary.
+ Remove the raw suite_name and test_name, they are not part of the info
+ schema.
+ Return early if the data is not for a test case.
+ Insert the test ID and the long and short test names into the data.
+ Besides suite_name and test_name, test tags are also read.
+
+ The short test name is basically a suite tag, but with a NIC driver prefix,
+ if the NIC driver used is not the default one (drv_vfio_pci for VPP tests).
+
+ The long test name has the following form:
+ {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
+ Lookup in test tags is needed to get the threads value.
+ The threads_and_cores part may be empty, e.g. for TRex tests.
+
+ The test ID has the form {suite_name}.{test_name} where the two names come
+ from Robot variables, converted to lower case with spaces replaced by
+ underscores.
+
+ The test type is set in an internal function.
+
+ :param data: Raw data, perhaps some fields converted into info data already.
+ :type data: dict
+ :raises RuntimeError: If the raw data does not contain expected values.
+ """
+ suite_part = data.pop(u"suite_name").lower().replace(u" ", u"_")
+ if u"test_name" not in data:
+ # There will be no test_id, provide suite_id instead.
+ data[u"suite_id"] = suite_part
+ return
+ test_part = data.pop(u"test_name").lower().replace(u" ", u"_")
+ data[u"test_id"] = f"{suite_part}.{test_part}"
+ tags = data[u"tags"]
+ # Test name does not contain thread count.
+ subparts = test_part.split(u"c-", 1)
+ if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
+ # Physical core count not detected, assume it is a TRex test.
+ if u"--" not in test_part:
+ raise RuntimeError(f"Cores not found for {subparts}")
+ short_name = test_part.split(u"--", 1)[1]
+ else:
+ short_name = subparts[1]
+ # Add threads to test_part.
+ core_part = subparts[0][-1] + u"c"
+ for tag in tags:
+ tag = tag.lower()
+ if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
+ test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
+ break
+ else:
+ raise RuntimeError(f"Threads not found for {test_part} tags {tags}")
+ # For the long name we need the NIC model, which is only in the suite name.
+ last_suite_part = suite_part.split(u".")[-1]
+ # Short name happens to be the suffix we want to ignore.
+ prefix_part = last_suite_part.split(short_name)[0]
+ # Also remove the trailing dash.
+ prefix_part = prefix_part[:-1]
+ # Throw away a possible link prefix such as "1n1l-".
+ nic_code = prefix_part.split(u"-", 1)[-1]
+ nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
+ long_name = f"{nic_short}-{test_part}"
+ # Set test type.
+ test_type = _detect_test_type(data)
+ data[u"test_type"] = test_type
+ # Remove the trailing test type from names (if present).
+ short_name = short_name.split(f"-{test_type}")[0]
+ long_name = long_name.split(f"-{test_type}")[0]
+ # Store names.
+ data[u"test_name_short"] = short_name
+ data[u"test_name_long"] = long_name
+
+
+def _detect_test_type(data):
+ """Return test_type, as inferred from Robot test tags.
+
+ :param data: Raw data, perhaps some fields converted into info data already.
+ :type data: dict
+ :returns: The inferred test type value.
+ :rtype: str
+ :raises RuntimeError: If the test tags do not contain expected values.
+ """
+ tags = data[u"tags"]
+ # The first 5 options are specific to VPP tests.
+ if u"DEVICETEST" in tags:
+ test_type = u"device"
+ elif u"LDP_NGINX" in tags:
+ test_type = u"vsap"
+ elif u"HOSTSTACK" in tags:
+ test_type = u"hoststack"
+ elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
+ test_type = u"gso"
+ elif u"RECONF" in tags:
+ test_type = u"reconf"
+ # The remaining 3 options could also apply to DPDK and TRex tests.
+ elif u"SOAK" in tags:
+ test_type = u"soak"
+ elif u"NDRPDR" in tags:
+ test_type = u"ndrpdr"
+ elif u"MRR" in tags:
+ test_type = u"mrr"
+ else:
+ raise RuntimeError(f"Unable to infer test type from tags: {tags}")
+ return test_type
+
+
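A plausible worked example of the renaming and type detection above (the names are invented to match usual CSIT conventions, not taken from a real run):

    # Suite id ends in "...10ge2p1x710-ethip4-ip4base-ndrpdr",
    # test name is "64b-1c-ethip4-ip4base-ndrpdr",
    # tags include "NDRPDR" and "2T1C".
    #
    # test_id:         "<suite id>.64b-1c-ethip4-ip4base-ndrpdr"
    # threads added:   "64b-1c-..." becomes "64b-2t1c-..." (from tag "2T1C")
    # nic_code:        "10ge2p1x710" maps to "x710" via NIC_CODE_TO_SHORT_NAME
    # test_type:       "ndrpdr" (from tag "NDRPDR")
    # test_name_long:  "x710-64b-2t1c-ethip4-ip4base" (type suffix stripped)
    # test_name_short: "ethip4-ip4base"
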
+def _convert_to_info_in_memory(data):
+ """Perform all changes needed for processing of data, return new data.
+
+ Data is assumed to be valid for the raw schema, so no exceptions are
+ expected.
+ The original argument object is not edited,
+ a new copy is created for edits and returned,
+ because there is no easy way to sort keys in-place.
+
+ :param data: The whole composite object to filter and enhance.
+ :type data: dict
+ :returns: New object with the edited content.
+ :rtype: dict
+ """
+ data = copy.deepcopy(data)
+
+ # Drop any SSH log items.
+ data[u"log"] = list()
+
+ # Duration is computed for every file.
+ start_float = dateutil.parser.parse(data[u"start_time"]).timestamp()
+ end_float = dateutil.parser.parse(data[u"end_time"]).timestamp()
+ data[u"duration"] = end_float - start_float
+
+ # Reorder important fields to the top.
+ sorted_data = dict(version=data.pop(u"version"))
+ sorted_data[u"duration"] = data.pop(u"duration")
+ sorted_data[u"start_time"] = data.pop(u"start_time")
+ sorted_data[u"end_time"] = data.pop(u"end_time")
+ sorted_data.update(data)
+ data = sorted_data
+ # TODO: Do we care about the order of subsequently added fields?
+
+ # Convert status into a boolean.
+ status = data.pop(u"status", None)
+ if status is not None:
+ data[u"passed"] = (status == u"PASS")
+ if data[u"passed"]:
+ # Also truncate successful test messages.
+ data[u"message"] = u""
+
+ # Replace raw names with processed ones, set test_id and test_type.
+ _process_test_name(data)
+
+ # The rest is only relevant for test case outputs.
+ if u"result" not in data:
+ return data
+ result_node = data[u"result"]
+ result_type = result_node[u"type"]
+ if result_type == u"unknown":
+ # Device or something else not supported.
+ return data
+
+ # More processing depending on result type. TODO: Separate functions?
+
+ # Compute avg and stdev for mrr.
+ if result_type == u"mrr":
+ rate_node = result_node[u"receive_rate"][u"rate"]
+ stats = AvgStdevStats.for_runs(rate_node[u"values"])
+ rate_node[u"avg"] = stats.avg
+ rate_node[u"stdev"] = stats.stdev
+
+ # Multiple processing steps for ndrpdr.
+ if result_type != u"ndrpdr":
+ return data
+ # Filter out invalid latencies.
+ for which_key in (u"latency_forward", u"latency_reverse"):
+ if which_key not in result_node:
+ # Probably just a unidirectional test.
+ continue
+ for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
+ if result_node[which_key][load][u"max"] <= 0:
+ # One invalid number is enough to remove all loads.
+ break
+ else:
+ # No break means all numbers are ok, nothing to do here.
+ continue
+ # A break happened, something is invalid, remove all loads.
+ result_node.pop(which_key)
+
+ return data
+
+
+def _merge_into_suite_info_file(teardown_info_path):
+ """Move setup and teardown data into a single file, remove old files.
+
+ The caller has to confirm the argument is correct, e.g. ending in
+ "/teardown.info.json".
+
+ :param teardown_info_path: Local filesystem path to the teardown info file.
+ :type teardown_info_path: str
+ :returns: Local filesystem path to the newly created suite info file.
+ :rtype: str
+ """
+ # Manual right replace: https://stackoverflow.com/a/9943875
+ setup_info_path = u"setup".join(teardown_info_path.rsplit(u"teardown", 1))
+ with open(teardown_info_path, u"rt", encoding="utf-8") as file_in:
+ teardown_data = json.load(file_in)
+ # Transforming setup data into suite data.
+ with open(setup_info_path, u"rt", encoding="utf-8") as file_in:
+ suite_data = json.load(file_in)
+
+ end_time = teardown_data[u"end_time"]
+ suite_data[u"end_time"] = end_time
+ start_float = dateutil.parser.parse(suite_data[u"start_time"]).timestamp()
+ end_float = dateutil.parser.parse(suite_data[u"end_time"]).timestamp()
+ suite_data[u"duration"] = end_float - start_float
+ setup_log = suite_data.pop(u"log")
+ suite_data[u"setup_log"] = setup_log
+ suite_data[u"teardown_log"] = teardown_data[u"log"]
+
+ suite_info_path = u"suite".join(teardown_info_path.rsplit(u"teardown", 1))
+ with open(suite_info_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(suite_data, file_out, indent=1)
+ # We moved everything useful from the temporary setup/teardown info files.
+ os.remove(setup_info_path)
+ os.remove(teardown_info_path)
+
+ return suite_info_path
+
+
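After the merge, the resulting suite.info.json roughly has this shape (sketched here as a Python dict; the values are invented):

    suite_info = {
        u"version": u"1.0.0",
        u"duration": 123.45,  # teardown end minus setup start, in seconds
        u"start_time": u"2021-12-15T12:00:00.000000Z",
        u"end_time": u"2021-12-15T12:02:03.450000Z",
        u"suite_name": u"...",
        u"setup_log": [],     # was "log" in the setup info file
        u"teardown_log": [],  # was "log" in the teardown info file
    }
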
+def convert_content_to_info(from_raw_path):
+ """Read raw output, perform filtering, add derivatives, write info output.
+
+ The directory path is created if missing.
+
+ When processing a teardown, also create the suite output using setup info.
+
+ :param from_raw_path: Local filesystem path to read raw JSON data from.
+ :type from_raw_path: str
+ :returns: Local filesystem path to the written info JSON file.
+ :rtype: str
+ :raises RuntimeError: If the path or content do not match expectations.
+ """
+ to_info_path = _raw_to_info_path(from_raw_path)
+ with open(from_raw_path, u"rt", encoding="utf-8") as file_in:
+ data = json.load(file_in)
+
+ data = _convert_to_info_in_memory(data)
+
+ with open(to_info_path, u"xt", encoding="utf-8") as file_out:
+ json.dump(data, file_out, indent=1)
+ if to_info_path.endswith(u"/teardown.info.json"):
+ to_info_path = _merge_into_suite_info_file(to_info_path)
+ # TODO: Return both paths for validation?
+
+ return to_info_path
diff --git a/resources/libraries/python/model/util.py b/resources/libraries/python/model/util.py
new file mode 100644
index 0000000000..879f1f28b1
--- /dev/null
+++ b/resources/libraries/python/model/util.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module hosting a few utility functions useful when dealing with modelled data.
+
+This is for storing varied utility functions, which are too short and diverse
+to be put into more descriptive modules.
+"""
+
+
+from robot.libraries.BuiltIn import BuiltIn
+
+
+def descend(parent_node, key, default_factory=None):
+ """Return a sub-node, create and insert it when it does not exist.
+
+ Without this function:
+ child_node = parent_node.get(key, dict())
+ parent_node[key] = child_node
+
+ With this function:
+ child_node = descend(parent_node, key)
+
+ The new code is shorter and avoids the need to type key and parent_node
+ twice.
+
+ :param parent_node: Reference to the inner node of a larger structure
+ we want to descend from.
+ :param key: Key of the maybe existing child node.
+ :param default_factory: If the key does not exist, call this
+ to create a new value to be inserted under the key.
+ None means dict. The other popular option is list.
+ :type parent_node: dict
+ :type key: str
+ :type default_factory: Optional[Callable[[], object]]
+ :returns: The reference to the (maybe just created) child node.
+ :rtype: object
+ """
+ if key not in parent_node:
+ factory = dict if default_factory is None else default_factory
+ parent_node[key] = factory()
+ return parent_node[key]
+
+
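A minimal sketch of the helper in use, mirroring append_mrr_value from ExportResult.py (plain dicts, invented value):

    result = dict()
    rate = descend(descend(result, u"receive_rate"), u"rate")
    rate[u"unit"] = u"pps"
    descend(rate, u"values", list).append(1234.5)
    # result == {"receive_rate": {"rate": {"unit": "pps", "values": [1234.5]}}}
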
+ """ + instance = BuiltIn().get_library_instance( + u"resources.libraries.python.model.export_json" + ) + if instance is None: + return None + return instance.raw_data diff --git a/resources/libraries/python/model/validate.py b/resources/libraries/python/model/validate.py new file mode 100644 index 0000000000..c441936ac8 --- /dev/null +++ b/resources/libraries/python/model/validate.py @@ -0,0 +1,73 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for validating JSON instances against schemas. + +Short module currently, as we validate only testcase info outputs. +Structure will probably change when we start validation mode file types. +""" + +import json +import jsonschema + + +def _get_validator(schema_path): + """Contruct validator with format checking enabled. + + Load json schema from disk. + Perform validation against meta-schema before returning. + + :param schema_path: Local filesystem path to .json file storing the schema. + :type schema_path: str + :returns: Instantiated validator class instance. + :rtype: jsonschema.validators.Validator + :raises RuntimeError: If the schema is not valid according its meta-schema. + """ + with open(schema_path, u"rt", encoding="utf-8") as file_in: + schema = json.load(file_in) + validator_class = jsonschema.validators.validator_for(schema) + validator_class.check_schema(schema) + fmt_checker = jsonschema.FormatChecker() + validator = validator_class(schema, format_checker=fmt_checker) + return validator + + +def get_validators(): + """Return mapping from file types to validator instances. + + Uses hardcoded file types and paths to schemas on disk. + + :returns: Validators, currently just for tc_info_output. + :rtype: Mapping[str, jsonschema.validators.Validator] + :raises RuntimeError: If schemas are not readable or not valid. + """ + relative_path = u"docs/model/current/schema/test_case.info.schema.json" + # Robot is always started when CWD is CSIT_DIR. + validator = _get_validator(relative_path) + return dict(tc_info=validator) + + +def validate(file_path, validator): + """Load data from disk, use validator to validate it. + + :param file_path: Local filesystem path including the file name to load. + :param validator: Validator instance to use for validation. + :type file_path: str + :type validator: jsonschema.validators.Validator + :raises RuntimeError: If schema validation fails. 
+ """ + with open(file_path, u"rt", encoding="utf-8") as file_in: + instance = json.load(file_in) + error = jsonschema.exceptions.best_match(validator.iter_errors(instance)) + if error is not None: + raise error diff --git a/resources/libraries/python/ssh.py b/resources/libraries/python/ssh.py index fad00482ed..e47272f4db 100644 --- a/resources/libraries/python/ssh.py +++ b/resources/libraries/python/ssh.py @@ -17,7 +17,7 @@ import socket from io import StringIO -from time import time, sleep +from time import monotonic, sleep from paramiko import RSAKey, SSHClient, AutoAddPolicy from paramiko.ssh_exception import SSHException, NoValidConnectionsError @@ -25,6 +25,9 @@ from robot.api import logger from scp import SCPClient, SCPException from resources.libraries.python.OptionString import OptionString +from resources.libraries.python.model.ExportLog import ( + export_ssh_command, export_ssh_result, export_ssh_timeout +) __all__ = [ u"exec_cmd", u"exec_cmd_no_error", u"SSH", u"SSHTimeout", u"scp_node" @@ -82,7 +85,7 @@ class SSH: raise IOError(f"Cannot connect to {node['host']}") else: try: - start = time() + start = monotonic() pkey = None if u"priv_key" in node: pkey = RSAKey.from_private_key(StringIO(node[u"priv_key"])) @@ -101,7 +104,7 @@ class SSH: SSH.__existing_connections[node_hash] = self._ssh logger.debug( f"New SSH to {self._ssh.get_transport().getpeername()} " - f"took {time() - start} seconds: {self._ssh}" + f"took {monotonic() - start} seconds: {self._ssh}" ) except SSHException as exc: raise IOError(f"Cannot connect to {node[u'host']}") from exc @@ -142,7 +145,7 @@ class SSH: f"Reconnecting peer done: {node[u'host']}, {node[u'port']}" ) - def exec_command(self, cmd, timeout=10, log_stdout_err=True): + def exec_command(self, cmd, timeout=10, log_stdout_err=True, export=True): """Execute SSH command on a new channel on the connected Node. :param cmd: Command to run on the Node. @@ -151,9 +154,12 @@ class SSH: :param log_stdout_err: If True, stdout and stderr are logged. stdout and stderr are logged also if the return code is not zero independently of the value of log_stdout_err. + :param export: If false, do not attempt JSON export. + Needed for calls outside Robot (e.g. from reservation script). :type cmd: str or OptionString :type timeout: int :type log_stdout_err: bool + :type export: bool :returns: return_code, stdout, stderr :rtype: tuple(int, str, str) :raises SSHTimeout: If command is not finished in timeout time. 
@@ -174,7 +180,9 @@ class SSH: logger.trace(f"exec_command on {peer} with timeout {timeout}: {cmd}") - start = time() + if export: + export_ssh_command(self._node[u"host"], self._node[u"port"], cmd) + start = monotonic() chan.exec_command(cmd) while not chan.exit_status_ready() and timeout is not None: if chan.recv_ready(): @@ -187,7 +195,16 @@ class SSH: stderr += s_err.decode(encoding=u'utf-8', errors=u'ignore') \ if isinstance(s_err, bytes) else s_err - if time() - start > timeout: + duration = monotonic() - start + if duration > timeout: + if export: + export_ssh_timeout( + host=self._node[u"host"], + port=self._node[u"port"], + stdout=stdout, + stderr=stderr, + duration=duration, + ) raise SSHTimeout( f"Timeout exception during execution of command: {cmd}\n" f"Current contents of stdout buffer: " @@ -209,8 +226,8 @@ class SSH: stderr += s_err.decode(encoding=u'utf-8', errors=u'ignore') \ if isinstance(s_err, bytes) else s_err - end = time() - logger.trace(f"exec_command on {peer} took {end-start} seconds") + duration = monotonic() - start + logger.trace(f"exec_command on {peer} took {duration} seconds") logger.trace(f"return RC {return_code}") if log_stdout_err or int(return_code): @@ -220,20 +237,33 @@ class SSH: logger.trace( f"return STDERR {stderr}" ) + if export: + export_ssh_result( + host=self._node[u"host"], + port=self._node[u"port"], + code=return_code, + stdout=stdout, + stderr=stderr, + duration=duration, + ) return return_code, stdout, stderr def exec_command_sudo( - self, cmd, cmd_input=None, timeout=30, log_stdout_err=True): + self, cmd, cmd_input=None, timeout=30, log_stdout_err=True, + export=True): """Execute SSH command with sudo on a new channel on the connected Node. :param cmd: Command to be executed. :param cmd_input: Input redirected to the command. :param timeout: Timeout. :param log_stdout_err: If True, stdout and stderr are logged. + :param export: If false, do not attempt JSON export. + Needed for calls outside Robot (e.g. from reservation script). :type cmd: str :type cmd_input: str :type timeout: int :type log_stdout_err: bool + :type export: bool :returns: return_code, stdout, stderr :rtype: tuple(int, str, str) @@ -254,7 +284,7 @@ class SSH: else: command = f"sudo -E -S {cmd} <<< \"{cmd_input}\"" return self.exec_command( - command, timeout, log_stdout_err=log_stdout_err + command, timeout, log_stdout_err=log_stdout_err, export=export ) def exec_command_lxc( @@ -400,19 +430,20 @@ class SSH: self._ssh.get_transport(), sanitize=lambda x: x, socket_timeout=timeout ) - start = time() + start = monotonic() if not get: scp.put(local_path, remote_path) else: scp.get(remote_path, local_path) scp.close() - end = time() - logger.trace(f"SCP took {end-start} seconds") + duration = monotonic() - start + logger.trace(f"SCP took {duration} seconds") def exec_cmd( node, cmd, timeout=600, sudo=False, disconnect=False, - log_stdout_err=True): + log_stdout_err=True, export=True + ): """Convenience function to ssh/exec/return rc, out & err. Returns (rc, stdout, stderr). @@ -425,14 +456,17 @@ def exec_cmd( :param log_stdout_err: If True, stdout and stderr are logged. stdout and stderr are logged also if the return code is not zero independently of the value of log_stdout_err. + :param export: If false, do not attempt JSON export. + Needed for calls outside Robot (e.g. from reservation script). 
:type node: dict :type cmd: str or OptionString :type timeout: int :type sudo: bool :type disconnect: bool :type log_stdout_err: bool + :type export: bool :returns: RC, Stdout, Stderr. - :rtype: tuple(int, str, str) + :rtype: Tuple[int, str, str] """ if node is None: raise TypeError(u"Node parameter is None") @@ -452,11 +486,13 @@ def exec_cmd( try: if not sudo: ret_code, stdout, stderr = ssh.exec_command( - cmd, timeout=timeout, log_stdout_err=log_stdout_err + cmd, timeout=timeout, log_stdout_err=log_stdout_err, + export=export ) else: ret_code, stdout, stderr = ssh.exec_command_sudo( - cmd, timeout=timeout, log_stdout_err=log_stdout_err + cmd, timeout=timeout, log_stdout_err=log_stdout_err, + export=export ) except SSHException as err: logger.error(repr(err)) @@ -470,7 +506,8 @@ def exec_cmd( def exec_cmd_no_error( node, cmd, timeout=600, sudo=False, message=None, disconnect=False, - retries=0, include_reason=False, log_stdout_err=True): + retries=0, include_reason=False, log_stdout_err=True, export=True + ): """Convenience function to ssh/exec/return out & err. Verifies that return code is zero. @@ -489,6 +526,8 @@ def exec_cmd_no_error( :param log_stdout_err: If True, stdout and stderr are logged. stdout and stderr are logged also if the return code is not zero independently of the value of log_stdout_err. + :param export: If false, do not attempt JSON export. + Needed for calls outside Robot thread (e.g. parallel framework setup). :type node: dict :type cmd: str or OptionString :type timeout: int @@ -498,6 +537,7 @@ def exec_cmd_no_error( :type retries: int :type include_reason: bool :type log_stdout_err: bool + :type export: bool :returns: Stdout, Stderr. :rtype: tuple(str, str) :raises RuntimeError: If bash return code is not 0. @@ -505,7 +545,7 @@ def exec_cmd_no_error( for _ in range(retries + 1): ret_code, stdout, stderr = exec_cmd( node, cmd, timeout=timeout, sudo=sudo, disconnect=disconnect, - log_stdout_err=log_stdout_err + log_stdout_err=log_stdout_err, export=export ) if ret_code == 0: break diff --git a/resources/libraries/robot/performance/performance_display.robot b/resources/libraries/robot/performance/performance_display.robot index de515412a5..db2b522091 100644 --- a/resources/libraries/robot/performance/performance_display.robot +++ b/resources/libraries/robot/performance/performance_display.robot @@ -46,6 +46,31 @@ | | ... | ${message}${\n}${message_zero} | ${message}${\n}${message_other} | | Fail | ${message} +| Compute bandwidth +| | [Documentation] +| | ... | Compute (bidir) bandwidth from given (unidir) transaction rate. +| | ... +| | ... | This keyword reads "ppta" and "avg_frame_size" set elsewhere. +| | ... | The implementation should work for both pps and cps rates. +| | ... | +| | ... | *Arguments:* +| | ... | - tps - Transaction rate (unidirectional) [tps]. Type: float +| | ... +| | ... | *Returns:* +| | ... | - Computed bandwidth in Gbps. +| | ... | - Computed aggregate packet rate in pps. +| | +| | ... | *Example:* +| | +| | ... | |\ \${bandwidth} \| \${pps} = \| Compute Bandwidth \| \${12345.67} \| +| | +| | [Arguments] | ${tps} +| | +| | ${ppta} = | Get Packets Per Transaction Aggregated +| | ${pps} = | Evaluate | ${tps} * ${ppta} +| | ${bandwidth} = | Evaluate | ${pps} * (${avg_frame_size}+20)*8 / 1e9 +| | Return From Keyword | ${bandwidth} | ${pps} + | Display Reconfig Test Message | | [Documentation] | | ... 
| Display the number of packets lost (bidirectionally) @@ -198,6 +223,8 @@ | | [Arguments] | ${text} | ${tps} | ${latency}=${EMPTY} | | | | Set Test Message | ${\n}${text}: ${tps} CPS | append=yes +| | ${bandwidth} | ${pps} = | Compute Bandwidth | ${tps} +| | Export Search Bound | ${text} | ${tps} | cps | ${bandwidth} | | Return From Keyword If | not """${latency}""" | | Set Test Message | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency} | | ... | append=yes @@ -230,11 +257,10 @@ | | | | [Arguments] | ${text} | ${tps} | ${latency}=${EMPTY} | | -| | ${ppta} = | Get Packets Per Transaction Aggregated -| | ${pps} = | Evaluate | ${tps} * ${ppta} -| | ${bandwidth} = | Evaluate | ${pps} * (${avg_frame_size}+20)*8 / 1e9 +| | ${bandwidth} | ${pps} = | Compute Bandwidth | ${tps} | | Set Test Message | ${\n}${text}: ${pps} pps, | append=yes | | Set Test Message | ${bandwidth} Gbps (initial) | append=yes +| | Export Search Bound | ${text} | ${pps} | pps | ${bandwidth * 1e9} | | Return From Keyword If | not """${latency}""" | | Set Test Message | ${\n}LATENCY [min/avg/max/hdrh] per stream: ${latency} | | ... | append=yes diff --git a/resources/libraries/robot/performance/performance_utils.robot b/resources/libraries/robot/performance/performance_utils.robot index 8de74c5707..f2cb873d1a 100644 --- a/resources/libraries/robot/performance/performance_utils.robot +++ b/resources/libraries/robot/performance/performance_utils.robot @@ -13,6 +13,7 @@ *** Settings *** | Library | Collections +| Library | resources.libraries.python.model.ExportResult | Library | resources.libraries.python.topology.Topology | Library | resources.libraries.python.NodePath | Library | resources.libraries.python.InterfaceUtil @@ -301,6 +302,7 @@ | | ... | ramp_up_rate=${ramp_up_rate} | | ${latency} = | Get Latency Int | | Set Test Message | ${\n}${message_prefix} ${latency} | append=${True} +| | Export Ndrpdr Latency | ${message_prefix} | ${latency} | Send ramp-up traffic | | [Documentation] @@ -371,14 +373,17 @@ | | ... | Type: boolean | | ... | - duration_limit - Hard limit for trial duration, overriding duration | | ... | computed from transaction_scale. Default 0.0 means no limit. +| | ... | - export_mrr_unit - Use this unit when exporting MRR values, +| | ... | or empty string for no export. | | | | ... | *Example:* | | | | ... | \| Send traffic at specified rate \| \${1.0} \| ${4000000.0} \ -| | ... | \| \${10} \| ${False} \| ${1.0} \| +| | ... | \| \${10} \| ${False} \| ${1.0} \| pps \| | | | | [Arguments] | ${trial_duration} | ${rate} | ${trial_multiplicity} | | ... | ${use_latency}=${False} | ${duration_limit}=${0.0} +| | ... | ${export_mrr_unit}=${Empty} | | | | ${ppta} = | Get Packets Per Transaction Aggregated | | ${ramp_up_duration} = | Get Ramp Up Duration @@ -415,7 +420,10 @@ | | | ... | ramp_up_rate=${ramp_up_rate} | | | # Out of several quantities for aborted traffic (duration stretching), | | | # the approximated receive rate is the best estimate we have. -| | | Append To List | ${results} | ${result.approximated_receive_rate} +| | | ${value} = | Set Variable | ${result.approximated_receive_rate} +| | | # TODO: Add correct bandwidth computation. 
+| | | Append Mrr Value | ${value} | ${export_mrr_unit} +| | | Append To List | ${results} | ${value} | | END | | FOR | ${action} | IN | @{stat_post_trial} | | | Run Keyword | Additional Statistics Action For ${action} @@ -637,6 +645,8 @@ | | ${trial_duration} = | Get Mrr Trial Duration | | ${trial_multiplicity} = | Get Mrr Trial Multiplicity | | ${use_latency} = | Get Use Latency +| | ${unit} = | Set Variable If | """_cps""" in """${transaction_type}""" +| | ... | cps | pps | | # The following also sets \${rate_for_teardown} | | ${results} = | Send traffic at specified rate | | ... | rate=${max_rate} @@ -644,9 +654,10 @@ | | ... | trial_multiplicity=${trial_multiplicity} | | ... | use_latency=${use_latency} | | ... | duration_limit=${0.0} -| | ${unit} = | Set Variable If | """_cps""" in """${transaction_type}""" +| | ... | export_mrr_unit=${unit} +| | ${unit_text} = | Set Variable If | """_cps""" in """${transaction_type}""" | | ... | estimated connections per second | packets per second | | Set Test Message | ${\n}Maximum Receive Rate trial results -| | Set Test Message | in ${unit}: ${results} +| | Set Test Message | in ${unit_text}: ${results} | | ... | append=yes | | Fail if no traffic forwarded diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot index 32d8863a32..7fa0222f78 100644 --- a/resources/libraries/robot/shared/default.robot +++ b/resources/libraries/robot/shared/default.robot @@ -31,6 +31,7 @@ | Library | resources.libraries.python.IPUtil | Library | resources.libraries.python.IPv6Util | Library | resources.libraries.python.IrqUtil +| Library | resources.libraries.python.model.export_json | Library | resources.libraries.python.NodePath | Library | resources.libraries.python.Namespaces | Library | resources.libraries.python.PapiHistory @@ -334,6 +335,16 @@ | | Run Keyword If | ${with_trace} | VPP Enable Traces On Dut | | ... | ${nodes['${dut}']} +| Get And Export DPDK Version +| | [Documentation] | Add version to test export as detected on DUT1. +| | +| | ... | *Example:* +| | +| | ... | \| Get And Export DPDK Version \| +| | +| | ${version} = | Get Dpdk Version | ${nodes}[DUT1] +| | Export Dut Type And Version | DPDK | ${version} + | Save VPP PIDs | | [Documentation] | Get PIDs of VPP processes from all DUTs in topology and\ | | ... | set it as a test variable. The PIDs are stored as dictionary items\ diff --git a/resources/libraries/robot/shared/suite_setup.robot b/resources/libraries/robot/shared/suite_setup.robot index 26d7f52205..232158a6e0 100644 --- a/resources/libraries/robot/shared/suite_setup.robot +++ b/resources/libraries/robot/shared/suite_setup.robot @@ -24,6 +24,7 @@ | Variables | resources/libraries/python/Constants.py | | Documentation | Suite setup keywords. + *** Keywords *** | Create suite topology variables | | [Documentation] @@ -83,12 +84,14 @@ | | | | [Arguments] | @{actions} | | +| | Start Suite Setup Export | | ${nic_model_list}= | Create list | ${nic_name} | | &{info}= | Compute Circular Topology | | ... | ${nodes} | filter_list=${nic_model_list} | nic_pfs=${nic_pfs} | | ... 
| always_same_link=${False} | topo_has_tg=${True} | | Set suite variable | &{topology_info} | &{info} | | Create suite topology variables | @{actions} +| | Finalize Suite Setup Export | Setup suite topology interfaces with no TG | | [Documentation] @@ -104,12 +107,14 @@ | | | | [Arguments] | @{actions} | | +| | Start Suite Setup Export | | ${nic_model_list}= | Create list | ${nic_name} | | &{info}= | Compute Circular Topology | | ... | ${nodes} | filter_list=${nic_model_list} | nic_pfs=${nic_pfs} | | ... | always_same_link=${True} | topo_has_tg=${False} | | Set suite variable | &{topology_info} | &{info} | | Create suite topology variables | @{actions} +| | Finalize Suite Setup Export | Setup suite topology interfaces with no DUT | | [Documentation] @@ -125,12 +130,14 @@ | | | | [Arguments] | @{actions} | | +| | Start Suite Setup Export | | ${nic_model_list}= | Create list | ${nic_name} | | &{info}= | Compute Circular Topology | | ... | ${nodes} | filter_list=${nic_model_list} | nic_pfs=${nic_pfs} | | ... | always_same_link=${True} | topo_has_tg=${True} | topo_has_dut=${False} | | Set suite variable | &{topology_info} | &{info} | | Create suite topology variables | @{actions} +| | Finalize Suite Setup Export | Additional Suite Setup Action For scapy | | [Documentation] @@ -151,6 +158,7 @@ | | | Initialize DPDK Framework | ${nodes['${dut}']} | | | ... | ${${dut}_${int}1}[0] | ${${dut}_${int}2}[0] | ${nic_driver} | | END +| | Get And Export DPDK Version | Additional Suite Setup Action For performance vf | | [Documentation] @@ -238,6 +246,8 @@ | | ... | Additional Setup for suites which uses performance measurement | | ... | for L1 cross connect tests | | +| | # TRex suites have only TG (and a loopback cable), no SUT nor DUT. +| | Export Dut Type And Version | none | ${EMPTY} | | Initialize traffic generator | | ... | ${tg} | ${TG_pf1}[0] | ${TG_pf2}[0] | | ... | ${tg} | ${TG_pf2}[0] diff --git a/resources/libraries/robot/shared/suite_teardown.robot b/resources/libraries/robot/shared/suite_teardown.robot index 20b2776eec..ab6d3b6f8c 100644 --- a/resources/libraries/robot/shared/suite_teardown.robot +++ b/resources/libraries/robot/shared/suite_teardown.robot @@ -30,10 +30,12 @@ | | | | [Arguments] | @{actions} | | +| | Start Suite Teardown Export | | FOR | ${action} | IN | @{actions} | | | Run Keyword | Additional Suite Tear Down Action For ${action} | | END | | Remove All Added VIF Ports On All DUTs From Topology | ${nodes} +| | Finalize Suite Teardown Export | Additional Suite Tear Down Action For ab | | [Documentation] diff --git a/resources/libraries/robot/shared/test_setup.robot b/resources/libraries/robot/shared/test_setup.robot index e2fcfc5f87..3bc88858c0 100644 --- a/resources/libraries/robot/shared/test_setup.robot +++ b/resources/libraries/robot/shared/test_setup.robot @@ -21,13 +21,14 @@ *** Keywords *** | Setup test | | [Documentation] -| | ... | Common test setup for tests. +| | ... | Common test setup for VPP tests. | | | | ... | *Arguments:* | | ... | - ${actions} - Additional setup action. 
Type: list | | | | [Arguments] | @{actions} | | +| | Start Test Export | | Reset PAPI History On All DUTs | ${nodes} | | ${int} = | Set Variable If | ${nic_vfs} > 0 | prevf | pf | | Create base startup configuration of VPP on all DUTs diff --git a/resources/libraries/robot/shared/test_teardown.robot b/resources/libraries/robot/shared/test_teardown.robot index 3fe5e6a8ee..474a6e9385 100644 --- a/resources/libraries/robot/shared/test_teardown.robot +++ b/resources/libraries/robot/shared/test_teardown.robot @@ -14,7 +14,7 @@ """Keywords used in test teardowns.""" *** Settings *** -| Resource | resources/libraries/robot/shared/container.robot +| Resource | resources/libraries/robot/shared/default.robot | Library | resources.libraries.python.PapiHistory | Library | resources.libraries.python.topology.Topology | Variables | resources/libraries/python/Constants.py @@ -45,6 +45,7 @@ | | | Run Keyword | Additional Test Tear Down Action For ${action} | | END | | Clean Sockets On All Nodes | ${nodes} +| | Finalize Test Export | Tear down test raw | | [Documentation] @@ -60,6 +61,7 @@ | | | Run Keyword | Additional Test Tear Down Action For ${action} | | END | | Clean Sockets On All Nodes | ${nodes} +| | Finalize Test Export # Additional Test Tear Down Actions in alphabetical order | Additional Test Tear Down Action For acl diff --git a/resources/tools/scripts/topo_reservation.py b/resources/tools/scripts/topo_reservation.py index c1b5c4658e..0016ebc0cc 100755 --- a/resources/tools/scripts/topo_reservation.py +++ b/resources/tools/scripts/topo_reservation.py @@ -24,13 +24,30 @@ import argparse import sys import yaml -from resources.libraries.python.ssh import exec_cmd +from resources.libraries.python.ssh import exec_cmd as _exec_cmd RESERVATION_DIR = u"/tmp/reservation_dir" RESERVATION_NODE = u"TG" +def exec_cmd(node, cmd): + """A wrapper around ssh.exec_cmd with disabled JSON export. + + Using this, maintainers can use "exec_cmd" without worrying + about interaction with JSON export. + + TODO: Instead of this, divide ssh module into reusable and robot-bound parts. + + :param node: Node object as parsed from topology file to execute cmd on. + :param cmd: Command to execute. + :type node: dict + :type cmd: str + :returns: RC, Stdout, Stderr. + :rtype: Tuple[int, str, str] + """ + return _exec_cmd(node, cmd, export=False) + def diag_cmd(node, cmd): """Execute cmd, print cmd and stdout, ignore stderr and rc; return None. diff --git a/tests/__init__.robot b/tests/__init__.robot new file mode 100644 index 0000000000..46f3125845 --- /dev/null +++ b/tests/__init__.robot @@ -0,0 +1,37 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +| Library | resources.libraries.python.model.export_json +| +| Suite Setup | Global Suite Setup +| Suite Teardown | Global Suite Teardown + +*** Keywords *** +| Global Suite Setup +| | [Documentation] +| | ... | Perform initializations needed for any subsequent suite. +| | ...
| Currently only a minimal JSON export of time. +| | +| | Start Suite Setup Export +| | # Nothing explicit here, implicitly a place to find global start timestamp. +| | Finalize Suite Setup Export + +| Global Suite Teardown +| | [Documentation] +| | ... | Perform cleanup needed after any preceding suite. +| | ... | Currently only a minimal JSON export of time. +| | +| | Start Suite Teardown Export +| | # Nothing explicit here, implicitly a place to find global end timestamp. +| | Finalize Suite Teardown Export diff --git a/tests/dpdk/perf/10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot b/tests/dpdk/perf/10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot index c808d4f148..6979140571 100644 --- a/tests/dpdk/perf/10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot +++ b/tests/dpdk/perf/10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot @@ -24,6 +24,8 @@ | | Suite Setup | Setup suite topology interfaces | performance | dpdk | Suite Teardown | Tear down suite | performance | dpdk +| Test Setup | Start Test Export +| Test Teardown | Finalize Test Export | | Test Template | Local Template | diff --git a/tests/dpdk/perf/10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot b/tests/dpdk/perf/10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot index a99a7f6be2..285ce73dfb 100644 --- a/tests/dpdk/perf/10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot +++ b/tests/dpdk/perf/10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot @@ -24,6 +24,8 @@ | | Suite Setup | Setup suite topology interfaces | performance | dpdk | Suite Teardown | Tear down suite | performance | dpdk +| Test Setup | Start Test Export +| Test Teardown | Finalize Test Export | | Test Template | Local Template | diff --git a/tests/dpdk/perf/2n1l-10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot b/tests/dpdk/perf/2n1l-10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot index 467db2ece8..c7dd40cff8 100644 --- a/tests/dpdk/perf/2n1l-10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot +++ b/tests/dpdk/perf/2n1l-10ge2p1x710-eth-l2xcbase-testpmd-ndrpdr.robot @@ -24,6 +24,8 @@ | | Suite Setup | Setup suite topology interfaces | performance | dpdk | Suite Teardown | Tear down suite | performance | dpdk +| Test Setup | Start Test Export +| Test Teardown | Finalize Test Export | | Test Template | Local Template | diff --git a/tests/dpdk/perf/2n1l-10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot b/tests/dpdk/perf/2n1l-10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot index ce96f2f459..8ed6dfe907 100644 --- a/tests/dpdk/perf/2n1l-10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot +++ b/tests/dpdk/perf/2n1l-10ge2p1x710-ethip4-ip4base-l3fwd-ndrpdr.robot @@ -24,6 +24,8 @@ | | Suite Setup | Setup suite topology interfaces | performance | dpdk | Suite Teardown | Tear down suite | performance | dpdk +| Test Setup | Start Test Export +| Test Teardown | Finalize Test Export | | Test Template | Local Template | diff --git a/tests/dpdk/perf/__init__.robot b/tests/dpdk/perf/__init__.robot index e896361708..2945936116 100644 --- a/tests/dpdk/perf/__init__.robot +++ b/tests/dpdk/perf/__init__.robot @@ -19,14 +19,18 @@ | Library | resources.libraries.python.SetupFramework.CleanupFramework | Library | resources.libraries.python.DPDK.DPDKTools | -| Suite Setup | Run Keywords | Setup performance global Variables +| Suite Setup | Run Keywords | Start Suite Setup Export +| ... | AND | Setup performance global Variables | ... | AND | Setup Framework | ${nodes} | ... | AND | Install DPDK framework on all DUTs | ${nodes} | ... | AND | Get CPU Info from All Nodes | ${nodes} | ... 
| AND | Update All Interface Data on All Nodes | ${nodes} | ... | skip_tg=${True} | skip_vpp=${True} +| ... | AND | Finalize Suite Setup Export | -| Suite Teardown | Cleanup Framework | ${nodes} +| Suite Teardown | Run Keywords | Start Suite Teardown Export +| ... | AND | Cleanup Framework | ${nodes} +| ... | AND | Finalize Suite Teardown Export *** Keywords *** | Setup performance global Variables diff --git a/tests/trex/perf/__init__.robot b/tests/trex/perf/__init__.robot index 7d7531d309..e6fc3fae17 100644 --- a/tests/trex/perf/__init__.robot +++ b/tests/trex/perf/__init__.robot @@ -19,13 +19,17 @@ | Library | resources.libraries.python.SetupFramework.CleanupFramework | Library | resources.libraries.python.CpuUtils | -| Suite Setup | Run Keywords | Setup Global Variables +| Suite Setup | Run Keywords | Start Suite Setup Export +| ... | AND | Setup Global Variables | ... | AND | Setup Framework | ${nodes} | ... | AND | Get CPU Info from All Nodes | ${nodes} | ... | AND | Update All Interface Data on All Nodes | ${nodes} | ... | skip_tg=${True} | skip_vpp=${True} +| ... | AND | Finalize Suite Setup Export | -| Suite Teardown | Cleanup Framework | ${nodes} +| Suite Teardown | Run Keywords | Start Suite Teardown Export +| ... | AND | Cleanup Framework | ${nodes} +| ... | AND | Finalize Suite Teardown Export *** Keywords *** | Setup Global Variables diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4base-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4base-tg-ndrpdr.robot index 76d483940b..756e37a3cf 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4base-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4base-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4scale2m-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4scale2m-tg-ndrpdr.robot index 0d7ae91a08..7dbed4c28f 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4scale2m-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4-ip4scale2m-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot index 5de14432d1..e9fc43ff15 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot @@ -22,6 +22,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot index 6fa25a50ca..3bb788bf2f 100644 --- 
a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot index 66ef7e8fe5..f15eaacc43 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot index afed63333f..a83e87d902 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4tcp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot index d73bd9b044..7c4ddc2115 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-cps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot index bbf66e06b6..94456e7437 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h1024-p63-s64512-pps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot index 41d49315be..23690c343c 100644 --- 
a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-cps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template @@ -73,7 +74,7 @@ | | [Arguments] | ${frame_size} | | | | Set Test Variable | \${frame_size} - +| | | | Given Set Max Rate And Jumbo | | Then Find NDR and PDR intervals using optimized search diff --git a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot index ecb48413cc..61d29f1eda 100644 --- a/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot +++ b/tests/trex/perf/ip4/1n1l-10ge2p1x710-ethip4udp-ip4base-h262144-p63-s16515072-pps-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6base-tg-ndrpdr.robot b/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6base-tg-ndrpdr.robot index 9788baadac..c13b6b663f 100644 --- a/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6base-tg-ndrpdr.robot +++ b/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6base-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6scale2m-tg-ndrpdr.robot b/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6scale2m-tg-ndrpdr.robot index fcdcfead7f..ba3449d898 100644 --- a/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6scale2m-tg-ndrpdr.robot +++ b/tests/trex/perf/ip6/1n1l-10ge2p1x710-ethip6-ip6scale2m-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/trex/perf/l2/1n1l-10ge2p1x710-eth-l2bdscale1mmaclrn-tg-ndrpdr.robot b/tests/trex/perf/l2/1n1l-10ge2p1x710-eth-l2bdscale1mmaclrn-tg-ndrpdr.robot index ed89f95fea..3acc63deb3 100644 --- a/tests/trex/perf/l2/1n1l-10ge2p1x710-eth-l2bdscale1mmaclrn-tg-ndrpdr.robot +++ b/tests/trex/perf/l2/1n1l-10ge2p1x710-eth-l2bdscale1mmaclrn-tg-ndrpdr.robot @@ -21,6 +21,7 @@ | | Suite Setup | Setup suite topology interfaces with no DUT | performance_tg_nic | Suite Teardown | Tear down suite | performance +| Test Setup | Start Test Export | Test Teardown | Tear down test raw | performance | | Test Template | Local Template diff --git a/tests/vpp/device/__init__.robot b/tests/vpp/device/__init__.robot index 3f05641bbf..581144f591 100644 --- a/tests/vpp/device/__init__.robot +++ b/tests/vpp/device/__init__.robot @@ -20,16 +20,20 @@ | Library | resources.libraries.python.SetupFramework.CleanupFramework | Library | 
resources.libraries.python.CpuUtils | -| Suite Setup | Run Keywords | Setup Global Variables +| Suite Setup | Run Keywords | Start Suite Setup Export +| ... | AND | Setup Global Variables | ... | AND | Setup Framework | ${nodes} | ... | AND | Setup Corekeeper on All Nodes | ${nodes} | ... | AND | Install Vpp on All Duts | ${nodes} | ${packages_dir} | ... | AND | Verify Vpp on All Duts | ${nodes} | ... | AND | Get CPU Info from All Nodes | ${nodes} | ... | AND | Update All Interface Data on All Nodes | ${nodes} +| ... | AND | Finalize Suite Setup Export | -| Suite Teardown | Run Keywords | Disconnect All Papi Connections +| Suite Teardown | Run Keywords | Start Suite Teardown Export +| ... | AND | Disconnect All Papi Connections | ... | AND | Cleanup Framework | ${nodes} +| ... | AND | Finalize Suite Teardown Export *** Keywords *** | Setup Global Variables diff --git a/tests/vpp/perf/__init__.robot b/tests/vpp/perf/__init__.robot index ccb56f2827..37875163a4 100644 --- a/tests/vpp/perf/__init__.robot +++ b/tests/vpp/perf/__init__.robot @@ -20,7 +20,8 @@ | Library | resources.libraries.python.SetupFramework.CleanupFramework | Library | resources.libraries.python.CpuUtils | -| Suite Setup | Run Keywords | Setup Global Variables +| Suite Setup | Run Keywords | Start Suite Setup Export +| ... | AND | Setup Global Variables | ... | AND | Setup Framework | ${nodes} | ... | AND | Setup Corekeeper on All Nodes | ${nodes} | ... | AND | Install Vpp on All Duts | ${nodes} | ${packages_dir} @@ -30,9 +31,12 @@ | ... | AND | Get CPU Info from All Nodes | ${nodes} | ... | AND | Update All Interface Data on All Nodes | ${nodes} | ... | skip_tg=${True} +| ... | AND | Finalize Suite Setup Export | -| Suite Teardown | Run Keywords | Disconnect All Papi Connections +| Suite Teardown | Run Keywords | Start Suite Teardown Export +| ... | AND | Disconnect All Papi Connections | ... | AND | Cleanup Framework | ${nodes} +| ... | AND | Finalize Suite Teardown Export *** Keywords *** | Setup Global Variables -- 2.16.6
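
Note on the performance_display.robot hunks above: the patch replaces the inline Evaluate arithmetic with a shared Compute Bandwidth keyword that returns both the bandwidth and the packet rate for a given transaction rate. The following is a minimal Python sketch of that computation, assuming the same formula visible in the removed lines; the function name and signature here are illustrative, not the actual CSIT library code.

def compute_bandwidth(tps, ppta, avg_frame_size):
    """Convert a transaction rate to packet rate and L1 bandwidth.

    Mirrors the removed inline lines: pps = tps * ppta and
    bandwidth [Gbps] = pps * (avg_frame_size + 20) * 8 / 1e9,
    where the extra 20 bytes per frame cover L1 overhead
    (7B preamble + 1B start-of-frame delimiter + 12B inter-frame gap).

    :param tps: Transactions per second.
    :param ppta: Packets per transaction, aggregated over directions.
    :param avg_frame_size: Average L2 frame size in bytes.
    :returns: Bandwidth in Gbps and packet rate in pps.
    """
    pps = tps * ppta
    bandwidth = pps * (avg_frame_size + 20) * 8 / 1e9
    return bandwidth, pps

# Example: 1 Mpps of 64B frames occupies 0.672 Gbps on the wire.
print(compute_bandwidth(tps=1e6, ppta=1, avg_frame_size=64))

As the pps hunk shows, the test message prints the bandwidth in Gbps while Export Search Bound receives ${bandwidth * 1e9}, i.e. the value scaled to bits per second for export.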