From beeb2acb9ac153eaa54983bea46a76d596168965 Mon Sep 17 00:00:00 2001 From: Vratko Polak Date: Fri, 8 Jun 2018 18:07:35 +0200 Subject: [PATCH] CSIT-1110: Integrate anomaly detection into PAL + Keep the original detection, + add the new one as subdirectory (both in source and in rendered tree). - The new detection is not rebased over "Add dpdk mrr tests to trending". New detection features: + Do not remove (nor detect) outliers. + Trend line shows the constant average within a group. + Anomaly circles are placed at the changed average. + Small bias against too similar averages. + Should be ready for moving the detection library out to pip. Change-Id: I7ab1a92b79eeeed53ba65a071b1305e927816a89 Signed-off-by: Vratko Polak --- resources/tools/presentation/new/conf.py | 239 ++ resources/tools/presentation/new/conf_cpta/conf.py | 112 + .../presentation/new/doc/pal_func_diagram.svg | 1413 +++++++ .../tools/presentation/new/doc/pal_layers.svg | 441 +++ resources/tools/presentation/new/doc/pal_lld.rst | 1623 ++++++++ resources/tools/presentation/new/environment.py | 128 + resources/tools/presentation/new/errors.py | 78 + resources/tools/presentation/new/fdio.svg | 25 + resources/tools/presentation/new/generator_CPTA.py | 448 +++ .../tools/presentation/new/generator_files.py | 177 + .../tools/presentation/new/generator_plots.py | 399 ++ .../tools/presentation/new/generator_report.py | 191 + .../tools/presentation/new/generator_tables.py | 995 +++++ .../tools/presentation/new/input_data_files.py | 230 ++ .../tools/presentation/new/input_data_parser.py | 1093 ++++++ .../new/jumpavg/AbstractGroupClassifier.py | 33 + .../new/jumpavg/AbstractGroupMetadata.py | 37 + .../presentation/new/jumpavg/AvgStdevMetadata.py | 50 + .../new/jumpavg/AvgStdevMetadataFactory.py | 49 + .../new/jumpavg/BitCountingClassifier.py | 63 + .../presentation/new/jumpavg/BitCountingGroup.py | 43 + .../new/jumpavg/BitCountingGroupList.py | 82 + .../new/jumpavg/BitCountingMetadata.py | 102 + .../new/jumpavg/BitCountingMetadataFactory.py | 80 + .../new/jumpavg/ClassifiedBitCountingMetadata.py | 68 + .../new/jumpavg/ClassifiedMetadataFactory.py | 42 + .../tools/presentation/new/jumpavg/RunGroup.py | 26 + .../tools/presentation/new/jumpavg/__init__.py | 16 + resources/tools/presentation/new/pal.py | 126 + resources/tools/presentation/new/requirements.txt | 11 + resources/tools/presentation/new/run_cpta.sh | 34 + resources/tools/presentation/new/run_report.sh | 45 + .../tools/presentation/new/specification.yaml | 3900 ++++++++++++++++++++ .../tools/presentation/new/specification_CPTA.yaml | 1287 +++++++ .../tools/presentation/new/specification_parser.py | 626 ++++ resources/tools/presentation/new/static_content.py | 60 + resources/tools/presentation/new/utils.py | 291 ++ resources/tools/presentation/run_cpta.sh | 4 + 38 files changed, 14667 insertions(+) create mode 100644 resources/tools/presentation/new/conf.py create mode 100644 resources/tools/presentation/new/conf_cpta/conf.py create mode 100644 resources/tools/presentation/new/doc/pal_func_diagram.svg create mode 100644 resources/tools/presentation/new/doc/pal_layers.svg create mode 100644 resources/tools/presentation/new/doc/pal_lld.rst create mode 100644 resources/tools/presentation/new/environment.py create mode 100644 resources/tools/presentation/new/errors.py create mode 100644 resources/tools/presentation/new/fdio.svg create mode 100644 resources/tools/presentation/new/generator_CPTA.py create mode 100644 resources/tools/presentation/new/generator_files.py create mode 100644 
resources/tools/presentation/new/generator_plots.py create mode 100644 resources/tools/presentation/new/generator_report.py create mode 100644 resources/tools/presentation/new/generator_tables.py create mode 100644 resources/tools/presentation/new/input_data_files.py create mode 100644 resources/tools/presentation/new/input_data_parser.py create mode 100644 resources/tools/presentation/new/jumpavg/AbstractGroupClassifier.py create mode 100644 resources/tools/presentation/new/jumpavg/AbstractGroupMetadata.py create mode 100644 resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py create mode 100644 resources/tools/presentation/new/jumpavg/AvgStdevMetadataFactory.py create mode 100644 resources/tools/presentation/new/jumpavg/BitCountingClassifier.py create mode 100644 resources/tools/presentation/new/jumpavg/BitCountingGroup.py create mode 100644 resources/tools/presentation/new/jumpavg/BitCountingGroupList.py create mode 100644 resources/tools/presentation/new/jumpavg/BitCountingMetadata.py create mode 100644 resources/tools/presentation/new/jumpavg/BitCountingMetadataFactory.py create mode 100644 resources/tools/presentation/new/jumpavg/ClassifiedBitCountingMetadata.py create mode 100644 resources/tools/presentation/new/jumpavg/ClassifiedMetadataFactory.py create mode 100644 resources/tools/presentation/new/jumpavg/RunGroup.py create mode 100644 resources/tools/presentation/new/jumpavg/__init__.py create mode 100644 resources/tools/presentation/new/pal.py create mode 100644 resources/tools/presentation/new/requirements.txt create mode 100755 resources/tools/presentation/new/run_cpta.sh create mode 100755 resources/tools/presentation/new/run_report.sh create mode 100644 resources/tools/presentation/new/specification.yaml create mode 100644 resources/tools/presentation/new/specification_CPTA.yaml create mode 100644 resources/tools/presentation/new/specification_parser.py create mode 100644 resources/tools/presentation/new/static_content.py create mode 100644 resources/tools/presentation/new/utils.py diff --git a/resources/tools/presentation/new/conf.py b/resources/tools/presentation/new/conf.py new file mode 100644 index 0000000000..84890d9c46 --- /dev/null +++ b/resources/tools/presentation/new/conf.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +# +# CSIT report documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + + +import os +import sys + +sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinxcontrib.programoutput', + 'sphinx.ext.ifconfig'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source file names. 
+# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'FD.io CSIT' +copyright = u'2018, FD.io' +author = u'FD.io CSIT' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +# version = u'' +# The full version, including alpha/beta/rc tags. +# release = u'' + +rst_epilog = """ +.. |release-1| replace:: {prev_release} +.. |srelease| replace:: {srelease} +.. |vpp-release| replace:: VPP-{vpprelease} release +.. |vpp-release-1| replace:: VPP-{vpp_prev_release} release +.. |dpdk-release| replace:: DPDK {dpdkrelease} +.. |trex-release| replace:: TRex {trex_version} +.. |virl-image-ubuntu| replace:: {csit_ubuntu_ver} +.. |virl-image-centos| replace:: {csit_centos_ver} + +.. _pdf version of this report: https://docs.fd.io/csit/{release}/report/_static/archive/csit_{release}.pdf +.. _tag documentation rst file: https://git.fd.io/csit/tree/docs/tag_documentation.rst?h={release} +.. _TRex intallation: https://git.fd.io/csit/tree/resources/tools/trex/trex_installer.sh?h={release} +.. _TRex driver: https://git.fd.io/csit/tree/resources/tools/trex/trex_stateless_profile.py?h={release} +.. _VIRL topologies directory: https://git.fd.io/csit/tree/resources/tools/virl/topologies/?h={release} +.. _VIRL ubuntu images lists: https://git.fd.io/csit/tree/resources/tools/disk-image-builder/ubuntu/lists/?h={release} +.. _VIRL centos images lists: https://git.fd.io/csit/tree/resources/tools/disk-image-builder/centos/lists/?h={release} +.. _VIRL nested: https://git.fd.io/csit/tree/resources/tools/disk-image-builder/nested/?h={release} +.. _CSIT Honeycomb Functional Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.vpp.func.honeycomb.html +.. _CSIT Honeycomb Performance Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.vpp.perf.honeycomb.html +.. _CSIT DPDK Performance Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.dpdk.perf.html +.. _CSIT VPP Functional Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.vpp.func.html +.. _CSIT VPP Performance Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.vpp.perf.html +.. _CSIT NSH_SFC Functional Tests Documentation: https://docs.fd.io/csit/{release}/doc/tests.nsh_sfc.func.html +.. _VPP test framework documentation: https://docs.fd.io/vpp/{vpprelease}/vpp_make_test/html/ +.. _FD.io test executor vpp performance jobs: https://jenkins.fd.io/view/csit/job/csit-vpp-perf-{srelease}-all +.. _FD.io test executor ligato performance jobs: https://jenkins.fd.io/job/csit-ligato-perf-{srelease}-all +.. _FD.io test executor vpp functional jobs: https://jenkins.fd.io/view/csit/job/csit-vpp-functional-{srelease}-ubuntu1604-virl/lastSuccessfulBuild +.. _FD.io test executor dpdk performance jobs: https://jenkins.fd.io/view/csit/job/csit-dpdk-perf-{srelease}-all +.. _FD.io test executor Honeycomb functional jobs: https://jenkins.fd.io/view/csit/job/hc2vpp-csit-integration-{srelease}-ubuntu1604/lastSuccessfulBuild +.. _FD.io test executor honeycomb performance jobs: https://jenkins.fd.io/view/hc2vpp/job/hc2vpp-csit-perf-master-ubuntu1604/lastSuccessfulBuild +.. _FD.io test executor NSH_SFC functional jobs: https://jenkins.fd.io/view/csit/job/csit-nsh_sfc-verify-func-{srelease}-ubuntu1604-virl/lastSuccessfulBuild +.. 
_FD.io VPP compile job: https://jenkins.fd.io/view/vpp/job/vpp-merge-{srelease}-ubuntu1604/ +.. _FD.io DPDK compile job: https://jenkins.fd.io/view/deb-dpdk/job/deb_dpdk-merge-{sdpdkrelease}-ubuntu1604/ +.. _CSIT Testbed Setup: https://git.fd.io/csit/tree/resources/tools/testbed-setup/README.md?h={release} +""".format(release='rls1804', + prev_release='rls1801', + srelease='1804', + vpprelease='18.04', + vpp_prev_release='18.01', + dpdkrelease='18.02', + sdpdkrelease='1802', + trex_version='v2.35', + csit_ubuntu_ver='csit-ubuntu-16.04.1_2018-03-07_2.1', + csit_centos_ver='csit-centos-7.4-1711_2018-03-20_1.9') + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_theme_path = ['env/lib/python2.7/site-packages/sphinx_rtd_theme'] + +# html_static_path = ['_build/_static'] +html_static_path = ['_tmp/src/_static'] + +html_context = { + 'css_files': [ + '_static/theme_overrides.css', # overrides for wide tables in RTD theme + ], + } + +# -- Options for LaTeX output --------------------------------------------- + +latex_engine = 'pdflatex' + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + 'papersize': 'a4paper', + + # The font size ('10pt', '11pt' or '12pt'). + # + #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + 'preamble': r''' + \usepackage{pdfpages} + \usepackage{svg} + \usepackage{charter} + \usepackage[defaultsans]{lato} + \usepackage{inconsolata} + \usepackage{csvsimple} + \usepackage{longtable} + \usepackage{booktabs} + ''', + + # Latex figure (float) alignment + # + 'figure_align': 'H', + + # Latex font setup + # + 'fontpkg': r''' + \renewcommand{\familydefault}{\sfdefault} + ''', + + # Latex other setup + # + 'extraclassoptions': 'openany', + 'sphinxsetup': r''' + TitleColor={RGB}{225,38,40}, + InnerLinkColor={RGB}{62,62,63}, + OuterLinkColor={RGB}{225,38,40}, + shadowsep=0pt, + shadowsize=0pt, + shadowrule=0pt + ''' +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, 'csit.tex', u'CSIT REPORT', + u'', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# +# latex_logo = 'fdio.pdf' + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# +# latex_use_parts = True + +# If true, show page references after internal links. +# +latex_show_pagerefs = True + +# If true, show URL addresses after external links. +# +latex_show_urls = 'footnote' + +# Documents to append as an appendix to all manuals. +# +# latex_appendices = [] + +# It false, will not define \strong, \code, itleref, \crossref ... but only +# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added +# packages. +# +# latex_keep_old_macro_names = True + +# If false, no module index is generated. +# +# latex_domain_indices = True diff --git a/resources/tools/presentation/new/conf_cpta/conf.py b/resources/tools/presentation/new/conf_cpta/conf.py new file mode 100644 index 0000000000..4eb51d884e --- /dev/null +++ b/resources/tools/presentation/new/conf_cpta/conf.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# +# CSIT report documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys + +sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinxcontrib.programoutput', + 'sphinx.ext.ifconfig'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'FD.io CSIT' +copyright = u'2018, FD.io' +author = u'FD.io CSIT' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +#version = u'' +# The full version, including alpha/beta/rc tags. +#release = u'' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_theme_path = ['env/lib/python2.7/site-packages/sphinx_rtd_theme']
+
+# html_static_path = ['_build/_static']
+html_static_path = ['../_tmp/src/_static']
+
+html_context = {
+    'css_files': [
+        '_static/theme_overrides.css',  # overrides for wide tables in RTD theme
+        ],
+    }
+
+# If false, no module index is generated.
+html_domain_indices = True
+
+# If false, no index is generated.
+html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+html_split_index = False
diff --git a/resources/tools/presentation/new/doc/pal_func_diagram.svg b/resources/tools/presentation/new/doc/pal_func_diagram.svg
new file mode 100644
index 0000000000..14f59605f9
--- /dev/null
+++ b/resources/tools/presentation/new/doc/pal_func_diagram.svg
@@ -0,0 +1,1413 @@
+[1413 lines of SVG markup elided. PAL functional diagram; nodes:
+Specification.YAML, Data to process (.xml), Static content (.rst),
+read_specification, read_data, Specification, Input data, filter_data,
+generate_tables, generate_plots, generate_files, Tables, Plots, Files,
+generate_report, Report. Legend: sL1 - Data, sL2 - Data processing,
+sL3 - Data presentation, sL4 - Report generation.]
\ No newline at end of file
diff --git a/resources/tools/presentation/new/doc/pal_layers.svg b/resources/tools/presentation/new/doc/pal_layers.svg
new file mode 100644
index 0000000000..dfb05d3106
--- /dev/null
+++ b/resources/tools/presentation/new/doc/pal_layers.svg
@@ -0,0 +1,441 @@
+[441 lines of SVG markup elided. PAL layers diagram: sL1 - .YAML
+Specification (CSIT gerrit), .RST Static content (CSIT gerrit), .ZIP (.XML)
+Data to process (Jenkins); sL2 - Data processing, pandas data model in JSON,
+Specification and Input data (pandas.Series); sL3 - Data presentation -
+Plots (plot.ly → .html), Files (.RST), Tables (pandas → .csv), Jenkins
+plot plugin (.html); sL4 - Report generation - Sphinx .html / .pdf (then
+stored in Nexus). Edges: Read files, Python calls.]
\ No newline at end of file
diff --git a/resources/tools/presentation/new/doc/pal_lld.rst b/resources/tools/presentation/new/doc/pal_lld.rst
new file mode 100644
index 0000000000..81c2547a82
--- /dev/null
+++ b/resources/tools/presentation/new/doc/pal_lld.rst
@@ -0,0 +1,1623 @@
+Presentation and Analytics Layer
+================================
+
+Overview
+--------
+
+The presentation and analytics layer (PAL) is the fourth layer of the CSIT
+hierarchy. The model of the presentation and analytics layer consists of four
+sub-layers, bottom up:
+
+ - sL1 - Data - input data to be processed:
+
+   - Static content - .rst text files, .svg static figures, and other files
+     stored in the CSIT git repository.
+   - Data to process - .xml files generated by Jenkins jobs executing tests,
+     stored as robot results files (output.xml).
+   - Specification - .yaml file with the models of report elements (tables,
+     plots, layout, ...) generated by this tool. There is also the
+     configuration of the tool and the specification of input data (jobs and
+     builds).
+
+ - sL2 - Data processing
+
+   - The data are read from the specified input files (.xml) and stored as
+     multi-indexed `pandas.Series
+     <https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html>`_.
+   - This layer also provides an interface to the input data and its
+     filtering.
+
+ - sL3 - Data presentation - This layer generates the elements specified in the
+   specification file:
+
+   - Tables: .csv files linked to static .rst files.
+   - Plots: .html files generated using plot.ly linked to static .rst files.
+
+ - sL4 - Report generation - Sphinx generates required formats and versions:
+
+   - formats: html, pdf
+   - versions: minimal, full (TODO: define the names and scope of versions)
+
+.. only:: latex
+
+    .. raw:: latex
+
+        \begin{figure}[H]
+            \centering
+                \includesvg[width=0.90\textwidth]{../_tmp/src/csit_framework_documentation/pal_layers}
+                \label{fig:pal_layers}
+        \end{figure}
+
+.. only:: html
+
+    .. figure:: pal_layers.svg
+        :alt: PAL Layers
+        :align: center
+
+Data
+----
+
+Report Specification
+````````````````````
+
+The report specification file defines which data is used and which outputs are
+generated. It is human readable and structured. It is easy to add / remove /
+change items. The specification includes:
+
+ - Specification of the environment.
+ - Configuration of debug mode (optional).
+ - Specification of input data (jobs, builds, files, ...).
+ - Specification of the output.
+ - What and how is generated:
+   - What: plots, tables.
+   - How: specification of all properties and parameters.
+ - .yaml format.
+
+Structure of the specification file
+'''''''''''''''''''''''''''''''''''
+
+The specification file is organized as a list of dictionaries distinguished by
+the type:
+
+::
+
+    -
+      type: "environment"
+    -
+      type: "configuration"
+    -
+      type: "debug"
+    -
+      type: "static"
+    -
+      type: "input"
+    -
+      type: "output"
+    -
+      type: "table"
+    -
+      type: "plot"
+    -
+      type: "file"
+
+Each type represents a section. The sections "environment", "debug", "static",
+"input" and "output" are listed only once in the specification; "table", "file"
+and "plot" can appear multiple times.
+
+Sections "debug", "table", "file" and "plot" are optional.
+
+Table(s), file(s) and plot(s) are referred to as "elements" in this text. It is
+possible to define and implement other elements if needed.
+
+
+Section: Environment
+''''''''''''''''''''
+
+This section has the following parts:
+
+ - type: "environment" - says that this is the section "environment".
+ - configuration - configuration of the PAL.
+ - paths - paths used by the PAL.
+ - urls - urls pointing to the data sources.
+ - make-dirs - a list of the directories to be created by the PAL while
+   preparing the environment.
+ - remove-dirs - a list of the directories to be removed while cleaning the
+   environment.
+ - build-dirs - a list of the directories where the results are stored.
+
+The structure of the section "Environment" is as follows (example):
+
+::
+
+    -
+      type: "environment"
+      configuration:
+        # Debug mode:
+        # - Skip:
+        #   - Download of input data files
+        # - Do:
+        #   - Read data from given zip / xml files
+        #   - Set the configuration as it is done in normal mode
+        # If the section "type: debug" is missing, CFG[DEBUG] is set to 0.
+ CFG[DEBUG]: 0 + + paths: + # Top level directories: + ## Working directory + DIR[WORKING]: "_tmp" + ## Build directories + DIR[BUILD,HTML]: "_build" + DIR[BUILD,LATEX]: "_build_latex" + + # Static .rst files + DIR[RST]: "../../../docs/report" + + # Working directories + ## Input data files (.zip, .xml) + DIR[WORKING,DATA]: "{DIR[WORKING]}/data" + ## Static source files from git + DIR[WORKING,SRC]: "{DIR[WORKING]}/src" + DIR[WORKING,SRC,STATIC]: "{DIR[WORKING,SRC]}/_static" + + # Static html content + DIR[STATIC]: "{DIR[BUILD,HTML]}/_static" + DIR[STATIC,VPP]: "{DIR[STATIC]}/vpp" + DIR[STATIC,DPDK]: "{DIR[STATIC]}/dpdk" + DIR[STATIC,ARCH]: "{DIR[STATIC]}/archive" + + # Detailed test results + DIR[DTR]: "{DIR[WORKING,SRC]}/detailed_test_results" + DIR[DTR,PERF,DPDK]: "{DIR[DTR]}/dpdk_performance_results" + DIR[DTR,PERF,VPP]: "{DIR[DTR]}/vpp_performance_results" + DIR[DTR,PERF,HC]: "{DIR[DTR]}/honeycomb_performance_results" + DIR[DTR,FUNC,VPP]: "{DIR[DTR]}/vpp_functional_results" + DIR[DTR,FUNC,HC]: "{DIR[DTR]}/honeycomb_functional_results" + DIR[DTR,FUNC,NSHSFC]: "{DIR[DTR]}/nshsfc_functional_results" + DIR[DTR,PERF,VPP,IMPRV]: "{DIR[WORKING,SRC]}/vpp_performance_tests/performance_improvements" + + # Detailed test configurations + DIR[DTC]: "{DIR[WORKING,SRC]}/test_configuration" + DIR[DTC,PERF,VPP]: "{DIR[DTC]}/vpp_performance_configuration" + DIR[DTC,FUNC,VPP]: "{DIR[DTC]}/vpp_functional_configuration" + + # Detailed tests operational data + DIR[DTO]: "{DIR[WORKING,SRC]}/test_operational_data" + DIR[DTO,PERF,VPP]: "{DIR[DTO]}/vpp_performance_operational_data" + + # .css patch file to fix tables generated by Sphinx + DIR[CSS_PATCH_FILE]: "{DIR[STATIC]}/theme_overrides.css" + DIR[CSS_PATCH_FILE2]: "{DIR[WORKING,SRC,STATIC]}/theme_overrides.css" + + urls: + URL[JENKINS,CSIT]: "https://jenkins.fd.io/view/csit/job" + URL[JENKINS,HC]: "https://jenkins.fd.io/view/hc2vpp/job" + + make-dirs: + # List the directories which are created while preparing the environment. + # All directories MUST be defined in "paths" section. + - "DIR[WORKING,DATA]" + - "DIR[STATIC,VPP]" + - "DIR[STATIC,DPDK]" + - "DIR[STATIC,ARCH]" + - "DIR[BUILD,LATEX]" + - "DIR[WORKING,SRC]" + - "DIR[WORKING,SRC,STATIC]" + + remove-dirs: + # List the directories which are deleted while cleaning the environment. + # All directories MUST be defined in "paths" section. + #- "DIR[BUILD,HTML]" + + build-dirs: + # List the directories where the results (build) is stored. + # All directories MUST be defined in "paths" section. + - "DIR[BUILD,HTML]" + - "DIR[BUILD,LATEX]" + +It is possible to use defined items in the definition of other items, e.g.: + +:: + + DIR[WORKING,DATA]: "{DIR[WORKING]}/data" + +will be automatically changed to + +:: + + DIR[WORKING,DATA]: "_tmp/data" + + +Section: Configuration +'''''''''''''''''''''' + +This section specifies the groups of parameters which are repeatedly used in the +elements defined later in the specification file. It has the following parts: + + - data sets - Specification of data sets used later in element's specifications + to define the input data. + - plot layouts - Specification of plot layouts used later in plots' + specifications to define the plot layout. 
+ +The structure of the section "Configuration" is as follows (example): + +:: + + - + type: "configuration" + data-sets: + plot-vpp-throughput-latency: + csit-vpp-perf-1710-all: + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + vpp-perf-results: + csit-vpp-perf-1710-all: + - 20 + - 23 + plot-layouts: + plot-throughput: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Packets Per Second [pps]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + +The definitions from this sections are used in the elements, e.g.: + +:: + + - + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + + +Section: Debug mode +''''''''''''''''''' + +This section is optional as it configures the debug mode. It is used if one +does not want to download input data files and use local files instead. + +If the debug mode is configured, the "input" section is ignored. + +This section has the following parts: + + - type: "debug" - says that this is the section "debug". + - general: + + - input-format - xml or zip. + - extract - if "zip" is defined as the input format, this file is extracted + from the zip file, otherwise this parameter is ignored. + + - builds - list of builds from which the data is used. Must include a job + name as a key and then a list of builds and their output files. 
+ +The structure of the section "Debug" is as follows (example): + +:: + + - + type: "debug" + general: + input-format: "zip" # zip or xml + extract: "robot-plugin/output.xml" # Only for zip + builds: + # The files must be in the directory DIR[WORKING,DATA] + csit-dpdk-perf-1707-all: + - + build: 10 + file: "csit-dpdk-perf-1707-all__10.xml" + - + build: 9 + file: "csit-dpdk-perf-1707-all__9.xml" + csit-nsh_sfc-verify-func-1707-ubuntu1604-virl: + - + build: 2 + file: "csit-nsh_sfc-verify-func-1707-ubuntu1604-virl-2.xml" + csit-vpp-functional-1707-ubuntu1604-virl: + - + build: lastSuccessfulBuild + file: "csit-vpp-functional-1707-ubuntu1604-virl-lastSuccessfulBuild.xml" + hc2vpp-csit-integration-1707-ubuntu1604: + - + build: lastSuccessfulBuild + file: "hc2vpp-csit-integration-1707-ubuntu1604-lastSuccessfulBuild.xml" + csit-vpp-perf-1707-all: + - + build: 16 + file: "csit-vpp-perf-1707-all__16__output.xml" + - + build: 17 + file: "csit-vpp-perf-1707-all__17__output.xml" + + +Section: Static +''''''''''''''' + +This section defines the static content which is stored in git and will be used +as a source to generate the report. + +This section has these parts: + + - type: "static" - says that this section is the "static". + - src-path - path to the static content. + - dst-path - destination path where the static content is copied and then + processed. + +:: + - + type: "static" + src-path: "{DIR[RST]}" + dst-path: "{DIR[WORKING,SRC]}" + + +Section: Input +'''''''''''''' + +This section defines the data used to generate elements. It is mandatory +if the debug mode is not used. + +This section has the following parts: + + - type: "input" - says that this section is the "input". + - general - parameters common to all builds: + + - file-name: file to be downloaded. + - file-format: format of the downloaded file, ".zip" or ".xml" are supported. + - download-path: path to be added to url pointing to the file, e.g.: + "{job}/{build}/robot/report/*zip*/{filename}"; {job}, {build} and + {filename} are replaced by proper values defined in this section. + - extract: file to be extracted from downloaded zip file, e.g.: "output.xml"; + if xml file is downloaded, this parameter is ignored. + + - builds - list of jobs (keys) and numbers of builds which output data will be + downloaded. + +The structure of the section "Input" is as follows (example from 17.07 report): + +:: + + - + type: "input" # Ignored in debug mode + general: + file-name: "robot-plugin.zip" + file-format: ".zip" + download-path: "{job}/{build}/robot/report/*zip*/{filename}" + extract: "robot-plugin/output.xml" + builds: + csit-vpp-perf-1707-all: + - 9 + - 10 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 21 + - 22 + csit-dpdk-perf-1707-all: + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + csit-vpp-functional-1707-ubuntu1604-virl: + - lastSuccessfulBuild + hc2vpp-csit-perf-master-ubuntu1604: + - 8 + - 9 + hc2vpp-csit-integration-1707-ubuntu1604: + - lastSuccessfulBuild + csit-nsh_sfc-verify-func-1707-ubuntu1604-virl: + - 2 + + +Section: Output +''''''''''''''' + +This section specifies which format(s) will be generated (html, pdf) and which +versions will be generated for each format. + +This section has the following parts: + + - type: "output" - says that this section is the "output". + - format: html or pdf. + - version: defined for each format separately. 
+The structure of the section "Output" is as follows (example):
+
+::
+
+    -
+      type: "output"
+      format:
+        html:
+        - full
+        pdf:
+        - full
+        - minimal
+
+TODO: define the names of versions
+
+
+Content of "minimal" version
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+TODO: define the name and content of this version
+
+
+Section: Table
+''''''''''''''
+
+This section defines a table to be generated. There can be 0 or more "table"
+sections.
+
+This section has the following parts:
+
+ - type: "table" - says that this section defines a table.
+ - title: Title of the table.
+ - algorithm: Algorithm which is used to generate the table. The other
+   parameters in this section must provide all information needed by the used
+   algorithm.
+ - template: (optional) a .csv file used as a template while generating the
+   table.
+ - output-file-ext: extension of the output file.
+ - output-file: file which the table will be written to.
+ - columns: specification of table columns:
+
+   - title: The title used in the table header.
+   - data: Specification of the data, it has two parts - command and arguments:
+
+     - command:
+
+       - template - take the data from template, arguments:
+
+         - number of column in the template.
+
+       - data - take the data from the input data, arguments:
+
+         - jobs and builds which data will be used.
+
+       - operation - performs an operation with the data already in the table,
+         arguments:
+
+         - operation to be done, e.g.: mean, stdev, relative_change (compute
+           the relative change between two columns) and display number of data
+           samples ~= number of test jobs. The operations are implemented in
+           utils.py.
+           TODO: Move the operations from utils.py to e.g. operations.py.
+         - numbers of columns which data will be used (optional).
+
+ - data: Specify the jobs and builds which data is used to generate the table.
+ - filter: filter based on tags applied on the input data, if "template" is
+   used, filtering is based on the template.
+ - parameters: Only these parameters will be put to the output data structure.
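+
+As an illustration of the "operation" command, here is a minimal sketch of
+the relative_change operation described above (the helper name comes from
+the column specification; the exact signature in utils.py may differ):
+
+::
+
+    def relative_change(nr1, nr2):
+        """Return the relative change of nr2 against nr1, in percent."""
+        return float(nr2 - nr1) / float(nr1) * 100
+
+    # "operation relative_change 5 4" applies this to two table columns,
+    # e.g. a mean of 9.9 Mpps compared to a mean of 9.2 Mpps:
+    print(round(relative_change(9.2, 9.9), 1))  # 7.6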
+ +The structure of the section "Table" is as follows (example of +"table_performance_improvements"): + +:: + + - + type: "table" + title: "Performance improvements" + algorithm: "table_performance_improvements" + template: "{DIR[DTR,PERF,VPP,IMPRV]}/tmpl_performance_improvements.csv" + output-file-ext: ".csv" + output-file: "{DIR[DTR,PERF,VPP,IMPRV]}/performance_improvements" + columns: + - + title: "VPP Functionality" + data: "template 1" + - + title: "Test Name" + data: "template 2" + - + title: "VPP-16.09 mean [Mpps]" + data: "template 3" + - + title: "VPP-17.01 mean [Mpps]" + data: "template 4" + - + title: "VPP-17.04 mean [Mpps]" + data: "template 5" + - + title: "VPP-17.07 mean [Mpps]" + data: "data csit-vpp-perf-1707-all mean" + - + title: "VPP-17.07 stdev [Mpps]" + data: "data csit-vpp-perf-1707-all stdev" + - + title: "17.04 to 17.07 change [%]" + data: "operation relative_change 5 4" + data: + csit-vpp-perf-1707-all: + - 9 + - 10 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 21 + filter: "template" + parameters: + - "throughput" + +Example of "table_details" which generates "Detailed Test Results - VPP +Performance Results": + +:: + + - + type: "table" + title: "Detailed Test Results - VPP Performance Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[WORKING]}/vpp_performance_results" + columns: + - + title: "Name" + data: "data test_name" + - + title: "Documentation" + data: "data test_documentation" + - + title: "Status" + data: "data test_msg" + data: + csit-vpp-perf-1707-all: + - 17 + filter: "all" + parameters: + - "parent" + - "doc" + - "msg" + +Example of "table_details" which generates "Test configuration - VPP Performance +Test Configs": + +:: + + - + type: "table" + title: "Test configuration - VPP Performance Test Configs" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[WORKING]}/vpp_test_configuration" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case" + data: "data show-run" + data: + csit-vpp-perf-1707-all: + - 17 + filter: "all" + parameters: + - "parent" + - "name" + - "show-run" + + +Section: Plot +''''''''''''' + +This section defines a plot to be generated. There can be 0 or more "plot" +sections. + +This section has these parts: + + - type: "plot" - says that this section defines a plot. + - title: Plot title used in the logs. Title which is displayed is in the + section "layout". + - output-file-type: format of the output file. + - output-file: file which the plot will be written to. + - algorithm: Algorithm used to generate the plot. The other parameters in this + section must provide all information needed by plot.ly to generate the plot. + For example: + + - traces + - layout + + - These parameters are transparently passed to plot.ly. + + - data: Specify the jobs and numbers of builds which data is used to generate + the plot. + - filter: filter applied on the input data. + - parameters: Only these parameters will be put to the output data structure. 
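+
+Since "traces" and "layout" are passed to plot.ly transparently, a plot
+algorithm essentially fills plot.ly structures from the filtered data and
+renders them to a .html file. A minimal sketch of that idea (the throughput
+values and the file name are invented; the real algorithms live in
+generator_plots.py):
+
+::
+
+    import plotly.graph_objs as plgo
+    import plotly.offline as ploff
+
+    # One box-with-whiskers trace per test case, filled from filtered data.
+    traces = [plgo.Box(y=[9.1e6, 9.3e6, 9.2e6], name="1. test-a"),
+              plgo.Box(y=[8.0e6, 8.2e6, 8.1e6], name="2. test-b")]
+    # The "traces" and "layout" dictionaries from the model are applied to
+    # these structures before rendering.
+    layout = plgo.Layout(title="64B-1t1c-l2-sel1-ndrdisc")
+    ploff.plot(plgo.Figure(data=traces, layout=layout),
+               auto_open=False, filename="64B-1t1c-l2-sel1-ndrdisc.html")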
+ +The structure of the section "Plot" is as follows (example of a plot showing +throughput in a chart box-with-whiskers): + +:: + + - + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-ndrdisc" + data: + csit-vpp-perf-1707-all: + - 9 + - 10 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 21 + # Keep this formatting, the filter is enclosed with " (quotation mark) and + # each tag is enclosed with ' (apostrophe). + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Packets Per Second [pps]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + +The structure of the section "Plot" is as follows (example of a plot showing +latency in a box chart): + +:: + + - + type: "plot" + title: "VPP Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-ndrdisc-lat50" + data: + csit-vpp-perf-1707-all: + - 9 + - 10 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 21 + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: "" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Latency min/avg/max [uSec]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + +The structure of the section "Plot" is as follows (example of a plot showing +VPP HTTP server performance in a box chart with pre-defined data +"plot-vpp-httlp-server-performance" set and plot layout "plot-cps"): + +:: + + - + type: "plot" + title: "VPP HTTP Server Performance" + algorithm: "plot_http_server_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/http-server-performance-cps" + 
+      data:
+        "plot-vpp-httlp-server-performance"
+      # Keep this formatting, the filter is enclosed with " (quotation mark) and
+      # each tag is enclosed with ' (apostrophe).
+      filter: "'HTTP' and 'TCP_CPS'"
+      parameters:
+      - "result"
+      - "name"
+      traces:
+        hoverinfo: "x+y"
+        boxpoints: "outliers"
+        whiskerwidth: 0
+      layout:
+        title: "VPP HTTP Server Performance"
+        layout:
+          "plot-cps"
+
+
+Section: file
+'''''''''''''
+
+This section defines a file to be generated. There can be 0 or more "file"
+sections.
+
+This section has the following parts:
+
+ - type: "file" - says that this section defines a file.
+ - title: Title of the file.
+ - algorithm: Algorithm which is used to generate the file. The other
+   parameters in this section must provide all information needed by the used
+   algorithm.
+ - output-file-ext: extension of the output file.
+ - output-file: file which the file will be written to.
+ - file-header: The header of the generated .rst file.
+ - dir-tables: The directory with the tables.
+ - data: Specify the jobs and builds which data is used to generate the file.
+ - filter: filter based on tags applied on the input data, if "all" is
+   used, no filtering is done.
+ - parameters: Only these parameters will be put to the output data structure.
+ - chapters: the hierarchy of chapters in the generated file.
+ - start-level: the level of the top-level chapter.
+
+The structure of the section "file" is as follows (example):
+
+::
+
+    -
+      type: "file"
+      title: "VPP Performance Results"
+      algorithm: "file_test_results"
+      output-file-ext: ".rst"
+      output-file: "{DIR[DTR,PERF,VPP]}/vpp_performance_results"
+      file-header: "\n.. |br| raw:: html\n\n    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>\n\n"
\n\n" + dir-tables: "{DIR[DTR,PERF,VPP]}" + data: + csit-vpp-perf-1707-all: + - 22 + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + + +Static content +`````````````` + + - Manually created / edited files. + - .rst files, static .csv files, static pictures (.svg), ... + - Stored in CSIT git repository. + +No more details about the static content in this document. + + +Data to process +``````````````` + +The PAL processes tests results and other information produced by Jenkins jobs. +The data are now stored as robot results in Jenkins (TODO: store the data in +nexus) either as .zip and / or .xml files. + + +Data processing +--------------- + +As the first step, the data are downloaded and stored locally (typically on a +Jenkins slave). If .zip files are used, the given .xml files are extracted for +further processing. + +Parsing of the .xml files is performed by a class derived from +"robot.api.ResultVisitor", only necessary methods are overridden. All and only +necessary data is extracted from .xml file and stored in a structured form. + +The parsed data are stored as the multi-indexed pandas.Series data type. Its +structure is as follows: + +:: + + + + + + + +"job name", "build", "metadata", "suites", "tests" are indexes to access the +data. For example: + +:: + + data = + + job 1 name: + build 1: + metadata: metadata + suites: suites + tests: tests + ... + build N: + metadata: metadata + suites: suites + tests: tests + ... + job M name: + build 1: + metadata: metadata + suites: suites + tests: tests + ... + build N: + metadata: metadata + suites: suites + tests: tests + +Using indexes data["job 1 name"]["build 1"]["tests"] (e.g.: +data["csit-vpp-perf-1704-all"]["17"]["tests"]) we get a list of all tests with +all tests data. + +Data will not be accessible directly using indexes, but using getters and +filters. 
+ +**Structure of metadata:** + +:: + + "metadata": { + "version": "VPP version", + "job": "Jenkins job name" + "build": "Information about the build" + }, + +**Structure of suites:** + +:: + + "suites": { + "Suite name 1": { + "doc": "Suite 1 documentation" + "parent": "Suite 1 parent" + } + "Suite name N": { + "doc": "Suite N documentation" + "parent": "Suite N parent" + } + +**Structure of tests:** + +Performance tests: + +:: + + "tests": { + "ID": { + "name": "Test name", + "parent": "Name of the parent of the test", + "doc": "Test documentation" + "msg": "Test message" + "tags": ["tag 1", "tag 2", "tag n"], + "type": "PDR" | "NDR", + "throughput": { + "value": int, + "unit": "pps" | "bps" | "percentage" + }, + "latency": { + "direction1": { + "100": { + "min": int, + "avg": int, + "max": int + }, + "50": { # Only for NDR + "min": int, + "avg": int, + "max": int + }, + "10": { # Only for NDR + "min": int, + "avg": int, + "max": int + } + }, + "direction2": { + "100": { + "min": int, + "avg": int, + "max": int + }, + "50": { # Only for NDR + "min": int, + "avg": int, + "max": int + }, + "10": { # Only for NDR + "min": int, + "avg": int, + "max": int + } + } + }, + "lossTolerance": "lossTolerance" # Only for PDR + "vat-history": "DUT1 and DUT2 VAT History" + }, + "show-run": "Show Run" + }, + "ID" { + # next test + } + +Functional tests: + +:: + + "tests": { + "ID": { + "name": "Test name", + "parent": "Name of the parent of the test", + "doc": "Test documentation" + "msg": "Test message" + "tags": ["tag 1", "tag 2", "tag n"], + "vat-history": "DUT1 and DUT2 VAT History" + "show-run": "Show Run" + "status": "PASS" | "FAIL" + }, + "ID" { + # next test + } + } + +Note: ID is the lowercase full path to the test. + + +Data filtering +`````````````` + +The first step when generating an element is getting the data needed to +construct the element. The data are filtered from the processed input data. + +The data filtering is based on: + + - job name(s). + - build number(s). + - tag(s). + - required data - only this data is included in the output. + +WARNING: The filtering is based on tags, so be careful with tagging. + +For example, the element which specification includes: + +:: + + data: + csit-vpp-perf-1707-all: + - 9 + - 10 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 21 + filter: + - "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + +will be constructed using data from the job "csit-vpp-perf-1707-all", for all +listed builds and the tests with the list of tags matching the filter +conditions. + +The output data structure for filtered test data is: + +:: + + - job 1 + - build 1 + - test 1 + - parameter 1 + - parameter 2 + ... + - parameter n + ... + - test n + ... + ... + - build n + ... + - job n + + +Data analytics +`````````````` + +Data analytics part implements: + + - methods to compute statistical data from the filtered input data. + - trending. + +Throughput Speedup Analysis - Multi-Core with Multi-Threading +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Throughput Speedup Analysis (TSA) calculates throughput speedup ratios +for tested 1-, 2- and 4-core multi-threaded VPP configurations using the +following formula: + +:: + + N_core_throughput + N_core_throughput_speedup = ----------------- + 1_core_throughput + +Multi-core throughput speedup ratios are plotted in grouped bar graphs +for throughput tests with 64B/78B frame size, with number of cores on +X-axis and speedup ratio on Y-axis. 
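+
+A worked sketch of the speedup computation defined above (the throughput
+values are invented for illustration):
+
+::
+
+    # Measured throughput [Mpps] of one test case on 1, 2 and 4 cores.
+    throughput = {1: 9.2, 2: 18.1, 4: 33.8}
+
+    # N_core_throughput_speedup = N_core_throughput / 1_core_throughput
+    speedup = dict((cores, tput / throughput[1])
+                   for cores, tput in throughput.items())
+    # {1: 1.0, 2: ~1.97, 4: ~3.67} - the bar heights of a TSA graph.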
+
+For better comparison, multiple test results' data sets are plotted in each
+graph:
+
+ - graph type: grouped bars;
+ - graph X-axis: (testcase index, number of cores);
+ - graph Y-axis: speedup factor.
+
+A subset of the existing performance tests is covered by TSA graphs.
+
+**Model for TSA:**
+
+::
+
+    -
+      type: "plot"
+      title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
+      algorithm: "plot_throughput_speedup_analysis"
+      output-file-type: ".html"
+      output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-l2-tsa-ndrdisc"
+      data:
+        "plot-throughput-speedup-analysis"
+      filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'NDRDISC' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+      parameters:
+      - "throughput"
+      - "parent"
+      - "tags"
+      layout:
+        title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
+        layout:
+          "plot-throughput-speedup-analysis"
+
+
+Comparison of results from two sets of the same test executions
+'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+This algorithm enables comparison of results coming from two sets of the
+same test executions. It is used to quantify performance changes across
+all tests after test environment changes, e.g. operating system
+upgrades/patches or hardware changes.
+
+It is assumed that each set of test executions includes multiple runs
+of the same tests, 10 or more, to verify test result repeatability and
+to yield statistically meaningful results data.
+
+Comparison results are presented in a table with a specified number of
+the best and the worst relative changes between the two sets. The following
+table columns are defined:
+
+ - name of the test;
+ - throughput mean values of the reference set;
+ - throughput standard deviation of the reference set;
+ - throughput mean values of the set to compare;
+ - throughput standard deviation of the set to compare;
+ - relative change of the mean values.
+
+**The model**
+
+The model specifies:
+
+ - type: "table" - means this section defines a table.
+ - title: Title of the table.
+ - algorithm: Algorithm which is used to generate the table. The other
+   parameters in this section must provide all information needed by the used
+   algorithm.
+ - output-file-ext: Extension of the output file.
+ - output-file: File which the table will be written to.
+ - reference - the builds which are used as the reference for comparison.
+ - compare - the builds which are compared to the reference.
+ - data: Specify the sources, jobs and builds, providing data for generating
+   the table.
+ - filter: Filter based on tags applied on the input data, if "template" is
+   used, filtering is based on the template.
+ - parameters: Only these parameters will be put to the output data
+   structure.
+ - nr-of-tests-shown: Number of the best and the worst tests presented in the
+   table. Use 0 (zero) to present all tests.
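+
+A minimal sketch of the comparison itself, assuming each test name maps to
+lists of throughput samples from the two sets (the data and names are
+invented; the real implementation is the table_performance_comparison
+algorithm in generator_tables.py):
+
+::
+
+    from numpy import mean, std
+
+    reference = {"test-a": [9.1, 9.2, 9.3], "test-b": [8.0, 8.1, 8.2]}
+    compare = {"test-a": [9.8, 9.9, 10.0], "test-b": [7.1, 7.2, 7.3]}
+
+    rows = []
+    for name in reference:
+        ref, cmp_ = reference[name], compare[name]
+        change = (mean(cmp_) - mean(ref)) / mean(ref) * 100
+        rows.append((name, mean(ref), std(ref),
+                     mean(cmp_), std(cmp_), change))
+
+    # Sort by relative change; the nr-of-tests-shown best and worst rows
+    # end up in the generated table.
+    rows.sort(key=lambda row: row[-1], reverse=True)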
+ +*Example:* + +:: + + - + type: "table" + title: "Performance comparison" + algorithm: "table_performance_comparison" + output-file-ext: ".csv" + output-file: "{DIR[DTR,PERF,VPP,IMPRV]}/vpp_performance_comparison" + reference: + title: "csit-vpp-perf-1801-all - 1" + data: + csit-vpp-perf-1801-all: + - 1 + - 2 + compare: + title: "csit-vpp-perf-1801-all - 2" + data: + csit-vpp-perf-1801-all: + - 1 + - 2 + data: + "vpp-perf-comparison" + filter: "all" + parameters: + - "name" + - "parent" + - "throughput" + nr-of-tests-shown: 20 + + +Advanced data analytics +``````````````````````` + +In the future advanced data analytics (ADA) will be added to analyze the +telemetry data collected from SUT telemetry sources and correlate it to +performance test results. + +:TODO: + + - describe the concept of ADA. + - add specification. + + +Data presentation +----------------- + +Generates the plots and tables according to the report models per +specification file. The elements are generated using algorithms and data +specified in their models. + + +Tables +`````` + + - tables are generated by algorithms implemented in PAL, the model includes the + algorithm and all necessary information. + - output format: csv + - generated tables are stored in specified directories and linked to .rst + files. + + +Plots +````` + + - `plot.ly `_ is currently used to generate plots, the model + includes the type of plot and all the necessary information to render it. + - output format: html. + - generated plots are stored in specified directories and linked to .rst files. + + +Report generation +----------------- + +Report is generated using Sphinx and Read_the_Docs template. PAL generates html +and pdf formats. It is possible to define the content of the report by +specifying the version (TODO: define the names and content of versions). + + +The process +``````````` + +1. Read the specification. +2. Read the input data. +3. Process the input data. +4. For element (plot, table, file) defined in specification: + + a. Get the data needed to construct the element using a filter. + b. Generate the element. + c. Store the element. + +5. Generate the report. +6. Store the report (Nexus). + +The process is model driven. The elements' models (tables, plots, files +and report itself) are defined in the specification file. Script reads +the elements' models from specification file and generates the elements. + +It is easy to add elements to be generated in the report. If a new type +of an element is required, only a new algorithm needs to be implemented +and integrated. + + +Continuous Performance Measurements and Trending +------------------------------------------------ + +Performance analysis and trending execution sequence: +````````````````````````````````````````````````````` + +CSIT PA runs performance analysis, change detection and trending using specified +trend analysis metrics over the rolling window of last sets of historical +measurement data. PA is defined as follows: + + #. PA job triggers: + + #. By PT job at its completion. + #. Manually from Jenkins UI. + + #. Download and parse archived historical data and the new data: + + #. New data from latest PT job is evaluated against the rolling window + of sets of historical data. + #. Download RF output.xml files and compressed archived data. + #. Parse out the data filtering test cases listed in PA specification + (part of CSIT PAL specification file). + + #. Calculate trend metrics for the rolling window of sets of historical data: + + #. 
+
+
+Parameters to specify
+`````````````````````
+
+- job to be monitored - the Jenkins job whose results are used as input
+  data for this test;
+- builds used for trending plot(s) - specified by a list of build numbers
+  or by a range of builds defined by the first and the last build number;
+- list of plots to generate:
+
+  - plot title;
+  - output file name;
+  - data for plots;
+  - tests to be displayed in the plot, defined by a filter;
+  - list of parameters to extract from the data;
+  - periods (daily = 1, weekly = 5, monthly = 30);
+  - plot layout.
+
+*Example:*
+
+::
+
+    -
+      type: "cpta"
+      title: "Continuous Performance Trending and Analysis"
+      algorithm: "cpta"
+      output-file-type: ".html"
+      output-file: "{DIR[STATIC,VPP]}/cpta"
+      data: "plot-performance-trending"
+      plots:
+
+        - title: "VPP 1T1C L2 64B Packet Throughput - {period} Trending"
+          output-file-name: "l2-1t1c-x520"
+          data: "plot-performance-trending"
+          filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'"
+          parameters:
+          - "result"
+          # - "name"
+          periods:
+          - 1
+          - 5
+          - 30
+          layout: "plot-cpta"
+
+        - title: "VPP 2T2C L2 64B Packet Throughput - {period} Trending"
+          output-file-name: "l2-2t2c-x520"
+          data: "plot-performance-trending"
+          filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'"
+          parameters:
+          - "result"
+          # - "name"
+          periods:
+          - 1
+          - 5
+          - 30
+          layout: "plot-cpta"
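+
+The "relay the evaluation result to the job result" step reduces the
+per-test classifications to a single verdict. A sketch of the reduction,
+mirroring the evaluation done at the end of generator_CPTA.py (any
+regression or outlier fails the job, as does a complete lack of data):
+
+::
+
+    def evaluate_results(anomaly_classifications):
+        """Reduce per-test classifications to a single job result."""
+        if not anomaly_classifications:
+            return "FAIL"
+        for classification in anomaly_classifications:
+            if classification in ("regression", "outlier"):
+                return "FAIL"
+        return "PASS"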
+
+API
+---
+
+List of modules, classes, methods and functions
+```````````````````````````````````````````````
+
+::
+
+    specification_parser.py
+
+        class Specification
+
+            Methods:
+                read_specification
+                set_input_state
+                set_input_file_name
+
+            Getters:
+                specification
+                environment
+                debug
+                is_debug
+                input
+                builds
+                output
+                tables
+                plots
+                files
+                static
+
+    input_data_parser.py
+
+        class InputData
+
+            Methods:
+                read_data
+                filter_data
+
+            Getters:
+                data
+                metadata
+                suites
+                tests
+
+    environment.py
+
+        Functions:
+            clean_environment
+
+        class Environment
+
+            Methods:
+                set_environment
+
+            Getters:
+                environment
+
+    input_data_files.py
+
+        Functions:
+            download_data_files
+            unzip_files
+
+    generator_tables.py
+
+        Functions:
+            generate_tables
+
+        Functions implementing algorithms to generate particular types
+        of tables (called by the function "generate_tables"):
+            table_details
+            table_performance_improvements
+
+    generator_plots.py
+
+        Functions:
+            generate_plots
+
+        Functions implementing algorithms to generate particular types
+        of plots (called by the function "generate_plots"):
+            plot_performance_box
+            plot_latency_box
+
+    generator_files.py
+
+        Functions:
+            generate_files
+
+        Functions implementing algorithms to generate particular types
+        of files (called by the function "generate_files"):
+            file_test_results
+
+    report.py
+
+        Functions:
+            generate_report
+
+        Functions implementing algorithms to generate particular types
+        of reports (called by the function "generate_report"):
+            generate_html_report
+            generate_pdf_report
+
+        Other functions called by the function "generate_report":
+            archive_input_data
+            archive_report
+
+
+PAL functional diagram
+``````````````````````
+
+.. only:: latex
+
+   .. raw:: latex
+
+       \begin{figure}[H]
+           \centering
+               \includesvg[width=0.90\textwidth]{../_tmp/src/csit_framework_documentation/pal_func_diagram}
+               \label{fig:pal_func_diagram}
+       \end{figure}
+
+.. only:: html
+
+   .. figure:: pal_func_diagram.svg
+      :alt: PAL functional diagram
+      :align: center
+
+
+How to add an element
+`````````````````````
+
+An element can be added by adding its model to the specification file.
+If the element is to be generated by an existing algorithm, only its
+parameters must be set.
+
+If a brand new type of element needs to be added, the algorithm must be
+implemented as well. Element generation algorithms are implemented in
+the files with names starting with the "generator" prefix. The name of
+the function implementing the algorithm and the name of the algorithm
+in the specification file have to be the same.
diff --git a/resources/tools/presentation/new/environment.py b/resources/tools/presentation/new/environment.py
new file mode 100644
index 0000000000..a2fa9a0d5b
--- /dev/null
+++ b/resources/tools/presentation/new/environment.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Environment
+
+Setting up the environment according to the specification read from the
+specification YAML file.
+"""
+
+import os
+import shutil
+import logging
+
+from errors import PresentationError
+
+
+class Environment(object):
+    """Setting of the environment:
+    - set environment variables,
+    - create directories.
+    """
+
+    def __init__(self, env, force=False):
+        """Initialization.
+
+        :param env: Environment specification.
+        :param force: If True, remove old build(s) if present.
+        :type env: dict
+        :type force: bool
+        """
+
+        self._env = env
+        self._force = force
+
+    @property
+    def environment(self):
+        """Getter.
+
+        :returns: Environment settings.
+        :rtype: dict
+        """
+        return self._env
+
+    def _make_dirs(self):
+        """Create the directories specified in the 'make-dirs' part of the
+        'environment' section in the specification file.
+
+        :raises: PresentationError if it is not possible to remove or create a
+            directory.
+ """ + + if self._force: + logging.info("Removing old build(s) ...") + for directory in self._env["build-dirs"]: + dir_to_remove = self._env["paths"][directory] + if os.path.isdir(dir_to_remove): + try: + shutil.rmtree(dir_to_remove) + logging.info(" Removed: {}".format(dir_to_remove)) + except OSError: + raise PresentationError("Cannot remove the directory " + "'{}'".format(dir_to_remove)) + logging.info("Done.") + + logging.info("Making directories ...") + + for directory in self._env["make-dirs"]: + dir_to_make = self._env["paths"][directory] + try: + if os.path.isdir(dir_to_make): + logging.warning("The directory '{}' exists, skipping.". + format(dir_to_make)) + else: + os.makedirs(dir_to_make) + logging.info(" Created: {}".format(dir_to_make)) + except OSError: + raise PresentationError("Cannot make the directory '{}'". + format(dir_to_make)) + + logging.info("Done.") + + def set_environment(self): + """Set the environment. + """ + + self._make_dirs() + + +def clean_environment(env): + """Clean the environment. + + :param env: Environment specification. + :type env: dict + :raises: PresentationError if it is not possible to remove a directory. + """ + + logging.info("Cleaning the environment ...") + + if not env["remove-dirs"]: # None or empty + logging.info(" No directories to remove.") + return + + for directory in env["remove-dirs"]: + dir_to_remove = env["paths"][directory] + logging.info(" Removing the working directory {} ...". + format(dir_to_remove)) + if os.path.isdir(dir_to_remove): + try: + shutil.rmtree(dir_to_remove) + except OSError as err: + logging.warning("Cannot remove the directory '{}'". + format(dir_to_remove)) + logging.debug(str(err)) + else: + logging.warning("The directory '{}' does not exist.". + format(dir_to_remove)) + + logging.info("Done.") diff --git a/resources/tools/presentation/new/errors.py b/resources/tools/presentation/new/errors.py new file mode 100644 index 0000000000..0d8d5b9b33 --- /dev/null +++ b/resources/tools/presentation/new/errors.py @@ -0,0 +1,78 @@ +# Copyright (c) 2017 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Implementation of exceptions used in the Presentation and analytics layer. +""" + +import sys +import logging + + +class PresentationError(Exception): + """Exception(s) raised by the presentation module. + + When raising this exception, put this information to the message in this + order: + - short description of the encountered problem (parameter msg), + - relevant messages if there are any collected, e.g., from caught + exception (optional parameter details), + - relevant data if there are any collected (optional parameter details). + """ + + log_exception = {"DEBUG": logging.debug, + "INFO": logging.info, + "WARNING": logging.warning, + "ERROR": logging.error, + "CRITICAL": logging.critical} + + def __init__(self, msg, details='', level="CRITICAL"): + """Sets the exception message and the level. + + :param msg: Short description of the encountered problem. 
+ :param details: Relevant messages if there are any collected, e.g., + from caught exception (optional parameter details), or relevant data if + there are any collected (optional parameter details). + :param level: Level of the error, possible choices are: "DEBUG", "INFO", + "WARNING", "ERROR" and "CRITICAL". + :type msg: str + :type details: str + :type level: str + """ + + super(PresentationError, self).__init__() + self._msg = msg + self._details = details + self._level = level + + try: + self.log_exception[self._level](self._msg) + if self._details: + self.log_exception[self._level](self._details) + except KeyError: + print("Wrong log level.") + sys.exit(1) + + def __repr__(self): + return repr(self._msg) + + def __str__(self): + return str(self._msg) + + @property + def level(self): + """Getter - logging level. + + :returns: Logging level. + :rtype: str + """ + return self._level diff --git a/resources/tools/presentation/new/fdio.svg b/resources/tools/presentation/new/fdio.svg new file mode 100644 index 0000000000..32dd070d36 --- /dev/null +++ b/resources/tools/presentation/new/fdio.svg @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + diff --git a/resources/tools/presentation/new/generator_CPTA.py b/resources/tools/presentation/new/generator_CPTA.py new file mode 100644 index 0000000000..1b4115f1f6 --- /dev/null +++ b/resources/tools/presentation/new/generator_CPTA.py @@ -0,0 +1,448 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generation of Continuous Performance Trending and Analysis. +""" + +import multiprocessing +import os +import logging +import csv +import prettytable +import plotly.offline as ploff +import plotly.graph_objs as plgo +import plotly.exceptions as plerr +import pandas as pd + +from collections import OrderedDict +from datetime import datetime + +from utils import archive_input_data, execute_command,\ + classify_anomalies, Worker + + +# Command to build the html format of the report +HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \ + '-b html -E ' \ + '-t html ' \ + '-D version="{date}" ' \ + '{working_dir} ' \ + '{build_dir}/' + +# .css file for the html format of the report +THEME_OVERRIDES = """/* override table width restrictions */ +.wy-nav-content { + max-width: 1200px !important; +} +""" + +COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink", + "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black", + "Violet", "Blue", "Yellow"] + + +def generate_cpta(spec, data): + """Generate all formats and versions of the Continuous Performance Trending + and Analysis. + + :param spec: Specification read from the specification file. + :param data: Full data set. 
+ :type spec: Specification + :type data: InputData + """ + + logging.info("Generating the Continuous Performance Trending and Analysis " + "...") + + ret_code = _generate_all_charts(spec, data) + + cmd = HTML_BUILDER.format( + date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'), + working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"], + build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"]) + execute_command(cmd) + + with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \ + css_file: + css_file.write(THEME_OVERRIDES) + + with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \ + css_file: + css_file.write(THEME_OVERRIDES) + + archive_input_data(spec) + + logging.info("Done.") + + return ret_code + + +def _generate_trending_traces(in_data, build_info, moving_win_size=10, + show_trend_line=True, name="", color=""): + """Generate the trending traces: + - samples, + - trimmed moving median (trending line) + - outliers, regress, progress + + :param in_data: Full data set. + :param build_info: Information about the builds. + :param moving_win_size: Window size. + :param show_trend_line: Show moving median (trending plot). + :param name: Name of the plot + :param color: Name of the color for the plot. + :type in_data: OrderedDict + :type build_info: dict + :type moving_win_size: int + :type show_trend_line: bool + :type name: str + :type color: str + :returns: Generated traces (list) and the evaluated result. + :rtype: tuple(traces, result) + """ + + data_x = list(in_data.keys()) + data_y = list(in_data.values()) + + hover_text = list() + xaxis = list() + for idx in data_x: + hover_text.append("vpp-ref: {0}
csit-ref: mrr-daily-build-{1}". + format(build_info[str(idx)][1].rsplit('~', 1)[0], + idx)) + date = build_info[str(idx)][0] + xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]), + int(date[9:11]), int(date[12:]))) + + data_pd = pd.Series(data_y, index=xaxis) + + anomaly_classification, avgs = classify_anomalies(data_pd) + + anomalies = pd.Series() + anomalies_colors = list() + anomalies_avgs = list() + anomaly_color = { + "outlier": 0.0, + "regression": 0.33, + "normal": 0.66, + "progression": 1.0 + } + if anomaly_classification: + for idx, item in enumerate(data_pd.items()): + if anomaly_classification[idx] in \ + ("outlier", "regression", "progression"): + anomalies = anomalies.append(pd.Series([item[1], ], + index=[item[0], ])) + anomalies_colors.append( + anomaly_color[anomaly_classification[idx]]) + anomalies_avgs.append(avgs[idx]) + anomalies_colors.extend([0.0, 0.33, 0.66, 1.0]) + + # Create traces + + trace_samples = plgo.Scatter( + x=xaxis, + y=data_y, + mode='markers', + line={ + "width": 1 + }, + legendgroup=name, + name="{name}-thput".format(name=name), + marker={ + "size": 5, + "color": color, + "symbol": "circle", + }, + text=hover_text, + hoverinfo="x+y+text+name" + ) + traces = [trace_samples, ] + + if show_trend_line: + trace_trend = plgo.Scatter( + x=xaxis, + y=avgs, + mode='lines', + line={ + "shape": "linear", + "width": 1, + "color": color, + }, + legendgroup=name, + name='{name}-trend'.format(name=name) + ) + traces.append(trace_trend) + + trace_anomalies = plgo.Scatter( + x=anomalies.keys(), + y=anomalies_avgs, + mode='markers', + hoverinfo="none", + showlegend=True, + legendgroup=name, + name="{name}-anomalies".format(name=name), + marker={ + "size": 15, + "symbol": "circle-open", + "color": anomalies_colors, + "colorscale": [[0.00, "grey"], + [0.25, "grey"], + [0.25, "red"], + [0.50, "red"], + [0.50, "white"], + [0.75, "white"], + [0.75, "green"], + [1.00, "green"]], + "showscale": True, + "line": { + "width": 2 + }, + "colorbar": { + "y": 0.5, + "len": 0.8, + "title": "Circles Marking Data Classification", + "titleside": 'right', + "titlefont": { + "size": 14 + }, + "tickmode": 'array', + "tickvals": [0.125, 0.375, 0.625, 0.875], + "ticktext": ["Outlier", "Regression", "Normal", "Progression"], + "ticks": "", + "ticklen": 0, + "tickangle": -90, + "thickness": 10 + } + } + ) + traces.append(trace_anomalies) + + return traces, anomaly_classification[-1] + + +def _generate_all_charts(spec, input_data): + """Generate all charts specified in the specification file. + + :param spec: Specification. + :param input_data: Full data set. + :type spec: Specification + :type input_data: InputData + """ + + def _generate_chart(_, data_q, graph): + """Generates the chart. + """ + + logs = list() + + logging.info(" Generating the chart '{0}' ...". + format(graph.get("title", ""))) + logs.append(("INFO", " Generating the chart '{0}' ...". + format(graph.get("title", "")))) + + job_name = spec.cpta["data"].keys()[0] + + csv_tbl = list() + res = list() + + # Transform the data + logs.append(("INFO", " Creating the data set for the {0} '{1}'.". 
+ format(graph.get("type", ""), graph.get("title", "")))) + data = input_data.filter_data(graph, continue_on_error=True) + if data is None: + logging.error("No data.") + return + + chart_data = dict() + for job in data: + for index, bld in job.items(): + for test_name, test in bld.items(): + if chart_data.get(test_name, None) is None: + chart_data[test_name] = OrderedDict() + try: + chart_data[test_name][int(index)] = \ + test["result"]["throughput"] + except (KeyError, TypeError): + pass + + # Add items to the csv table: + for tst_name, tst_data in chart_data.items(): + tst_lst = list() + for bld in builds_lst: + itm = tst_data.get(int(bld), '') + tst_lst.append(str(itm)) + csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n') + # Generate traces: + traces = list() + win_size = 14 + index = 0 + for test_name, test_data in chart_data.items(): + if not test_data: + logs.append(("WARNING", "No data for the test '{0}'". + format(test_name))) + continue + test_name = test_name.split('.')[-1] + trace, rslt = _generate_trending_traces( + test_data, + build_info=build_info, + moving_win_size=win_size, + name='-'.join(test_name.split('-')[3:-1]), + color=COLORS[index]) + traces.extend(trace) + res.append(rslt) + index += 1 + + if traces: + # Generate the chart: + graph["layout"]["xaxis"]["title"] = \ + graph["layout"]["xaxis"]["title"].format(job=job_name) + name_file = "{0}-{1}{2}".format(spec.cpta["output-file"], + graph["output-file-name"], + spec.cpta["output-file-type"]) + + logs.append(("INFO", " Writing the file '{0}' ...". + format(name_file))) + plpl = plgo.Figure(data=traces, layout=graph["layout"]) + try: + ploff.plot(plpl, show_link=False, auto_open=False, + filename=name_file) + except plerr.PlotlyEmptyDataError: + logs.append(("WARNING", "No data for the plot. Skipped.")) + + data_out = { + "csv_table": csv_tbl, + "results": res, + "logs": logs + } + data_q.put(data_out) + + job_name = spec.cpta["data"].keys()[0] + + builds_lst = list() + for build in spec.input["builds"][job_name]: + status = build["status"] + if status != "failed" and status != "not found": + builds_lst.append(str(build["build"])) + + # Get "build ID": "date" dict: + build_info = OrderedDict() + for build in builds_lst: + try: + build_info[build] = ( + input_data.metadata(job_name, build)["generated"][:14], + input_data.metadata(job_name, build)["version"] + ) + except KeyError: + build_info[build] = ("", "") + + work_queue = multiprocessing.JoinableQueue() + manager = multiprocessing.Manager() + data_queue = manager.Queue() + cpus = multiprocessing.cpu_count() + + workers = list() + for cpu in range(cpus): + worker = Worker(work_queue, + data_queue, + _generate_chart) + worker.daemon = True + worker.start() + workers.append(worker) + os.system("taskset -p -c {0} {1} > /dev/null 2>&1". 
+ format(cpu, worker.pid)) + + for chart in spec.cpta["plots"]: + work_queue.put((chart, )) + work_queue.join() + + anomaly_classifications = list() + + # Create the header: + csv_table = list() + header = "Build Number:," + ",".join(builds_lst) + '\n' + csv_table.append(header) + build_dates = [x[0] for x in build_info.values()] + header = "Build Date:," + ",".join(build_dates) + '\n' + csv_table.append(header) + vpp_versions = [x[1] for x in build_info.values()] + header = "VPP Version:," + ",".join(vpp_versions) + '\n' + csv_table.append(header) + + while not data_queue.empty(): + result = data_queue.get() + + anomaly_classifications.extend(result["results"]) + csv_table.extend(result["csv_table"]) + + for item in result["logs"]: + if item[0] == "INFO": + logging.info(item[1]) + elif item[0] == "ERROR": + logging.error(item[1]) + elif item[0] == "DEBUG": + logging.debug(item[1]) + elif item[0] == "CRITICAL": + logging.critical(item[1]) + elif item[0] == "WARNING": + logging.warning(item[1]) + + del data_queue + + # Terminate all workers + for worker in workers: + worker.terminate() + worker.join() + + # Write the tables: + file_name = spec.cpta["output-file"] + "-trending" + with open("{0}.csv".format(file_name), 'w') as file_handler: + file_handler.writelines(csv_table) + + txt_table = None + with open("{0}.csv".format(file_name), 'rb') as csv_file: + csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') + line_nr = 0 + for row in csv_content: + if txt_table is None: + txt_table = prettytable.PrettyTable(row) + else: + if line_nr > 1: + for idx, item in enumerate(row): + try: + row[idx] = str(round(float(item) / 1000000, 2)) + except ValueError: + pass + try: + txt_table.add_row(row) + except Exception as err: + logging.warning("Error occurred while generating TXT table:" + "\n{0}".format(err)) + line_nr += 1 + txt_table.align["Build Number:"] = "l" + with open("{0}.txt".format(file_name), "w") as txt_file: + txt_file.write(str(txt_table)) + + # Evaluate result: + if anomaly_classifications: + result = "PASS" + for classification in anomaly_classifications: + if classification == "regression" or classification == "outlier": + result = "FAIL" + break + else: + result = "FAIL" + + logging.info("Partial results: {0}".format(anomaly_classifications)) + logging.info("Result: {0}".format(result)) + + return result diff --git a/resources/tools/presentation/new/generator_files.py b/resources/tools/presentation/new/generator_files.py new file mode 100644 index 0000000000..f8428adf8c --- /dev/null +++ b/resources/tools/presentation/new/generator_files.py @@ -0,0 +1,177 @@ +# Copyright (c) 2017 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Algorithms to generate files. +""" + + +import logging + +from utils import get_files, get_rst_title_char + +RST_INCLUDE_TABLE = ("\n.. only:: html\n\n" + " .. csv-table::\n" + " :header-rows: 1\n" + " :widths: auto\n" + " :align: center\n" + " :file: {file_html}\n" + "\n.. only:: latex\n\n" + "\n .. 
raw:: latex\n\n" + " \csvautolongtable{{{file_latex}}}\n\n") + + +def generate_files(spec, data): + """Generate all files specified in the specification file. + + :param spec: Specification read from the specification file. + :param data: Data to process. + :type spec: Specification + :type data: InputData + """ + + logging.info("Generating the files ...") + for file_spec in spec.files: + try: + eval(file_spec["algorithm"])(file_spec, data) + except NameError as err: + logging.error("Probably algorithm '{alg}' is not defined: {err}". + format(alg=file_spec["algorithm"], err=repr(err))) + logging.info("Done.") + + +def _tests_in_suite(suite_name, tests): + """Check if the suite includes tests. + + :param suite_name: Name of the suite to be checked. + :param tests: Set of tests + :type suite_name: str + :type tests: pandas.Series + :returns: True if the suite includes tests. + :rtype: bool + """ + + for key in tests.keys(): + if suite_name == tests[key]["parent"]: + return True + return False + + +def file_test_results(file_spec, input_data): + """Generate the file(s) with algorithm: file_test_results specified in the + specification file. + + :param file_spec: File to generate. + :param input_data: Data to process. + :type file_spec: pandas.Series + :type input_data: InputData + """ + + file_name = "{0}{1}".format(file_spec["output-file"], + file_spec["output-file-ext"]) + rst_header = file_spec["file-header"] + + logging.info(" Generating the file {0} ...".format(file_name)) + + table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True) + if len(table_lst) == 0: + logging.error(" No tables to include in '{0}'. Skipping.". + format(file_spec["dir-tables"])) + return None + + job = file_spec["data"].keys()[0] + build = str(file_spec["data"][job][0]) + + logging.info(" Writing file '{0}'".format(file_name)) + + suites = input_data.suites(job, build)[file_spec["data-start-level"]:] + suites.sort_index(inplace=True) + + with open(file_name, "w") as file_handler: + file_handler.write(rst_header) + for suite_longname, suite in suites.iteritems(): + suite_name = suite["name"] + file_handler.write("\n{0}\n{1}\n".format( + suite_name, get_rst_title_char( + suite["level"] - file_spec["data-start-level"] - 1) * + len(suite_name))) + file_handler.write("\n{0}\n".format( + suite["doc"].replace('|br|', '\n\n -'))) + if _tests_in_suite(suite_name, input_data.tests(job, build)): + for tbl_file in table_lst: + if suite_name in tbl_file: + file_handler.write( + RST_INCLUDE_TABLE.format( + file_latex=tbl_file, + file_html=tbl_file.split("/")[-1])) + + logging.info(" Done.") + + +def file_merged_test_results(file_spec, input_data): + """Generate the file(s) with algorithm: file_merged_test_results specified + in the specification file. + + :param file_spec: File to generate. + :param input_data: Data to process. + :type file_spec: pandas.Series + :type input_data: InputData + """ + + file_name = "{0}{1}".format(file_spec["output-file"], + file_spec["output-file-ext"]) + rst_header = file_spec["file-header"] + + logging.info(" Generating the file {0} ...".format(file_name)) + + table_lst = get_files(file_spec["dir-tables"], ".csv", full_path=True) + if len(table_lst) == 0: + logging.error(" No tables to include in '{0}'. Skipping.". + format(file_spec["dir-tables"])) + return None + + logging.info(" Writing file '{0}'".format(file_name)) + + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(file_spec.get("type", ""), file_spec.get("title", ""))) + tests = input_data.filter_data(file_spec) + tests = input_data.merge_data(tests) + + logging.info(" Creating the data set for the {0} '{1}'.". + format(file_spec.get("type", ""), file_spec.get("title", ""))) + suites = input_data.filter_data(file_spec, data_set="suites") + suites = input_data.merge_data(suites) + suites.sort_index(inplace=True) + + with open(file_name, "w") as file_handler: + file_handler.write(rst_header) + for suite_longname, suite in suites.iteritems(): + if "ndrchk" in suite_longname or "pdrchk" in suite_longname: + continue + if len(suite_longname.split(".")) <= file_spec["data-start-level"]: + continue + suite_name = suite["name"] + file_handler.write("\n{0}\n{1}\n".format( + suite_name, get_rst_title_char( + suite["level"] - file_spec["data-start-level"] - 1) * + len(suite_name))) + file_handler.write("\n{0}\n".format( + suite["doc"].replace('|br|', '\n\n -'))) + if _tests_in_suite(suite_name, tests): + for tbl_file in table_lst: + if suite_name in tbl_file: + file_handler.write( + RST_INCLUDE_TABLE.format( + file_latex=tbl_file, + file_html=tbl_file.split("/")[-1])) + + logging.info(" Done.") diff --git a/resources/tools/presentation/new/generator_plots.py b/resources/tools/presentation/new/generator_plots.py new file mode 100644 index 0000000000..aaee31f53b --- /dev/null +++ b/resources/tools/presentation/new/generator_plots.py @@ -0,0 +1,399 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Algorithms to generate plots. +""" + + +import logging +import pandas as pd +import plotly.offline as ploff +import plotly.graph_objs as plgo + +from plotly.exceptions import PlotlyError + +from utils import mean + + +def generate_plots(spec, data): + """Generate all plots specified in the specification file. + + :param spec: Specification read from the specification file. + :param data: Data to process. + :type spec: Specification + :type data: InputData + """ + + logging.info("Generating the plots ...") + for index, plot in enumerate(spec.plots): + try: + logging.info(" Plot nr {0}:".format(index + 1)) + eval(plot["algorithm"])(plot, data) + except NameError as err: + logging.error("Probably algorithm '{alg}' is not defined: {err}". + format(alg=plot["algorithm"], err=repr(err))) + logging.info("Done.") + + +def plot_performance_box(plot, input_data): + """Generate the plot(s) with algorithm: plot_performance_box + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the plot {0} ...". + format(plot.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(plot.get("type", ""), plot.get("title", ""))) + data = input_data.filter_data(plot) + if data is None: + logging.error("No data.") + return + + # Prepare the data for the plot + y_vals = dict() + for job in data: + for build in job: + for test in build: + if y_vals.get(test["parent"], None) is None: + y_vals[test["parent"]] = list() + try: + y_vals[test["parent"]].append(test["throughput"]["value"]) + except (KeyError, TypeError): + y_vals[test["parent"]].append(None) + + # Add None to the lists with missing data + max_len = 0 + for val in y_vals.values(): + if len(val) > max_len: + max_len = len(val) + for key, val in y_vals.items(): + if len(val) < max_len: + val.extend([None for _ in range(max_len - len(val))]) + + # Add plot traces + traces = list() + df = pd.DataFrame(y_vals) + df.head() + for i, col in enumerate(df.columns): + name = "{0}. {1}".format(i + 1, col.lower().replace('-ndrpdrdisc', '')) + traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), + y=df[col], + name=name, + **plot["traces"])) + + try: + # Create plot + plpl = plgo.Figure(data=traces, layout=plot["layout"]) + + # Export Plot + logging.info(" Writing file '{0}{1}'.". + format(plot["output-file"], plot["output-file-type"])) + ploff.plot(plpl, + show_link=False, auto_open=False, + filename='{0}{1}'.format(plot["output-file"], + plot["output-file-type"])) + except PlotlyError as err: + logging.error(" Finished with error: {}". + format(str(err).replace("\n", " "))) + return + + logging.info(" Done.") + + +def plot_latency_box(plot, input_data): + """Generate the plot(s) with algorithm: plot_latency_box + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the plot {0} ...". + format(plot.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(plot.get("type", ""), plot.get("title", ""))) + data = input_data.filter_data(plot) + if data is None: + logging.error("No data.") + return + + # Prepare the data for the plot + y_tmp_vals = dict() + for job in data: + for build in job: + for test in build: + if y_tmp_vals.get(test["parent"], None) is None: + y_tmp_vals[test["parent"]] = [ + list(), # direction1, min + list(), # direction1, avg + list(), # direction1, max + list(), # direction2, min + list(), # direction2, avg + list() # direction2, max + ] + try: + y_tmp_vals[test["parent"]][0].append( + test["latency"]["direction1"]["50"]["min"]) + y_tmp_vals[test["parent"]][1].append( + test["latency"]["direction1"]["50"]["avg"]) + y_tmp_vals[test["parent"]][2].append( + test["latency"]["direction1"]["50"]["max"]) + y_tmp_vals[test["parent"]][3].append( + test["latency"]["direction2"]["50"]["min"]) + y_tmp_vals[test["parent"]][4].append( + test["latency"]["direction2"]["50"]["avg"]) + y_tmp_vals[test["parent"]][5].append( + test["latency"]["direction2"]["50"]["max"]) + except (KeyError, TypeError): + pass + + y_vals = dict() + for key, values in y_tmp_vals.items(): + y_vals[key] = list() + for val in values: + if val: + average = mean(val) + else: + average = None + y_vals[key].append(average) + y_vals[key].append(average) # Twice for plot.ly + + # Add plot traces + traces = list() + try: + df = pd.DataFrame(y_vals) + df.head() + except ValueError as err: + logging.error(" Finished with error: {}". + format(str(err).replace("\n", " "))) + return + + for i, col in enumerate(df.columns): + name = "{0}. 
{1}".format(i + 1, col.lower().replace('-ndrpdrdisc', '')) + traces.append(plgo.Box(x=['TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint1-to-SUT1-to-SUT2-to-TGint2', + 'TGint2-to-SUT2-to-SUT1-to-TGint1', + 'TGint2-to-SUT2-to-SUT1-to-TGint1', + 'TGint2-to-SUT2-to-SUT1-to-TGint1', + 'TGint2-to-SUT2-to-SUT1-to-TGint1', + 'TGint2-to-SUT2-to-SUT1-to-TGint1', + 'TGint2-to-SUT2-to-SUT1-to-TGint1'], + y=df[col], + name=name, + **plot["traces"])) + + try: + # Create plot + logging.info(" Writing file '{0}{1}'.". + format(plot["output-file"], plot["output-file-type"])) + plpl = plgo.Figure(data=traces, layout=plot["layout"]) + + # Export Plot + ploff.plot(plpl, + show_link=False, auto_open=False, + filename='{0}{1}'.format(plot["output-file"], + plot["output-file-type"])) + except PlotlyError as err: + logging.error(" Finished with error: {}". + format(str(err).replace("\n", " "))) + return + + logging.info(" Done.") + + +def plot_throughput_speedup_analysis(plot, input_data): + """Generate the plot(s) with algorithm: plot_throughput_speedup_analysis + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the plot {0} ...". + format(plot.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(plot.get("type", ""), plot.get("title", ""))) + data = input_data.filter_data(plot) + if data is None: + logging.error("No data.") + return + + throughput = dict() + for job in data: + for build in job: + for test in build: + if throughput.get(test["parent"], None) is None: + throughput[test["parent"]] = {"1": list(), + "2": list(), + "4": list()} + try: + if "1T1C" in test["tags"]: + throughput[test["parent"]]["1"].\ + append(test["throughput"]["value"]) + elif "2T2C" in test["tags"]: + throughput[test["parent"]]["2"]. \ + append(test["throughput"]["value"]) + elif "4T4C" in test["tags"]: + throughput[test["parent"]]["4"]. \ + append(test["throughput"]["value"]) + except (KeyError, TypeError): + pass + + if not throughput: + logging.warning("No data for the plot '{}'". + format(plot.get("title", ""))) + return + + for test_name, test_vals in throughput.items(): + for key, test_val in test_vals.items(): + if test_val: + throughput[test_name][key] = sum(test_val) / len(test_val) + + names = ['1 core', '2 cores', '4 cores'] + x_vals = list() + y_vals_1 = list() + y_vals_2 = list() + y_vals_4 = list() + + for test_name, test_vals in throughput.items(): + if test_vals["1"]: + x_vals.append("-".join(test_name.split('-')[1:-1])) + y_vals_1.append(1) + if test_vals["2"]: + y_vals_2.append( + round(float(test_vals["2"]) / float(test_vals["1"]), 2)) + else: + y_vals_2.append(None) + if test_vals["4"]: + y_vals_4.append( + round(float(test_vals["4"]) / float(test_vals["1"]), 2)) + else: + y_vals_4.append(None) + + y_vals = [y_vals_1, y_vals_2, y_vals_4] + + y_vals_zipped = zip(names, y_vals) + traces = list() + for val in y_vals_zipped: + traces.append(plgo.Bar(x=x_vals, + y=val[1], + name=val[0])) + + try: + # Create plot + logging.info(" Writing file '{0}{1}'.". 
+ format(plot["output-file"], plot["output-file-type"])) + plpl = plgo.Figure(data=traces, layout=plot["layout"]) + + # Export Plot + ploff.plot(plpl, + show_link=False, auto_open=False, + filename='{0}{1}'.format(plot["output-file"], + plot["output-file-type"])) + except PlotlyError as err: + logging.error(" Finished with error: {}". + format(str(err).replace("\n", " "))) + return + + logging.info(" Done.") + + +def plot_http_server_performance_box(plot, input_data): + """Generate the plot(s) with algorithm: plot_http_server_performance_box + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the plot {0} ...". + format(plot.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(plot.get("type", ""), plot.get("title", ""))) + data = input_data.filter_data(plot) + if data is None: + logging.error("No data.") + return + + # Prepare the data for the plot + y_vals = dict() + for job in data: + for build in job: + for test in build: + if y_vals.get(test["name"], None) is None: + y_vals[test["name"]] = list() + try: + y_vals[test["name"]].append(test["result"]["value"]) + except (KeyError, TypeError): + y_vals[test["name"]].append(None) + + # Add None to the lists with missing data + max_len = 0 + for val in y_vals.values(): + if len(val) > max_len: + max_len = len(val) + for key, val in y_vals.items(): + if len(val) < max_len: + val.extend([None for _ in range(max_len - len(val))]) + + # Add plot traces + traces = list() + df = pd.DataFrame(y_vals) + df.head() + for i, col in enumerate(df.columns): + name = "{0}. {1}".format(i + 1, col.lower().replace('-cps', ''). + replace('-rps', '')) + traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), + y=df[col], + name=name, + **plot["traces"])) + try: + # Create plot + plpl = plgo.Figure(data=traces, layout=plot["layout"]) + + # Export Plot + logging.info(" Writing file '{0}{1}'.". + format(plot["output-file"], plot["output-file-type"])) + ploff.plot(plpl, + show_link=False, auto_open=False, + filename='{0}{1}'.format(plot["output-file"], + plot["output-file-type"])) + except PlotlyError as err: + logging.error(" Finished with error: {}". + format(str(err).replace("\n", " "))) + return + + logging.info(" Done.") diff --git a/resources/tools/presentation/new/generator_report.py b/resources/tools/presentation/new/generator_report.py new file mode 100644 index 0000000000..07103dbb1f --- /dev/null +++ b/resources/tools/presentation/new/generator_report.py @@ -0,0 +1,191 @@ +# Copyright (c) 2017 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Report generation. 
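+
+Builds the html and pdf versions of the report with Sphinx, patches the
+generated .css and archives the inputs and the result.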
+"""
+
+import logging
+import datetime
+
+from shutil import make_archive
+
+from utils import get_files, execute_command, archive_input_data
+
+
+# .css file for the html format of the report
+THEME_OVERRIDES = """/* override table width restrictions */
+@media screen and (min-width: 767px) {
+    .wy-table-responsive table td, .wy-table-responsive table th {
+        white-space: normal !important;
+    }
+
+    .wy-table-responsive {
+        font-size: small;
+        margin-bottom: 24px;
+        max-width: 100%;
+        overflow: visible !important;
+    }
+}
+.rst-content blockquote {
+    margin-left: 0px;
+    line-height: 18px;
+    margin-bottom: 0px;
+}
+"""
+
+# Command to build the html format of the report
+HTML_BUILDER = 'sphinx-build -v -c . -a ' \
+               '-b html -E ' \
+               '-t html ' \
+               '-D release={release} ' \
+               '-D version="{release} report - {date}" ' \
+               '{working_dir} ' \
+               '{build_dir}/'
+
+# Command to build the pdf format of the report
+PDF_BUILDER = 'sphinx-build -v -c . -a ' \
+              '-b latex -E ' \
+              '-t latex ' \
+              '-D release={release} ' \
+              '-D version="{release} report - {date}" ' \
+              '{working_dir} ' \
+              '{build_dir}'
+
+
+def generate_report(release, spec):
+    """Generate all formats and versions of the report.
+
+    :param release: Release string of the product.
+    :param spec: Specification read from the specification file.
+    :type release: str
+    :type spec: Specification
+    """
+
+    logging.info("Generating the report ...")
+
+    report = {
+        "html": generate_html_report,
+        "pdf": generate_pdf_report
+    }
+
+    for report_format, versions in spec.output["format"].items():
+        report[report_format](release, spec, versions)
+
+    archive_input_data(spec)
+    archive_report(spec)
+
+    logging.info("Done.")
+
+
+def generate_html_report(release, spec, versions):
+    """Generate the html format of the report.
+
+    :param release: Release string of the product.
+    :param spec: Specification read from the specification file.
+    :param versions: List of versions to generate.
+    :type release: str
+    :type spec: Specification
+    :type versions: list
+    """
+
+    logging.info("  Generating the html report, give me a few minutes, please "
+                 "...")
+
+    cmd = HTML_BUILDER.format(
+        release=release,
+        date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
+        working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
+        build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
+    execute_command(cmd)
+
+    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
+            css_file:
+        css_file.write(THEME_OVERRIDES)
+
+    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
+            css_file:
+        css_file.write(THEME_OVERRIDES)
+
+    logging.info("  Done.")
+
+
+def generate_pdf_report(release, spec, versions):
+    """Generate the pdf format of the report.
+
+    :param release: Release string of the product.
+    :param spec: Specification read from the specification file.
+    :param versions: List of versions to generate. Not implemented yet.
+    :type release: str
+    :type spec: Specification
+    :type versions: list
+    """
+
+    logging.info("  Generating the pdf report, give me a few minutes, please "
+                 "...")
+
+    convert_plots = "xvfb-run -a wkhtmltopdf {html} {pdf}.pdf"
+
+    # Convert Plotly graphs in HTML format to PDF.
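+    # Each plot was exported by Plotly as a standalone .html file; render
+    # it to a static .pdf with wkhtmltopdf, run under xvfb-run because
+    # wkhtmltopdf needs an X server even in this headless use.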
+ plots = get_files(spec.environment["paths"]["DIR[STATIC,VPP]"], "html") + plots.extend(get_files(spec.environment["paths"]["DIR[STATIC,DPDK]"], + "html")) + for plot in plots: + file_name = "{0}".format(plot.rsplit(".", 1)[0]) + cmd = convert_plots.format(html=plot, pdf=file_name) + execute_command(cmd) + + # Generate the LaTeX documentation + build_dir = spec.environment["paths"]["DIR[BUILD,LATEX]"] + cmd = PDF_BUILDER.format( + release=release, + date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'), + working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"], + build_dir=build_dir) + execute_command(cmd) + + # Build pdf documentation + archive_dir = spec.environment["paths"]["DIR[STATIC,ARCH]"] + cmds = [ + 'cd {build_dir} && ' + 'pdflatex -shell-escape -interaction nonstopmode csit.tex || true'. + format(build_dir=build_dir), + 'cd {build_dir} && ' + 'pdflatex -interaction nonstopmode csit.tex || true'. + format(build_dir=build_dir), + 'cd {build_dir} && ' + 'cp csit.pdf ../{archive_dir}/csit_{release}.pdf'. + format(build_dir=build_dir, + archive_dir=archive_dir, + release=release) + ] + + for cmd in cmds: + execute_command(cmd) + + logging.info(" Done.") + + +def archive_report(spec): + """Archive the report. + + :param spec: Specification read from the specification file. + :type spec: Specification + """ + + logging.info(" Archiving the report ...") + + make_archive("csit.report", + "gztar", + base_dir=spec.environment["paths"]["DIR[BUILD,HTML]"]) + + logging.info(" Done.") diff --git a/resources/tools/presentation/new/generator_tables.py b/resources/tools/presentation/new/generator_tables.py new file mode 100644 index 0000000000..12f160145b --- /dev/null +++ b/resources/tools/presentation/new/generator_tables.py @@ -0,0 +1,995 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Algorithms to generate tables. +""" + + +import logging +import csv +import prettytable +import pandas as pd + +from string import replace +from collections import OrderedDict +from numpy import nan, isnan +from xml.etree import ElementTree as ET + +from errors import PresentationError +from utils import mean, stdev, relative_change, classify_anomalies + + +def generate_tables(spec, data): + """Generate all tables specified in the specification file. + + :param spec: Specification read from the specification file. + :param data: Data to process. + :type spec: Specification + :type data: InputData + """ + + logging.info("Generating the tables ...") + for table in spec.tables: + try: + eval(table["algorithm"])(table, data) + except NameError as err: + logging.error("Probably algorithm '{alg}' is not defined: {err}". + format(alg=table["algorithm"], err=repr(err))) + logging.info("Done.") + + +def table_details(table, input_data): + """Generate the table(s) with algorithm: table_detailed_test_results + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. 
+ :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table) + + # Prepare the header of the tables + header = list() + for column in table["columns"]: + header.append('"{0}"'.format(str(column["title"]).replace('"', '""'))) + + # Generate the data for the table according to the model in the table + # specification + job = table["data"].keys()[0] + build = str(table["data"][job][0]) + try: + suites = input_data.suites(job, build) + except KeyError: + logging.error(" No data available. The table will not be generated.") + return + + for suite_longname, suite in suites.iteritems(): + # Generate data + suite_name = suite["name"] + table_lst = list() + for test in data[job][build].keys(): + if data[job][build][test]["parent"] in suite_name: + row_lst = list() + for column in table["columns"]: + try: + col_data = str(data[job][build][test][column["data"]. + split(" ")[1]]).replace('"', '""') + if column["data"].split(" ")[1] in ("vat-history", + "show-run"): + col_data = replace(col_data, " |br| ", "", + maxreplace=1) + col_data = " |prein| {0} |preout| ".\ + format(col_data[:-5]) + row_lst.append('"{0}"'.format(col_data)) + except KeyError: + row_lst.append("No data") + table_lst.append(row_lst) + + # Write the data to file + if table_lst: + file_name = "{0}_{1}{2}".format(table["output-file"], suite_name, + table["output-file-ext"]) + logging.info(" Writing file: '{}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(",".join(header) + "\n") + for item in table_lst: + file_handler.write(",".join(item) + "\n") + + logging.info(" Done.") + + +def table_merged_details(table, input_data): + """Generate the table(s) with algorithm: table_merged_details + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table) + data = input_data.merge_data(data) + data.sort_index(inplace=True) + + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + suites = input_data.filter_data(table, data_set="suites") + suites = input_data.merge_data(suites) + + # Prepare the header of the tables + header = list() + for column in table["columns"]: + header.append('"{0}"'.format(str(column["title"]).replace('"', '""'))) + + for _, suite in suites.iteritems(): + # Generate data + suite_name = suite["name"] + table_lst = list() + for test in data.keys(): + if data[test]["parent"] in suite_name: + row_lst = list() + for column in table["columns"]: + try: + col_data = str(data[test][column["data"]. 
+ split(" ")[1]]).replace('"', '""') + if column["data"].split(" ")[1] in ("vat-history", + "show-run"): + col_data = replace(col_data, " |br| ", "", + maxreplace=1) + col_data = " |prein| {0} |preout| ".\ + format(col_data[:-5]) + row_lst.append('"{0}"'.format(col_data)) + except KeyError: + row_lst.append("No data") + table_lst.append(row_lst) + + # Write the data to file + if table_lst: + file_name = "{0}_{1}{2}".format(table["output-file"], suite_name, + table["output-file-ext"]) + logging.info(" Writing file: '{}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(",".join(header) + "\n") + for item in table_lst: + file_handler.write(",".join(item) + "\n") + + logging.info(" Done.") + + +def table_performance_improvements(table, input_data): + """Generate the table(s) with algorithm: table_performance_improvements + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + def _write_line_to_file(file_handler, data): + """Write a line to the .csv file. + + :param file_handler: File handler for the csv file. It must be open for + writing text. + :param data: Item to be written to the file. + :type file_handler: BinaryIO + :type data: list + """ + + line_lst = list() + for item in data: + if isinstance(item["data"], str): + # Remove -?drdisc from the end + if item["data"].endswith("drdisc"): + item["data"] = item["data"][:-8] + line_lst.append(item["data"]) + elif isinstance(item["data"], float): + line_lst.append("{:.1f}".format(item["data"])) + elif item["data"] is None: + line_lst.append("") + file_handler.write(",".join(line_lst) + "\n") + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Read the template + file_name = table.get("template", None) + if file_name: + try: + tmpl = _read_csv_template(file_name) + except PresentationError: + logging.error(" The template '{0}' does not exist. Skipping the " + "table.".format(file_name)) + return None + else: + logging.error("The template is not defined. Skipping the table.") + return None + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". 
+ format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table) + + # Prepare the header of the tables + header = list() + for column in table["columns"]: + header.append(column["title"]) + + # Generate the data for the table according to the model in the table + # specification + tbl_lst = list() + for tmpl_item in tmpl: + tbl_item = list() + for column in table["columns"]: + cmd = column["data"].split(" ")[0] + args = column["data"].split(" ")[1:] + if cmd == "template": + try: + val = float(tmpl_item[int(args[0])]) + except ValueError: + val = tmpl_item[int(args[0])] + tbl_item.append({"data": val}) + elif cmd == "data": + jobs = args[0:-1] + operation = args[-1] + data_lst = list() + for job in jobs: + for build in data[job]: + try: + data_lst.append(float(build[tmpl_item[0]] + ["throughput"]["value"])) + except (KeyError, TypeError): + # No data, ignore + continue + if data_lst: + tbl_item.append({"data": (eval(operation)(data_lst)) / + 1000000}) + else: + tbl_item.append({"data": None}) + elif cmd == "operation": + operation = args[0] + try: + nr1 = float(tbl_item[int(args[1])]["data"]) + nr2 = float(tbl_item[int(args[2])]["data"]) + if nr1 and nr2: + tbl_item.append({"data": eval(operation)(nr1, nr2)}) + else: + tbl_item.append({"data": None}) + except (IndexError, ValueError, TypeError): + logging.error("No data for {0}".format(tbl_item[0]["data"])) + tbl_item.append({"data": None}) + continue + else: + logging.error("Not supported command {0}. Skipping the table.". + format(cmd)) + return None + tbl_lst.append(tbl_item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1]["data"], reverse=True) + + # Create the tables and write them to the files + file_names = [ + "{0}_ndr_top{1}".format(table["output-file"], table["output-file-ext"]), + "{0}_pdr_top{1}".format(table["output-file"], table["output-file-ext"]), + "{0}_ndr_low{1}".format(table["output-file"], table["output-file-ext"]), + "{0}_pdr_low{1}".format(table["output-file"], table["output-file-ext"]) + ] + + for file_name in file_names: + logging.info(" Writing the file '{0}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(",".join(header) + "\n") + for item in tbl_lst: + if isinstance(item[-1]["data"], float): + rel_change = round(item[-1]["data"], 1) + else: + rel_change = item[-1]["data"] + if "ndr_top" in file_name \ + and "ndr" in item[0]["data"] \ + and rel_change >= 10.0: + _write_line_to_file(file_handler, item) + elif "pdr_top" in file_name \ + and "pdr" in item[0]["data"] \ + and rel_change >= 10.0: + _write_line_to_file(file_handler, item) + elif "ndr_low" in file_name \ + and "ndr" in item[0]["data"] \ + and rel_change < 10.0: + _write_line_to_file(file_handler, item) + elif "pdr_low" in file_name \ + and "pdr" in item[0]["data"] \ + and rel_change < 10.0: + _write_line_to_file(file_handler, item) + + logging.info(" Done.") + + +def _read_csv_template(file_name): + """Read the template from a .csv file. + + :param file_name: Name / full path / relative path of the file to read. + :type file_name: str + :returns: Data from the template as list (lines) of lists (items on line). + :rtype: list + :raises: PresentationError if it is not possible to read the file. 
+ """ + + try: + with open(file_name, 'r') as csv_file: + tmpl_data = list() + for line in csv_file: + tmpl_data.append(line[:-1].split(",")) + return tmpl_data + except IOError as err: + raise PresentationError(str(err), level="ERROR") + + +def table_performance_comparison(table, input_data): + """Generate the table(s) with algorithm: table_performance_comparison + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the tables + try: + header = ["Test case", ] + + history = table.get("history", None) + if history: + for item in history: + header.extend( + ["{0} Throughput [Mpps]".format(item["title"]), + "{0} Stdev [Mpps]".format(item["title"])]) + header.extend( + ["{0} Throughput [Mpps]".format(table["reference"]["title"]), + "{0} Stdev [Mpps]".format(table["reference"]["title"]), + "{0} Throughput [Mpps]".format(table["compare"]["title"]), + "{0} Stdev [Mpps]".format(table["compare"]["title"]), + "Change [%]"]) + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". + format(err)) + return + + # Prepare data to the table: + tbl_dict = dict() + for job, builds in table["reference"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if tbl_dict.get(tst_name, None) is None: + name = "{0}-{1}".format(tst_data["parent"].split("-")[0], + "-".join(tst_data["name"]. + split("-")[1:])) + tbl_dict[tst_name] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + tbl_dict[tst_name]["ref-data"].\ + append(tst_data["throughput"]["value"]) + except TypeError: + pass # No data in output.xml for this test + + for job, builds in table["compare"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + try: + tbl_dict[tst_name]["cmp-data"].\ + append(tst_data["throughput"]["value"]) + except KeyError: + pass + except TypeError: + tbl_dict.pop(tst_name, None) + if history: + for item in history: + for job, builds in item["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if tbl_dict.get(tst_name, None) is None: + continue + if tbl_dict[tst_name].get("history", None) is None: + tbl_dict[tst_name]["history"] = OrderedDict() + if tbl_dict[tst_name]["history"].get(item["title"], + None) is None: + tbl_dict[tst_name]["history"][item["title"]] = \ + list() + try: + tbl_dict[tst_name]["history"][item["title"]].\ + append(tst_data["throughput"]["value"]) + except (TypeError, KeyError): + pass + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] + if history: + if tbl_dict[tst_name].get("history", None) is not None: + for hist_data in tbl_dict[tst_name]["history"].values(): + if hist_data: + item.append(round(mean(hist_data) / 1000000, 2)) + item.append(round(stdev(hist_data) / 1000000, 2)) + else: + item.extend([None, None]) + else: + item.extend([None, None]) + if tbl_dict[tst_name]["ref-data"]: + data_t = tbl_dict[tst_name]["ref-data"] + # TODO: Specify window size. 
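+            # Mean and stdev of the reference runs, converted from pps
+            # to Mpps for readability of the table.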
+ if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) + else: + item.extend([None, None]) + if tbl_dict[tst_name]["cmp-data"]: + data_t = tbl_dict[tst_name]["cmp-data"] + # TODO: Specify window size. + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) + else: + item.extend([None, None]) + if item[-4] is not None and item[-2] is not None and item[-4] != 0: + item.append(int(relative_change(float(item[-4]), float(item[-2])))) + if len(item) == len(header): + tbl_lst.append(item) + + # Sort the table according to the relative change + tbl_lst.sort(key=lambda rel: rel[-1], reverse=True) + + # Generate tables: + # All tests in csv: + tbl_names = ["{0}-ndr-1t1c-full{1}".format(table["output-file"], + table["output-file-ext"]), + "{0}-ndr-2t2c-full{1}".format(table["output-file"], + table["output-file-ext"]), + "{0}-ndr-4t4c-full{1}".format(table["output-file"], + table["output-file-ext"]), + "{0}-pdr-1t1c-full{1}".format(table["output-file"], + table["output-file-ext"]), + "{0}-pdr-2t2c-full{1}".format(table["output-file"], + table["output-file-ext"]), + "{0}-pdr-4t4c-full{1}".format(table["output-file"], + table["output-file-ext"]) + ] + for file_name in tbl_names: + logging.info(" Writing file: '{0}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_lst: + if (file_name.split("-")[-3] in test[0] and # NDR vs PDR + file_name.split("-")[-2] in test[0]): # cores + test[0] = "-".join(test[0].split("-")[:-1]) + file_handler.write(",".join([str(item) for item in test]) + + "\n") + + # All tests in txt: + tbl_names_txt = ["{0}-ndr-1t1c-full.txt".format(table["output-file"]), + "{0}-ndr-2t2c-full.txt".format(table["output-file"]), + "{0}-ndr-4t4c-full.txt".format(table["output-file"]), + "{0}-pdr-1t1c-full.txt".format(table["output-file"]), + "{0}-pdr-2t2c-full.txt".format(table["output-file"]), + "{0}-pdr-4t4c-full.txt".format(table["output-file"]) + ] + + for i, txt_name in enumerate(tbl_names_txt): + txt_table = None + logging.info(" Writing file: '{0}'".format(txt_name)) + with open(tbl_names[i], 'rb') as csv_file: + csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') + for row in csv_content: + if txt_table is None: + txt_table = prettytable.PrettyTable(row) + else: + txt_table.add_row(row) + txt_table.align["Test case"] = "l" + with open(txt_name, "w") as txt_file: + txt_file.write(str(txt_table)) + + # Selected tests in csv: + input_file = "{0}-ndr-1t1c-full{1}".format(table["output-file"], + table["output-file-ext"]) + with open(input_file, "r") as in_file: + lines = list() + for line in in_file: + lines.append(line) + + output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"], + table["output-file-ext"]) + logging.info(" Writing file: '{0}'".format(output_file)) + with open(output_file, "w") as out_file: + out_file.write(header_str) + for i, line in enumerate(lines[1:]): + if i == table["nr-of-tests-shown"]: + break + out_file.write(line) + + output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"], + table["output-file-ext"]) + logging.info(" Writing file: '{0}'".format(output_file)) + with open(output_file, "w") as out_file: + out_file.write(header_str) + for i, line in enumerate(lines[-1:0:-1]): + if i == table["nr-of-tests-shown"]: + break + out_file.write(line) + + input_file = 
"{0}-pdr-1t1c-full{1}".format(table["output-file"], + table["output-file-ext"]) + with open(input_file, "r") as in_file: + lines = list() + for line in in_file: + lines.append(line) + + output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"], + table["output-file-ext"]) + logging.info(" Writing file: '{0}'".format(output_file)) + with open(output_file, "w") as out_file: + out_file.write(header_str) + for i, line in enumerate(lines[1:]): + if i == table["nr-of-tests-shown"]: + break + out_file.write(line) + + output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"], + table["output-file-ext"]) + logging.info(" Writing file: '{0}'".format(output_file)) + with open(output_file, "w") as out_file: + out_file.write(header_str) + for i, line in enumerate(lines[-1:0:-1]): + if i == table["nr-of-tests-shown"]: + break + out_file.write(line) + + +def table_performance_comparison_mrr(table, input_data): + """Generate the table(s) with algorithm: table_performance_comparison_mrr + specified in the specification file. + + :param table: Table to generate. + :param input_data: Data to process. + :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + # Transform the data + logging.info(" Creating the data set for the {0} '{1}'.". + format(table.get("type", ""), table.get("title", ""))) + data = input_data.filter_data(table, continue_on_error=True) + + # Prepare the header of the tables + try: + header = ["Test case", + "{0} Throughput [Mpps]".format(table["reference"]["title"]), + "{0} stdev [Mpps]".format(table["reference"]["title"]), + "{0} Throughput [Mpps]".format(table["compare"]["title"]), + "{0} stdev [Mpps]".format(table["compare"]["title"]), + "Change [%]"] + header_str = ",".join(header) + "\n" + except (AttributeError, KeyError) as err: + logging.error("The model is invalid, missing parameter: {0}". + format(err)) + return + + # Prepare data to the table: + tbl_dict = dict() + for job, builds in table["reference"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + if tbl_dict.get(tst_name, None) is None: + name = "{0}-{1}".format(tst_data["parent"].split("-")[0], + "-".join(tst_data["name"]. + split("-")[1:])) + tbl_dict[tst_name] = {"name": name, + "ref-data": list(), + "cmp-data": list()} + try: + tbl_dict[tst_name]["ref-data"].\ + append(tst_data["result"]["throughput"]) + except TypeError: + pass # No data in output.xml for this test + + for job, builds in table["compare"]["data"].items(): + for build in builds: + for tst_name, tst_data in data[job][str(build)].iteritems(): + try: + tbl_dict[tst_name]["cmp-data"].\ + append(tst_data["result"]["throughput"]) + except KeyError: + pass + except TypeError: + tbl_dict.pop(tst_name, None) + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + item = [tbl_dict[tst_name]["name"], ] + if tbl_dict[tst_name]["ref-data"]: + data_t = tbl_dict[tst_name]["ref-data"] + # TODO: Specify window size. + if data_t: + item.append(round(mean(data_t) / 1000000, 2)) + item.append(round(stdev(data_t) / 1000000, 2)) + else: + item.extend([None, None]) + else: + item.extend([None, None]) + if tbl_dict[tst_name]["cmp-data"]: + data_t = tbl_dict[tst_name]["cmp-data"] + # TODO: Specify window size. 
+            if data_t:
+                item.append(round(mean(data_t) / 1000000, 2))
+                item.append(round(stdev(data_t) / 1000000, 2))
+            else:
+                item.extend([None, None])
+        else:
+            item.extend([None, None])
+        if item[1] is not None and item[3] is not None and item[1] != 0:
+            item.append(int(relative_change(float(item[1]), float(item[3]))))
+        if len(item) == 6:
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate tables:
+    # All tests in csv:
+    tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"]),
+                 "{0}-2t2c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"]),
+                 "{0}-4t4c-full{1}".format(table["output-file"],
+                                           table["output-file-ext"])
+                 ]
+    for file_name in tbl_names:
+        logging.info(" Writing file: '{0}'".format(file_name))
+        with open(file_name, "w") as file_handler:
+            file_handler.write(header_str)
+            for test in tbl_lst:
+                if file_name.split("-")[-2] in test[0]:  # cores
+                    test[0] = "-".join(test[0].split("-")[:-1])
+                    file_handler.write(",".join([str(item) for item in test]) +
+                                       "\n")
+
+    # All tests in txt:
+    tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
+                     "{0}-2t2c-full.txt".format(table["output-file"]),
+                     "{0}-4t4c-full.txt".format(table["output-file"])
+                     ]
+
+    for i, txt_name in enumerate(tbl_names_txt):
+        txt_table = None
+        logging.info(" Writing file: '{0}'".format(txt_name))
+        with open(tbl_names[i], 'rb') as csv_file:
+            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+            for row in csv_content:
+                if txt_table is None:
+                    txt_table = prettytable.PrettyTable(row)
+                else:
+                    txt_table.add_row(row)
+            txt_table.align["Test case"] = "l"
+        with open(txt_name, "w") as txt_file:
+            txt_file.write(str(txt_table))
+
+
+def table_performance_trending_dashboard(table, input_data):
+    """Generate the table(s) with algorithm:
+    table_performance_trending_dashboard
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info(" Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info(" Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the tables
+    header = ["Test Case",
+              "Trend [Mpps]",
+              "Short-Term Change [%]",
+              "Long-Term Change [%]",
+              "Regressions [#]",
+              "Progressions [#]",
+              "Outliers [#]"
+              ]
+    header_str = ",".join(header) + "\n"
+
+    # Prepare the data for the table:
+    tbl_dict = dict()
+    for job, builds in table["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_name.lower() in table["ignore-list"]:
+                    continue
+                if tbl_dict.get(tst_name, None) is None:
+                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+                                            "-".join(tst_data["name"].
+ split("-")[1:])) + tbl_dict[tst_name] = {"name": name, + "data": OrderedDict()} + try: + tbl_dict[tst_name]["data"][str(build)] = \ + tst_data["result"]["throughput"] + except (TypeError, KeyError): + pass # No data in output.xml for this test + + tbl_lst = list() + for tst_name in tbl_dict.keys(): + if len(tbl_dict[tst_name]["data"]) < 3: + continue + + pd_data = pd.Series(tbl_dict[tst_name]["data"]) + last_key = pd_data.keys()[-1] + win_size = min(pd_data.size, table["window"]) + win_first_idx = pd_data.size - win_size + key_14 = pd_data.keys()[win_first_idx] + long_win_size = min(pd_data.size, table["long-trend-window"]) + median_t = pd_data.rolling(window=win_size, min_periods=2).median() + median_first_idx = median_t.size - long_win_size + try: + max_median = max( + [x for x in median_t.values[median_first_idx:-win_size] + if not isnan(x)]) + except ValueError: + max_median = nan + try: + last_median_t = median_t[last_key] + except KeyError: + last_median_t = nan + try: + median_t_14 = median_t[key_14] + except KeyError: + median_t_14 = nan + + if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0: + rel_change_last = nan + else: + rel_change_last = round( + ((last_median_t - median_t_14) / median_t_14) * 100, 2) + + if isnan(max_median) or isnan(last_median_t) or max_median == 0.0: + rel_change_long = nan + else: + rel_change_long = round( + ((last_median_t - max_median) / max_median) * 100, 2) + + # Classification list: + classification_lst, _ = classify_anomalies(pd_data) + + if classification_lst: + if isnan(rel_change_last) and isnan(rel_change_long): + continue + tbl_lst.append( + [tbl_dict[tst_name]["name"], + '-' if isnan(last_median_t) else + round(last_median_t / 1000000, 2), + '-' if isnan(rel_change_last) else rel_change_last, + '-' if isnan(rel_change_long) else rel_change_long, + classification_lst[win_first_idx:].count("regression"), + classification_lst[win_first_idx:].count("progression"), + classification_lst[win_first_idx:].count("outlier")]) + + tbl_lst.sort(key=lambda rel: rel[0]) + + tbl_sorted = list() + for nrr in range(table["window"], -1, -1): + tbl_reg = [item for item in tbl_lst if item[4] == nrr] + for nrp in range(table["window"], -1, -1): + tbl_pro = [item for item in tbl_reg if item[5] == nrp] + for nro in range(table["window"], -1, -1): + tbl_out = [item for item in tbl_pro if item[6] == nro] + tbl_out.sort(key=lambda rel: rel[2]) + tbl_sorted.extend(tbl_out) + + file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"]) + + logging.info(" Writing file: '{0}'".format(file_name)) + with open(file_name, "w") as file_handler: + file_handler.write(header_str) + for test in tbl_sorted: + file_handler.write(",".join([str(item) for item in test]) + '\n') + + txt_file_name = "{0}.txt".format(table["output-file"]) + txt_table = None + logging.info(" Writing file: '{0}'".format(txt_file_name)) + with open(file_name, 'rb') as csv_file: + csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') + for row in csv_content: + if txt_table is None: + txt_table = prettytable.PrettyTable(row) + else: + txt_table.add_row(row) + txt_table.align["Test case"] = "l" + with open(txt_file_name, "w") as txt_file: + txt_file.write(str(txt_table)) + + +def table_performance_trending_dashboard_html(table, input_data): + """Generate the table(s) with algorithm: + table_performance_trending_dashboard_html specified in the specification + file. + + :param table: Table to generate. + :param input_data: Data to process. 
+ :type table: pandas.Series + :type input_data: InputData + """ + + logging.info(" Generating the table {0} ...". + format(table.get("title", ""))) + + try: + with open(table["input-file"], 'rb') as csv_file: + csv_content = csv.reader(csv_file, delimiter=',', quotechar='"') + csv_lst = [item for item in csv_content] + except KeyError: + logging.warning("The input file is not defined.") + return + except csv.Error as err: + logging.warning("Not possible to process the file '{0}'.\n{1}". + format(table["input-file"], err)) + return + + # Table: + dashboard = ET.Element("table", attrib=dict(width="100%", border='0')) + + # Table header: + tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7")) + for idx, item in enumerate(csv_lst[0]): + alignment = "left" if idx == 0 else "center" + th = ET.SubElement(tr, "th", attrib=dict(align=alignment)) + th.text = item + + # Rows: + colors = {"regression": ("#ffcccc", "#ff9999"), + "progression": ("#c6ecc6", "#9fdf9f"), + "outlier": ("#e6e6e6", "#cccccc"), + "normal": ("#e9f1fb", "#d4e4f7")} + for r_idx, row in enumerate(csv_lst[1:]): + if int(row[4]): + color = "regression" + elif int(row[5]): + color = "progression" + elif int(row[6]): + color = "outlier" + else: + color = "normal" + background = colors[color][r_idx % 2] + tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background)) + + # Columns: + for c_idx, item in enumerate(row): + alignment = "left" if c_idx == 0 else "center" + td = ET.SubElement(tr, "td", attrib=dict(align=alignment)) + # Name: + url = "../trending/" + file_name = "" + anchor = "#" + feature = "" + if c_idx == 0: + if "memif" in item: + file_name = "container_memif.html" + + elif "srv6" in item: + file_name = "srv6.html" + + elif "vhost" in item: + if "l2xcbase" in item or "l2bdbasemaclrn" in item: + file_name = "vm_vhost_l2.html" + elif "ip4base" in item: + file_name = "vm_vhost_ip4.html" + + elif "ipsec" in item: + file_name = "ipsec.html" + + elif "ethip4lispip" in item or "ethip4vxlan" in item: + file_name = "ip4_tunnels.html" + + elif "ip4base" in item or "ip4scale" in item: + file_name = "ip4.html" + if "iacl" in item or "snat" in item or "cop" in item: + feature = "-features" + + elif "ip6base" in item or "ip6scale" in item: + file_name = "ip6.html" + + elif "l2xcbase" in item or "l2xcscale" in item \ + or "l2bdbasemaclrn" in item or "l2bdscale" in item \ + or "l2dbbasemaclrn" in item or "l2dbscale" in item: + file_name = "l2.html" + if "iacl" in item: + feature = "-features" + + if "x520" in item: + anchor += "x520-" + elif "x710" in item: + anchor += "x710-" + elif "xl710" in item: + anchor += "xl710-" + + if "64b" in item: + anchor += "64b-" + elif "78b" in item: + anchor += "78b-" + elif "imix" in item: + anchor += "imix-" + elif "9000b" in item: + anchor += "9000b-" + elif "1518" in item: + anchor += "1518b-" + + if "1t1c" in item: + anchor += "1t1c" + elif "2t2c" in item: + anchor += "2t2c" + elif "4t4c" in item: + anchor += "4t4c" + + url = url + file_name + anchor + feature + + ref = ET.SubElement(td, "a", attrib=dict(href=url)) + ref.text = item + + if c_idx > 0: + td.text = item + + try: + with open(table["output-file"], 'w') as html_file: + logging.info(" Writing file: '{0}'". + format(table["output-file"])) + html_file.write(".. raw:: html\n\n\t") + html_file.write(ET.tostring(dashboard)) + html_file.write("\n\t
<p><br><br></p>
\n") + except KeyError: + logging.warning("The output file is not defined.") + return diff --git a/resources/tools/presentation/new/input_data_files.py b/resources/tools/presentation/new/input_data_files.py new file mode 100644 index 0000000000..cde6d1acc4 --- /dev/null +++ b/resources/tools/presentation/new/input_data_files.py @@ -0,0 +1,230 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Inputs +Download all data. +""" + +import re + +from os import rename, mkdir +from os.path import join +from zipfile import ZipFile, is_zipfile, BadZipfile +from httplib import responses +from requests import get, codes, RequestException, Timeout, TooManyRedirects, \ + HTTPError, ConnectionError + +from errors import PresentationError +from utils import execute_command + +# Chunk size used for file download +CHUNK_SIZE = 512 + +# Separator used in file names +SEPARATOR = "__" + +REGEX_RELEASE = re.compile(r'(\D*)(\d{4}|master)(\D*)') + + +def _download_file(url, file_name, log): + """Download a file with input data. + + :param url: URL to the file to download. + :param file_name: Name of file to download. + :param log: List of log messages. + :type url: str + :type file_name: str + :type log: list of tuples (severity, msg) + :returns: True if the download was successful, otherwise False. + :rtype: bool + """ + + success = False + try: + log.append(("INFO", " Connecting to '{0}' ...".format(url))) + + response = get(url, stream=True) + code = response.status_code + + log.append(("INFO", " {0}: {1}".format(code, responses[code]))) + + if code != codes["OK"]: + return False + + log.append(("INFO", " Downloading the file '{0}' to '{1}' ...". + format(url, file_name))) + + file_handle = open(file_name, "wb") + for chunk in response.iter_content(chunk_size=CHUNK_SIZE): + if chunk: + file_handle.write(chunk) + file_handle.close() + success = True + except ConnectionError as err: + log.append(("ERROR", "Not possible to connect to '{0}'.".format(url))) + log.append(("DEBUG", str(err))) + except HTTPError as err: + log.append(("ERROR", "Invalid HTTP response from '{0}'.".format(url))) + log.append(("DEBUG", str(err))) + except TooManyRedirects as err: + log.append(("ERROR", "Request exceeded the configured number " + "of maximum re-directions.")) + log.append(("DEBUG", str(err))) + except Timeout as err: + log.append(("ERROR", "Request timed out.")) + log.append(("DEBUG", str(err))) + except RequestException as err: + log.append(("ERROR", "Unexpected HTTP request exception.")) + log.append(("DEBUG", str(err))) + except (IOError, ValueError, KeyError) as err: + log.append(("ERROR", "Download failed.")) + log.append(("DEBUG", str(err))) + + log.append(("INFO", " Download finished.")) + return success + + +def _unzip_file(spec, build, pid, log): + """Unzip downloaded source file. + + :param spec: Specification read form the specification file. + :param build: Information about the build. + :param log: List of log messages. 
+ :type spec: Specification + :type build: dict + :type log: list of tuples (severity, msg) + :returns: True if the download was successful, otherwise False. + :rtype: bool + """ + + data_file = spec.input["extract"] + file_name = build["file-name"] + directory = spec.environment["paths"]["DIR[WORKING,DATA]"] + tmp_dir = join(directory, str(pid)) + try: + mkdir(tmp_dir) + except OSError: + pass + new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2], + SEPARATOR, + data_file.split("/")[-1]) + + log.append(("INFO", " Unzipping: '{0}' from '{1}'.". + format(data_file, file_name))) + try: + with ZipFile(file_name, 'r') as zip_file: + zip_file.extract(data_file, tmp_dir) + log.append(("INFO", " Renaming the file '{0}' to '{1}'". + format(join(tmp_dir, data_file), new_name))) + rename(join(tmp_dir, data_file), new_name) + build["file-name"] = new_name + return True + except (BadZipfile, RuntimeError) as err: + log.append(("ERROR", "Failed to unzip the file '{0}': {1}.". + format(file_name, str(err)))) + return False + except OSError as err: + log.append(("ERROR", "Failed to rename the file '{0}': {1}.". + format(data_file, str(err)))) + return False + + +def download_and_unzip_data_file(spec, job, build, pid, log): + """Download and unzip a source file. + + :param spec: Specification read form the specification file. + :param job: Name of the Jenkins job. + :param build: Information about the build. + :param pid: PID of the process executing this method. + :param log: List of log messages. + :type spec: Specification + :type job: str + :type build: dict + :type pid: int + :type log: list of tuples (severity, msg) + :returns: True if the download was successful, otherwise False. + :rtype: bool + """ + + if job.startswith("csit-"): + if spec.input["file-name"].endswith(".zip"): + url = spec.environment["urls"]["URL[JENKINS,CSIT]"] + elif spec.input["file-name"].endswith(".gz"): + url = spec.environment["urls"]["URL[NEXUS,LOG]"] + else: + log.append(("ERROR", "Not supported file format.")) + return False + elif job.startswith("hc2vpp-"): + url = spec.environment["urls"]["URL[JENKINS,HC]"] + else: + raise PresentationError("No url defined for the job '{}'.". + format(job)) + file_name = spec.input["file-name"] + full_name = spec.input["download-path"]. \ + format(job=job, build=build["build"], filename=file_name) + url = "{0}/{1}".format(url, full_name) + new_name = join(spec.environment["paths"]["DIR[WORKING,DATA]"], + "{job}{sep}{build}{sep}{name}". + format(job=job, sep=SEPARATOR, build=build["build"], + name=file_name)) + # Download the file from the defined source (Jenkins, logs.fd.io): + success = _download_file(url, new_name, log) + + if success and new_name.endswith(".zip"): + if not is_zipfile(new_name): + success = False + + # If not successful, download from docs.fd.io: + if not success: + log.append(("INFO", " Trying to download from https://docs.fd.io:")) + release = re.search(REGEX_RELEASE, job).group(2) + for rls in (release, "master"): + nexus_file_name = "{job}{sep}{build}{sep}{name}". \ + format(job=job, sep=SEPARATOR, build=build["build"], + name=file_name) + try: + rls = "rls{0}".format(int(rls)) + except ValueError: + pass + url = "{url}/{release}/{dir}/{file}". 
\ + format(url=spec.environment["urls"]["URL[NEXUS]"], + release=rls, + dir=spec.environment["urls"]["DIR[NEXUS]"], + file=nexus_file_name) + success = _download_file(url, new_name, log) + if success: + break + + if success: + build["file-name"] = new_name + else: + return False + + if spec.input["file-name"].endswith(".gz"): + if "docs.fd.io" in url: + execute_command("gzip --decompress --keep --force {0}". + format(new_name)) + else: + rename(new_name, new_name[:-3]) + execute_command("gzip --keep {0}".format(new_name[:-3])) + build["file-name"] = new_name[:-3] + + if new_name.endswith(".zip"): + if is_zipfile(new_name): + return _unzip_file(spec, build, pid, log) + else: + log.append(("ERROR", + "Zip file '{0}' is corrupted.".format(new_name))) + return False + else: + return True diff --git a/resources/tools/presentation/new/input_data_parser.py b/resources/tools/presentation/new/input_data_parser.py new file mode 100644 index 0000000000..beec34c106 --- /dev/null +++ b/resources/tools/presentation/new/input_data_parser.py @@ -0,0 +1,1093 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data pre-processing + +- extract data from output.xml files generated by Jenkins jobs and store in + pandas' Series, +- provide access to the data. +""" + +import multiprocessing +import os +import re +import pandas as pd +import logging + +from robot.api import ExecutionResult, ResultVisitor +from robot import errors +from collections import OrderedDict +from string import replace +from os import remove + +from input_data_files import download_and_unzip_data_file +from utils import Worker + + +class ExecutionChecker(ResultVisitor): + """Class to traverse through the test suite structure. 
+ + The functionality implemented in this class generates a json structure: + + Performance tests: + + { + "metadata": { # Optional + "version": "VPP version", + "job": "Jenkins job name", + "build": "Information about the build" + }, + "suites": { + "Suite name 1": { + "doc": "Suite 1 documentation", + "parent": "Suite 1 parent", + "level": "Level of the suite in the suite hierarchy" + } + "Suite name N": { + "doc": "Suite N documentation", + "parent": "Suite 2 parent", + "level": "Level of the suite in the suite hierarchy" + } + } + "tests": { + "ID": { + "name": "Test name", + "parent": "Name of the parent of the test", + "doc": "Test documentation" + "msg": "Test message" + "tags": ["tag 1", "tag 2", "tag n"], + "type": "PDR" | "NDR", + "throughput": { + "value": int, + "unit": "pps" | "bps" | "percentage" + }, + "latency": { + "direction1": { + "100": { + "min": int, + "avg": int, + "max": int + }, + "50": { # Only for NDR + "min": int, + "avg": int, + "max": int + }, + "10": { # Only for NDR + "min": int, + "avg": int, + "max": int + } + }, + "direction2": { + "100": { + "min": int, + "avg": int, + "max": int + }, + "50": { # Only for NDR + "min": int, + "avg": int, + "max": int + }, + "10": { # Only for NDR + "min": int, + "avg": int, + "max": int + } + } + }, + "lossTolerance": "lossTolerance", # Only for PDR + "vat-history": "DUT1 and DUT2 VAT History" + }, + "show-run": "Show Run" + }, + "ID" { + # next test + } + } + } + + Functional tests: + + + { + "metadata": { # Optional + "version": "VPP version", + "job": "Jenkins job name", + "build": "Information about the build" + }, + "suites": { + "Suite name 1": { + "doc": "Suite 1 documentation", + "parent": "Suite 1 parent", + "level": "Level of the suite in the suite hierarchy" + } + "Suite name N": { + "doc": "Suite N documentation", + "parent": "Suite 2 parent", + "level": "Level of the suite in the suite hierarchy" + } + } + "tests": { + "ID": { + "name": "Test name", + "parent": "Name of the parent of the test", + "doc": "Test documentation" + "msg": "Test message" + "tags": ["tag 1", "tag 2", "tag n"], + "vat-history": "DUT1 and DUT2 VAT History" + "show-run": "Show Run" + "status": "PASS" | "FAIL" + }, + "ID" { + # next test + } + } + } + + .. note:: ID is the lowercase full path to the test. + """ + + REGEX_RATE = re.compile(r'^[\D\d]*FINAL_RATE:\s(\d+\.\d+)\s(\w+)') + + REGEX_LAT_NDR = re.compile(r'^[\D\d]*' + r'LAT_\d+%NDR:\s\[\'(-?\d+\/-?\d+/-?\d+)\',' + r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n' + r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\',' + r'\s\'(-?\d+/-?\d+/-?\d+)\'\]\s\n' + r'LAT_\d+%NDR:\s\[\'(-?\d+/-?\d+/-?\d+)\',' + r'\s\'(-?\d+/-?\d+/-?\d+)\'\]') + + REGEX_LAT_PDR = re.compile(r'^[\D\d]*' + r'LAT_\d+%PDR:\s\[\'(-?\d+/-?\d+/-?\d+)\',' + r'\s\'(-?\d+/-?\d+/-?\d+)\'\][\D\d]*') + + REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s' + r'[\D\d]*') + + REGEX_VERSION = re.compile(r"(return STDOUT Version:\s*)(.*)") + + REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$') + + REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s' + r'tx\s(\d*),\srx\s(\d*)') + + def __init__(self, metadata): + """Initialisation. + + :param metadata: Key-value pairs to be included in "metadata" part of + JSON structure. 
+ :type metadata: dict + """ + + # Type of message to parse out from the test messages + self._msg_type = None + + # VPP version + self._version = None + + # Number of VAT History messages found: + # 0 - no message + # 1 - VAT History of DUT1 + # 2 - VAT History of DUT2 + self._lookup_kw_nr = 0 + self._vat_history_lookup_nr = 0 + + # Number of Show Running messages found + # 0 - no message + # 1 - Show run message found + self._show_run_lookup_nr = 0 + + # Test ID of currently processed test- the lowercase full path to the + # test + self._test_ID = None + + # The main data structure + self._data = { + "metadata": OrderedDict(), + "suites": OrderedDict(), + "tests": OrderedDict() + } + + # Save the provided metadata + for key, val in metadata.items(): + self._data["metadata"][key] = val + + # Dictionary defining the methods used to parse different types of + # messages + self.parse_msg = { + "setup-version": self._get_version, + "teardown-vat-history": self._get_vat_history, + "test-show-runtime": self._get_show_run + } + + @property + def data(self): + """Getter - Data parsed from the XML file. + + :returns: Data parsed from the XML file. + :rtype: dict + """ + return self._data + + def _get_version(self, msg): + """Called when extraction of VPP version is required. + + :param msg: Message to process. + :type msg: Message + :returns: Nothing. + """ + + if msg.message.count("return STDOUT Version:"): + self._version = str(re.search(self.REGEX_VERSION, msg.message). + group(2)) + self._data["metadata"]["version"] = self._version + self._data["metadata"]["generated"] = msg.timestamp + self._msg_type = None + + def _get_vat_history(self, msg): + """Called when extraction of VAT command history is required. + + :param msg: Message to process. + :type msg: Message + :returns: Nothing. + """ + if msg.message.count("VAT command history:"): + self._vat_history_lookup_nr += 1 + if self._vat_history_lookup_nr == 1: + self._data["tests"][self._test_ID]["vat-history"] = str() + else: + self._msg_type = None + text = re.sub("[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3} " + "VAT command history:", "", msg.message, count=1). \ + replace("\n\n", "\n").replace('\n', ' |br| ').\ + replace('\r', '').replace('"', "'") + + self._data["tests"][self._test_ID]["vat-history"] += " |br| " + self._data["tests"][self._test_ID]["vat-history"] += \ + "**DUT" + str(self._vat_history_lookup_nr) + ":** " + text + + def _get_show_run(self, msg): + """Called when extraction of VPP operational data (output of CLI command + Show Runtime) is required. + + :param msg: Message to process. + :type msg: Message + :returns: Nothing. + """ + if msg.message.count("return STDOUT Thread "): + self._show_run_lookup_nr += 1 + if self._lookup_kw_nr == 1 and self._show_run_lookup_nr == 1: + self._data["tests"][self._test_ID]["show-run"] = str() + if self._lookup_kw_nr > 1: + self._msg_type = None + if self._show_run_lookup_nr == 1: + text = msg.message.replace("vat# ", "").\ + replace("return STDOUT ", "").replace("\n\n", "\n").\ + replace('\n', ' |br| ').\ + replace('\r', '').replace('"', "'") + try: + self._data["tests"][self._test_ID]["show-run"] += " |br| " + self._data["tests"][self._test_ID]["show-run"] += \ + "**DUT" + str(self._lookup_kw_nr) + ":** |br| " + text + except KeyError: + pass + + def _get_latency(self, msg, test_type): + """Get the latency data from the test message. + + :param msg: Message to be parsed. + :param test_type: Type of the test - NDR or PDR. 
+ :type msg: str + :type test_type: str + :returns: Latencies parsed from the message. + :rtype: dict + """ + + if test_type == "NDR": + groups = re.search(self.REGEX_LAT_NDR, msg) + groups_range = range(1, 7) + elif test_type == "PDR": + groups = re.search(self.REGEX_LAT_PDR, msg) + groups_range = range(1, 3) + else: + return {} + + latencies = list() + for idx in groups_range: + try: + lat = [int(item) for item in str(groups.group(idx)).split('/')] + except (AttributeError, ValueError): + lat = [-1, -1, -1] + latencies.append(lat) + + keys = ("min", "avg", "max") + latency = { + "direction1": { + }, + "direction2": { + } + } + + latency["direction1"]["100"] = dict(zip(keys, latencies[0])) + latency["direction2"]["100"] = dict(zip(keys, latencies[1])) + if test_type == "NDR": + latency["direction1"]["50"] = dict(zip(keys, latencies[2])) + latency["direction2"]["50"] = dict(zip(keys, latencies[3])) + latency["direction1"]["10"] = dict(zip(keys, latencies[4])) + latency["direction2"]["10"] = dict(zip(keys, latencies[5])) + + return latency + + def visit_suite(self, suite): + """Implements traversing through the suite and its direct children. + + :param suite: Suite to process. + :type suite: Suite + :returns: Nothing. + """ + if self.start_suite(suite) is not False: + suite.suites.visit(self) + suite.tests.visit(self) + self.end_suite(suite) + + def start_suite(self, suite): + """Called when suite starts. + + :param suite: Suite to process. + :type suite: Suite + :returns: Nothing. + """ + + try: + parent_name = suite.parent.name + except AttributeError: + return + + doc_str = suite.doc.replace('"', "'").replace('\n', ' ').\ + replace('\r', '').replace('*[', ' |br| *[').replace("*", "**") + doc_str = replace(doc_str, ' |br| *[', '*[', maxreplace=1) + + self._data["suites"][suite.longname.lower().replace('"', "'"). + replace(" ", "_")] = { + "name": suite.name.lower(), + "doc": doc_str, + "parent": parent_name, + "level": len(suite.longname.split(".")) + } + + suite.keywords.visit(self) + + def end_suite(self, suite): + """Called when suite ends. + + :param suite: Suite to process. + :type suite: Suite + :returns: Nothing. + """ + pass + + def visit_test(self, test): + """Implements traversing through the test. + + :param test: Test to process. + :type test: Test + :returns: Nothing. + """ + if self.start_test(test) is not False: + test.keywords.visit(self) + self.end_test(test) + + def start_test(self, test): + """Called when test starts. + + :param test: Test to process. + :type test: Test + :returns: Nothing. + """ + + tags = [str(tag) for tag in test.tags] + test_result = dict() + test_result["name"] = test.name.lower() + test_result["parent"] = test.parent.name.lower() + test_result["tags"] = tags + doc_str = test.doc.replace('"', "'").replace('\n', ' '). \ + replace('\r', '').replace('[', ' |br| [') + test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1) + test_result["msg"] = test.message.replace('\n', ' |br| '). 
\ + replace('\r', '').replace('"', "'") + if test.status == "PASS" and ("NDRPDRDISC" in tags or + "TCP" in tags or + "MRR" in tags): + if "NDRDISC" in tags: + test_type = "NDR" + elif "PDRDISC" in tags: + test_type = "PDR" + elif "TCP" in tags: + test_type = "TCP" + elif "MRR" in tags: + test_type = "MRR" + else: + return + + test_result["type"] = test_type + + if test_type in ("NDR", "PDR"): + try: + rate_value = str(re.search( + self.REGEX_RATE, test.message).group(1)) + except AttributeError: + rate_value = "-1" + try: + rate_unit = str(re.search( + self.REGEX_RATE, test.message).group(2)) + except AttributeError: + rate_unit = "-1" + + test_result["throughput"] = dict() + test_result["throughput"]["value"] = \ + int(rate_value.split('.')[0]) + test_result["throughput"]["unit"] = rate_unit + test_result["latency"] = \ + self._get_latency(test.message, test_type) + if test_type == "PDR": + test_result["lossTolerance"] = str(re.search( + self.REGEX_TOLERANCE, test.message).group(1)) + + elif test_type in ("TCP", ): + groups = re.search(self.REGEX_TCP, test.message) + test_result["result"] = dict() + test_result["result"]["value"] = int(groups.group(2)) + test_result["result"]["unit"] = groups.group(1) + elif test_type in ("MRR", ): + groups = re.search(self.REGEX_MRR, test.message) + test_result["result"] = dict() + test_result["result"]["duration"] = int(groups.group(1)) + test_result["result"]["tx"] = int(groups.group(2)) + test_result["result"]["rx"] = int(groups.group(3)) + test_result["result"]["throughput"] = int( + test_result["result"]["rx"] / + test_result["result"]["duration"]) + else: + test_result["status"] = test.status + + self._test_ID = test.longname.lower() + self._data["tests"][self._test_ID] = test_result + + def end_test(self, test): + """Called when test ends. + + :param test: Test to process. + :type test: Test + :returns: Nothing. + """ + pass + + def visit_keyword(self, keyword): + """Implements traversing through the keyword and its child keywords. + + :param keyword: Keyword to process. + :type keyword: Keyword + :returns: Nothing. + """ + if self.start_keyword(keyword) is not False: + self.end_keyword(keyword) + + def start_keyword(self, keyword): + """Called when keyword starts. Default implementation does nothing. + + :param keyword: Keyword to process. + :type keyword: Keyword + :returns: Nothing. + """ + try: + if keyword.type == "setup": + self.visit_setup_kw(keyword) + elif keyword.type == "teardown": + self._lookup_kw_nr = 0 + self.visit_teardown_kw(keyword) + else: + self._lookup_kw_nr = 0 + self.visit_test_kw(keyword) + except AttributeError: + pass + + def end_keyword(self, keyword): + """Called when keyword ends. Default implementation does nothing. + + :param keyword: Keyword to process. + :type keyword: Keyword + :returns: Nothing. + """ + pass + + def visit_test_kw(self, test_kw): + """Implements traversing through the test keyword and its child + keywords. + + :param test_kw: Keyword to process. + :type test_kw: Keyword + :returns: Nothing. + """ + for keyword in test_kw.keywords: + if self.start_test_kw(keyword) is not False: + self.visit_test_kw(keyword) + self.end_test_kw(keyword) + + def start_test_kw(self, test_kw): + """Called when test keyword starts. Default implementation does + nothing. + + :param test_kw: Keyword to process. + :type test_kw: Keyword + :returns: Nothing. 
+        """
+        if test_kw.name.count("Show Runtime Counters On All Duts"):
+            self._lookup_kw_nr += 1
+            self._show_run_lookup_nr = 0
+            self._msg_type = "test-show-runtime"
+            test_kw.messages.visit(self)
+
+    def end_test_kw(self, test_kw):
+        """Called when keyword ends. Default implementation does nothing.
+
+        :param test_kw: Keyword to process.
+        :type test_kw: Keyword
+        :returns: Nothing.
+        """
+        pass
+
+    def visit_setup_kw(self, setup_kw):
+        """Implements traversing through the setup keyword and its child
+        keywords.
+
+        :param setup_kw: Keyword to process.
+        :type setup_kw: Keyword
+        :returns: Nothing.
+        """
+        for keyword in setup_kw.keywords:
+            if self.start_setup_kw(keyword) is not False:
+                self.visit_setup_kw(keyword)
+                self.end_setup_kw(keyword)
+
+    def start_setup_kw(self, setup_kw):
+        """Called when setup keyword starts. Default implementation does
+        nothing.
+
+        :param setup_kw: Keyword to process.
+        :type setup_kw: Keyword
+        :returns: Nothing.
+        """
+        if setup_kw.name.count("Show Vpp Version On All Duts") \
+                and not self._version:
+            self._msg_type = "setup-version"
+            setup_kw.messages.visit(self)
+
+    def end_setup_kw(self, setup_kw):
+        """Called when keyword ends. Default implementation does nothing.
+
+        :param setup_kw: Keyword to process.
+        :type setup_kw: Keyword
+        :returns: Nothing.
+        """
+        pass
+
+    def visit_teardown_kw(self, teardown_kw):
+        """Implements traversing through the teardown keyword and its child
+        keywords.
+
+        :param teardown_kw: Keyword to process.
+        :type teardown_kw: Keyword
+        :returns: Nothing.
+        """
+        for keyword in teardown_kw.keywords:
+            if self.start_teardown_kw(keyword) is not False:
+                self.visit_teardown_kw(keyword)
+                self.end_teardown_kw(keyword)
+
+    def start_teardown_kw(self, teardown_kw):
+        """Called when teardown keyword starts. Default implementation does
+        nothing.
+
+        :param teardown_kw: Keyword to process.
+        :type teardown_kw: Keyword
+        :returns: Nothing.
+        """
+
+        if teardown_kw.name.count("Show Vat History On All Duts"):
+            self._vat_history_lookup_nr = 0
+            self._msg_type = "teardown-vat-history"
+            teardown_kw.messages.visit(self)
+
+    def end_teardown_kw(self, teardown_kw):
+        """Called when keyword ends. Default implementation does nothing.
+
+        :param teardown_kw: Keyword to process.
+        :type teardown_kw: Keyword
+        :returns: Nothing.
+        """
+        pass
+
+    def visit_message(self, msg):
+        """Implements visiting the message.
+
+        :param msg: Message to process.
+        :type msg: Message
+        :returns: Nothing.
+        """
+        if self.start_message(msg) is not False:
+            self.end_message(msg)
+
+    def start_message(self, msg):
+        """Called when message starts. Get required information from messages:
+        - VPP version.
+
+        :param msg: Message to process.
+        :type msg: Message
+        :returns: Nothing.
+        """
+
+        if self._msg_type:
+            self.parse_msg[self._msg_type](msg)
+
+    def end_message(self, msg):
+        """Called when message ends. Default implementation does nothing.
+
+        :param msg: Message to process.
+        :type msg: Message
+        :returns: Nothing.
+        """
+        pass
+
+
+class InputData(object):
+    """Input data
+
+    The data is extracted from output.xml files generated by Jenkins jobs and
+    stored in pandas' DataFrames.
+
+    The data structure:
+    - job name
+      - build number
+        - metadata
+          - job
+          - build
+          - vpp version
+        - suites
+        - tests
+          - ID: test data (as described in ExecutionChecker documentation)
+    """
+
+    def __init__(self, spec):
+        """Initialization.
+
+        :param spec: Specification.
+ :type spec: Specification + """ + + # Specification: + self._cfg = spec + + # Data store: + self._input_data = pd.Series() + + @property + def data(self): + """Getter - Input data. + + :returns: Input data + :rtype: pandas.Series + """ + return self._input_data + + def metadata(self, job, build): + """Getter - metadata + + :param job: Job which metadata we want. + :param build: Build which metadata we want. + :type job: str + :type build: str + :returns: Metadata + :rtype: pandas.Series + """ + + return self.data[job][build]["metadata"] + + def suites(self, job, build): + """Getter - suites + + :param job: Job which suites we want. + :param build: Build which suites we want. + :type job: str + :type build: str + :returns: Suites. + :rtype: pandas.Series + """ + + return self.data[job][str(build)]["suites"] + + def tests(self, job, build): + """Getter - tests + + :param job: Job which tests we want. + :param build: Build which tests we want. + :type job: str + :type build: str + :returns: Tests. + :rtype: pandas.Series + """ + + return self.data[job][build]["tests"] + + @staticmethod + def _parse_tests(job, build, log): + """Process data from robot output.xml file and return JSON structured + data. + + :param job: The name of job which build output data will be processed. + :param build: The build which output data will be processed. + :param log: List of log messages. + :type job: str + :type build: dict + :type log: list of tuples (severity, msg) + :returns: JSON data structure. + :rtype: dict + """ + + metadata = { + "job": job, + "build": build + } + + with open(build["file-name"], 'r') as data_file: + try: + result = ExecutionResult(data_file) + except errors.DataError as err: + log.append(("ERROR", "Error occurred while parsing output.xml: " + "{0}".format(err))) + return None + checker = ExecutionChecker(metadata) + result.visit(checker) + + return checker.data + + def _download_and_parse_build(self, pid, data_queue, job, build, repeat): + """Download and parse the input data file. + + :param pid: PID of the process executing this method. + :param data_queue: Shared memory between processes. Queue which keeps + the result data. This data is then read by the main process and used + in further processing. + :param job: Name of the Jenkins job which generated the processed input + file. + :param build: Information about the Jenkins build which generated the + processed input file. + :param repeat: Repeat the download specified number of times if not + successful. + :type pid: int + :type data_queue: multiprocessing.Manager().Queue() + :type job: str + :type build: dict + :type repeat: int + """ + + logs = list() + + logging.info(" Processing the job/build: {0}: {1}". + format(job, build["build"])) + + logs.append(("INFO", " Processing the job/build: {0}: {1}". + format(job, build["build"]))) + + state = "failed" + success = False + data = None + do_repeat = repeat + while do_repeat: + success = download_and_unzip_data_file(self._cfg, job, build, pid, + logs) + if success: + break + do_repeat -= 1 + if not success: + logs.append(("ERROR", "It is not possible to download the input " + "data file from the job '{job}', build " + "'{build}', or it is damaged. Skipped.". + format(job=job, build=build["build"]))) + if success: + logs.append(("INFO", " Processing data from the build '{0}' ...". + format(build["build"]))) + data = InputData._parse_tests(job, build, logs) + if data is None: + logs.append(("ERROR", "Input data file from the job '{job}', " + "build '{build}' is damaged. 
Skipped.". + format(job=job, build=build["build"]))) + else: + state = "processed" + + try: + remove(build["file-name"]) + except OSError as err: + logs.append(("ERROR", "Cannot remove the file '{0}': {1}". + format(build["file-name"], err))) + logs.append(("INFO", " Done.")) + + result = { + "data": data, + "state": state, + "job": job, + "build": build, + "logs": logs + } + data_queue.put(result) + + def download_and_parse_data(self, repeat=1): + """Download the input data files, parse input data from input files and + store in pandas' Series. + + :param repeat: Repeat the download specified number of times if not + successful. + :type repeat: int + """ + + logging.info("Downloading and parsing input files ...") + + work_queue = multiprocessing.JoinableQueue() + manager = multiprocessing.Manager() + data_queue = manager.Queue() + cpus = multiprocessing.cpu_count() + + workers = list() + for cpu in range(cpus): + worker = Worker(work_queue, + data_queue, + self._download_and_parse_build) + worker.daemon = True + worker.start() + workers.append(worker) + os.system("taskset -p -c {0} {1} > /dev/null 2>&1". + format(cpu, worker.pid)) + + for job, builds in self._cfg.builds.items(): + for build in builds: + work_queue.put((job, build, repeat)) + + work_queue.join() + + logging.info("Done.") + + while not data_queue.empty(): + result = data_queue.get() + + job = result["job"] + build_nr = result["build"]["build"] + + if result["data"]: + data = result["data"] + build_data = pd.Series({ + "metadata": pd.Series(data["metadata"].values(), + index=data["metadata"].keys()), + "suites": pd.Series(data["suites"].values(), + index=data["suites"].keys()), + "tests": pd.Series(data["tests"].values(), + index=data["tests"].keys())}) + + if self._input_data.get(job, None) is None: + self._input_data[job] = pd.Series() + self._input_data[job][str(build_nr)] = build_data + + self._cfg.set_input_file_name(job, build_nr, + result["build"]["file-name"]) + + self._cfg.set_input_state(job, build_nr, result["state"]) + + for item in result["logs"]: + if item[0] == "INFO": + logging.info(item[1]) + elif item[0] == "ERROR": + logging.error(item[1]) + elif item[0] == "DEBUG": + logging.debug(item[1]) + elif item[0] == "CRITICAL": + logging.critical(item[1]) + elif item[0] == "WARNING": + logging.warning(item[1]) + + del data_queue + + # Terminate all workers + for worker in workers: + worker.terminate() + worker.join() + + logging.info("Done.") + + @staticmethod + def _end_of_tag(tag_filter, start=0, closer="'"): + """Return the index of character in the string which is the end of tag. + + :param tag_filter: The string where the end of tag is being searched. + :param start: The index where the searching is stated. + :param closer: The character which is the tag closer. + :type tag_filter: str + :type start: int + :type closer: str + :returns: The index of the tag closer. + :rtype: int + """ + + try: + idx_opener = tag_filter.index(closer, start) + return tag_filter.index(closer, idx_opener + 1) + except ValueError: + return None + + @staticmethod + def _condition(tag_filter): + """Create a conditional statement from the given tag filter. + + :param tag_filter: Filter based on tags from the element specification. + :type tag_filter: str + :returns: Conditional statement which can be evaluated. 
+ :rtype: str + """ + + index = 0 + while True: + index = InputData._end_of_tag(tag_filter, index) + if index is None: + return tag_filter + index += 1 + tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:] + + def filter_data(self, element, params=None, data_set="tests", + continue_on_error=False): + """Filter required data from the given jobs and builds. + + The output data structure is: + + - job 1 + - build 1 + - test (suite) 1 ID: + - param 1 + - param 2 + ... + - param n + ... + - test (suite) n ID: + ... + ... + - build n + ... + - job n + + :param element: Element which will use the filtered data. + :param params: Parameters which will be included in the output. If None, + all parameters are included. + :param data_set: The set of data to be filtered: tests, suites, + metadata. + :param continue_on_error: Continue if there is error while reading the + data. The Item will be empty then + :type element: pandas.Series + :type params: list + :type data_set: str + :type continue_on_error: bool + :returns: Filtered data. + :rtype pandas.Series + """ + + try: + if element["filter"] in ("all", "template"): + cond = "True" + else: + cond = InputData._condition(element["filter"]) + logging.debug(" Filter: {0}".format(cond)) + except KeyError: + logging.error(" No filter defined.") + return None + + if params is None: + params = element.get("parameters", None) + + data = pd.Series() + try: + for job, builds in element["data"].items(): + data[job] = pd.Series() + for build in builds: + data[job][str(build)] = pd.Series() + try: + data_iter = self.data[job][str(build)][data_set].\ + iteritems() + except KeyError: + if continue_on_error: + continue + else: + return None + for test_ID, test_data in data_iter: + if eval(cond, {"tags": test_data.get("tags", "")}): + data[job][str(build)][test_ID] = pd.Series() + if params is None: + for param, val in test_data.items(): + data[job][str(build)][test_ID][param] = val + else: + for param in params: + try: + data[job][str(build)][test_ID][param] =\ + test_data[param] + except KeyError: + data[job][str(build)][test_ID][param] =\ + "No Data" + return data + + except (KeyError, IndexError, ValueError) as err: + logging.error(" Missing mandatory parameter in the element " + "specification: {0}".format(err)) + return None + except AttributeError: + return None + except SyntaxError: + logging.error(" The filter '{0}' is not correct. Check if all " + "tags are enclosed by apostrophes.".format(cond)) + return None + + @staticmethod + def merge_data(data): + """Merge data from more jobs and builds to a simple data structure. + + The output data structure is: + + - test (suite) 1 ID: + - param 1 + - param 2 + ... + - param n + ... + - test (suite) n ID: + ... + + :param data: Data to merge. + :type data: pandas.Series + :returns: Merged data. + :rtype: pandas.Series + """ + + logging.info(" Merging data ...") + + merged_data = pd.Series() + for _, builds in data.iteritems(): + for _, item in builds.iteritems(): + for ID, item_data in item.iteritems(): + merged_data[ID] = item_data + + return merged_data diff --git a/resources/tools/presentation/new/jumpavg/AbstractGroupClassifier.py b/resources/tools/presentation/new/jumpavg/AbstractGroupClassifier.py new file mode 100644 index 0000000000..26db758ea8 --- /dev/null +++ b/resources/tools/presentation/new/jumpavg/AbstractGroupClassifier.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABCMeta, abstractmethod + + +class AbstractGroupClassifier(object): + + __metaclass__ = ABCMeta + + @abstractmethod + def classify(self, values): + """Divide values into consecutive groups with metadata. + + The metadata does not need to follow any specific rules, + although progression/regression/outlier description would be fine. + + :param values: Sequence of runs to classify. + :type values: Iterable of float or of AvgStdevMetadata + :returns: Classified groups + :rtype: Iterable of RunGroup + """ + pass diff --git a/resources/tools/presentation/new/jumpavg/AbstractGroupMetadata.py b/resources/tools/presentation/new/jumpavg/AbstractGroupMetadata.py new file mode 100644 index 0000000000..6084db5a1a --- /dev/null +++ b/resources/tools/presentation/new/jumpavg/AbstractGroupMetadata.py @@ -0,0 +1,37 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABCMeta, abstractmethod + + +class AbstractGroupMetadata(object): + + __metaclass__ = ABCMeta + + @abstractmethod + def __str__(self): + """Return string with human readable description of the group. + + :returns: Readable description. + :rtype: str + """ + pass + + @abstractmethod + def __repr__(self): + """Return string executable as Python constructor call. + + :returns: Executable constructor call. + :rtype: str + """ + pass diff --git a/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py b/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py new file mode 100644 index 0000000000..bd7eca1824 --- /dev/null +++ b/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py @@ -0,0 +1,50 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class AvgStdevMetadata(object): + """Class for metadata specifying the average and standard deviation.""" + + def __init__(self, size=0, avg=0.0, stdev=0.0): + """Construct the metadata by setting the values needed. 
diff --git a/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py b/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py
new file mode 100644
index 0000000000..bd7eca1824
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/AvgStdevMetadata.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class AvgStdevMetadata(object):
+    """Class for metadata specifying the average and standard deviation."""
+
+    def __init__(self, size=0, avg=0.0, stdev=0.0):
+        """Construct the metadata by setting the values needed.
+
+        The values are sanitized, so faulty callers do not cause math errors.
+
+        :param size: Number of values participating in this group.
+        :param avg: Population average of the participating sample values.
+        :param stdev: Population standard deviation of the sample values.
+        :type size: int
+        :type avg: float
+        :type stdev: float
+        """
+        self.size = size if size >= 0 else 0
+        self.avg = avg if size >= 1 else 0.0
+        self.stdev = stdev if size >= 2 else 0.0
+
+    def __str__(self):
+        """Return string with human readable description of the group.
+
+        :returns: Readable description.
+        :rtype: str
+        """
+        return "size={size} avg={avg} stdev={stdev}".format(
+            size=self.size, avg=self.avg, stdev=self.stdev)
+
+    def __repr__(self):
+        """Return string executable as Python constructor call.
+
+        :returns: Executable constructor call.
+        :rtype: str
+        """
+        return "AvgStdevMetadata(size={size},avg={avg},stdev={stdev})".format(
+            size=self.size, avg=self.avg, stdev=self.stdev)
diff --git a/resources/tools/presentation/new/jumpavg/AvgStdevMetadataFactory.py b/resources/tools/presentation/new/jumpavg/AvgStdevMetadataFactory.py
new file mode 100644
index 0000000000..d7d0517a57
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/AvgStdevMetadataFactory.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+from AvgStdevMetadata import AvgStdevMetadata
+
+
+class AvgStdevMetadataFactory(object):
+    """Class factory which creates avg,stdev metadata from data."""
+
+    @staticmethod
+    def from_data(values):
+        """Return new metadata object fitting the values.
+
+        :param values: Run values to be processed.
+        :type values: Iterable of float or of AvgStdevMetadata
+        :returns: The metadata matching the values.
+        :rtype: AvgStdevMetadata
+        """
+        sum_0 = 0
+        sum_1 = 0.0
+        sum_2 = 0.0
+        for value in values:
+            if isinstance(value, AvgStdevMetadata):
+                sum_0 += value.size
+                sum_1 += value.avg * value.size
+                sum_2 += value.stdev * value.stdev * value.size
+                sum_2 += value.avg * value.avg * value.size
+            else:  # The value is assumed to be float.
+                sum_0 += 1
+                sum_1 += value
+                sum_2 += value * value
+        if sum_0 < 1:
+            return AvgStdevMetadata()
+        avg = sum_1 / sum_0
+        stdev = math.sqrt(sum_2 / sum_0 - avg * avg)
+        ret_obj = AvgStdevMetadata(size=sum_0, avg=avg, stdev=stdev)
+        return ret_obj
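Because from_data() pools the zeroth, first and second moments, a group already summarized as AvgStdevMetadata contributes exactly as its raw samples would. A quick numeric check, assuming the modules are importable from the jumpavg directory (the values are made up):

    from AvgStdevMetadataFactory import AvgStdevMetadataFactory

    raw = [2.0, 4.0, 4.0, 6.0]
    direct = AvgStdevMetadataFactory.from_data(raw)
    head = AvgStdevMetadataFactory.from_data(raw[:2])  # summarize a prefix
    pooled = AvgStdevMetadataFactory.from_data([head] + raw[2:])
    # Both paths give avg=4.0 and stdev=sqrt(2).
    assert abs(pooled.avg - direct.avg) < 1e-12
    assert abs(pooled.stdev - direct.stdev) < 1e-12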
diff --git a/resources/tools/presentation/new/jumpavg/BitCountingClassifier.py b/resources/tools/presentation/new/jumpavg/BitCountingClassifier.py
new file mode 100644
index 0000000000..69b1d65bb2
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/BitCountingClassifier.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from BitCountingGroup import BitCountingGroup
+from BitCountingGroupList import BitCountingGroupList
+from BitCountingMetadataFactory import BitCountingMetadataFactory
+from ClassifiedMetadataFactory import ClassifiedMetadataFactory
+
+
+class BitCountingClassifier(object):
+
+    @staticmethod
+    def classify(values):
+        """Return the values in groups of optimal bit count.
+
+        TODO: Could we return BitCountingGroupList and let caller process it?
+
+        :param values: Sequence of runs to classify.
+        :type values: Iterable of float or of AvgStdevMetadata
+        :returns: Classified group list.
+        :rtype: list of BitCountingGroup
+        """
+        max_value = BitCountingMetadataFactory.find_max_value(values)
+        factory = BitCountingMetadataFactory(max_value)
+        opened_at = []
+        closed_before = [BitCountingGroupList()]
+        for index, value in enumerate(values):
+            singleton = BitCountingGroup(factory, [value])
+            newly_opened = closed_before[index].with_group_appended(singleton)
+            opened_at.append(newly_opened)
+            record_group_list = newly_opened
+            for previous in range(index):
+                previous_opened_list = opened_at[previous]
+                still_opened = (
+                    previous_opened_list.with_value_added_to_last_group(value))
+                opened_at[previous] = still_opened
+                if still_opened.bits < record_group_list.bits:
+                    record_group_list = still_opened
+            closed_before.append(record_group_list)
+        partition = closed_before[-1]
+        previous_average = partition[0].metadata.avg
+        for group in partition:
+            if group.metadata.avg == previous_average:
+                group.metadata = ClassifiedMetadataFactory.with_classification(
+                    group.metadata, "normal")
+            elif group.metadata.avg < previous_average:
+                group.metadata = ClassifiedMetadataFactory.with_classification(
+                    group.metadata, "regression")
+            elif group.metadata.avg > previous_average:
+                group.metadata = ClassifiedMetadataFactory.with_classification(
+                    group.metadata, "progression")
+            previous_average = group.metadata.avg
+        return partition.group_list
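End to end, classify() partitions a run sequence wherever paying the encoding cost of a new average is cheaper than stretching the current group. A hedged usage sketch with made-up trending samples (the expected grouping is a plausible outcome, not a guaranteed one):

    from BitCountingClassifier import BitCountingClassifier

    runs = [4.2, 4.1, 4.3, 4.2, 3.1, 3.0, 3.2, 3.1]  # one level drop
    for group in BitCountingClassifier.classify(runs):
        print("{0} runs, {1}, avg={2}".format(
            len(group.values), group.metadata.classification,
            group.metadata.avg))
    # With these numbers one would expect two groups: "normal" around 4.2,
    # then "regression" around 3.1.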
diff --git a/resources/tools/presentation/new/jumpavg/BitCountingGroup.py b/resources/tools/presentation/new/jumpavg/BitCountingGroup.py
new file mode 100644
index 0000000000..144f5a8780
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/BitCountingGroup.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from RunGroup import RunGroup
+
+
+class BitCountingGroup(RunGroup):
+
+    def __init__(self, metadata_factory, values=[]):
+        """Create the group from metadata factory and values.
+
+        :param metadata_factory: Factory object to create metadata with.
+        :param values: The runs belonging to this group.
+        :type metadata_factory: BitCountingMetadataFactory
+        :type values: Iterable of float or of AvgStdevMetadata
+        """
+        self.metadata_factory = metadata_factory
+        metadata = metadata_factory.from_data(values)
+        super(BitCountingGroup, self).__init__(metadata, values)
+
+    def with_run_added(self, value):
+        """Create and return a new group with one more run than self.
+
+        :param value: The run value to add to the group.
+        :type value: float or AvgStdevMetadata
+        :returns: New group with the run added.
+        :rtype: BitCountingGroup
+        """
+        values = list(self.values)
+        values.append(value)
+        return BitCountingGroup(self.metadata_factory, values)
+        # TODO: Is there a good way to save some computation
+        # by copy&updating the metadata incrementally?
diff --git a/resources/tools/presentation/new/jumpavg/BitCountingGroupList.py b/resources/tools/presentation/new/jumpavg/BitCountingGroupList.py
new file mode 100644
index 0000000000..7da0656782
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/BitCountingGroupList.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from BitCountingGroup import BitCountingGroup
+from BitCountingMetadataFactory import BitCountingMetadataFactory
+
+
+class BitCountingGroupList(object):
+
+    def __init__(self, group_list=[], bits=None):
+        """Create a group list from given list of groups.
+
+        :param group_list: List of groups to compose this group list.
+        :param bits: Bit count if known, else None.
+        :type group_list: list of BitCountingGroup
+        :type bits: float or None
+        """
+        self.group_list = group_list
+        if bits is not None:
+            self.bits = bits
+            return
+        bits = 0.0
+        for group in group_list:
+            bits += group.metadata.bits
+        self.bits = bits
+
+    def __getitem__(self, index):
+        """Return group at the index. This makes self iterable.
+
+        :param index: The position in the array of groups.
+        :type index: int
+        :returns: Group at the position.
+        :rtype: BitCountingGroup
+        """
+        return self.group_list[index]
+
+    def with_group_appended(self, group):
+        """Create and return a new group list with the given group appended.
+
+        The group argument object is updated with derivative metadata.
+
+        :param group: Next group to be appended to the group list.
+        :type group: BitCountingGroup
+        :returns: New group list with added group.
+        :rtype: BitCountingGroupList
+        """
+        group_list = list(self.group_list)
+        if group_list:
+            last_group = group_list[-1]
+            factory = BitCountingMetadataFactory(
+                last_group.metadata_factory.max_value, last_group.metadata.avg)
+            group.metadata_factory = factory
+            group.metadata = factory.from_data(group.values)
+        group_list.append(group)
+        bits = self.bits + group.metadata.bits
+        return BitCountingGroupList(group_list, bits)
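The classifier's dynamic programming only ever needs two moves: with_group_appended() to open a group and with_value_added_to_last_group() to extend one. Both return fresh lists whose cached bits totals stay consistent with their groups. A small sketch (max_value and the run values are made up):

    from BitCountingGroup import BitCountingGroup
    from BitCountingGroupList import BitCountingGroupList
    from BitCountingMetadataFactory import BitCountingMetadataFactory

    factory = BitCountingMetadataFactory(max_value=5.0)
    one = BitCountingGroupList().with_group_appended(
        BitCountingGroup(factory, [4.2]))
    two = one.with_value_added_to_last_group(4.1)  # still one group, two runs
    split = two.with_group_appended(BitCountingGroup(factory, [3.1]))
    # The cached total matches a recount from the group metadata.
    assert abs(split.bits
               - sum(g.metadata.bits for g in split.group_list)) < 1e-9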
+    def with_value_added_to_last_group(self, value):
+        """Create and return new group list with value added to last group.
+
+        :param value: The run value to add to the last group.
+        :type value: float or AvgStdevMetadata
+        :returns: New group list with the last group updated.
+        :rtype: BitCountingGroupList
+        """
+        last_group = self.group_list[-1]
+        bits_before = last_group.metadata.bits
+        last_group = last_group.with_run_added(value)
+        group_list = list(self.group_list)
+        group_list[-1] = last_group
+        bits = self.bits - bits_before + last_group.metadata.bits
+        return BitCountingGroupList(group_list, bits)
diff --git a/resources/tools/presentation/new/jumpavg/BitCountingMetadata.py b/resources/tools/presentation/new/jumpavg/BitCountingMetadata.py
new file mode 100644
index 0000000000..67d111985f
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/BitCountingMetadata.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+from AvgStdevMetadata import AvgStdevMetadata
+
+
+class BitCountingMetadata(AvgStdevMetadata):
+    """Class for metadata which includes information content."""
+
+    def __init__(self, max_value, size=0, avg=0.0, stdev=0.0, prev_avg=None):
+        """Construct the metadata by computing from the values needed.
+
+        The bit count is not real, as that would depend on numeric precision
+        (number of significant bits in values).
+        The difference is assumed to be constant per value,
+        which is consistent with Gauss distribution
+        (but not with floating point mechanics).
+        The hope is the difference will have
+        no real impact on the classification procedure.
+
+        :param max_value: Maximal expected value.
+            TODO: This might be more optimal,
+            but max-invariant algorithm will be nicer.
+        :param size: Number of values participating in this group.
+        :param avg: Population average of the participating sample values.
+        :param stdev: Population standard deviation of the sample values.
+        :param prev_avg: Population average of the previous group.
+            If None, no previous average is taken into account.
+            If not None, the given previous average is used to discourage
+            consecutive groups with similar averages
+            (opposite triangle distribution is assumed).
+        :type max_value: float
+        :type size: int
+        :type avg: float
+        :type stdev: float
+        :type prev_avg: float or None
+        """
+        super(BitCountingMetadata, self).__init__(size, avg, stdev)
+        self.max_value = max_value
+        self.prev_avg = prev_avg
+        self.bits = 0.0
+        if self.size < 1:
+            return
+        # Length of the sequence must be also counted in bits,
+        # otherwise the message would not be decodable.
+        # Model: probability of k samples is 1/k - 1/(k+1)
+        # == 1/k/(k+1)
+        self.bits += math.log(size * (size + 1), 2)
+        if prev_avg is None:
+            # Avg is considered to be uniformly distributed
+            # from zero to max_value.
+            self.bits += math.log(max_value + 1.0, 2)
+        else:
+            # Opposite triangle distribution with minimum.
+            self.bits += math.log(
+                max_value * (max_value + 1) / (abs(avg - prev_avg) + 1), 2)
+        if self.size < 2:
+            return
+        # Stdev is considered to be uniformly distributed
+        # from zero to max_value. That is quite a bad expectation,
+        # but resilient to negative samples etc.
+        self.bits += math.log(max_value + 1.0, 2)
+        # Now we know the samples lie on a sphere in size-1 dimensions.
+        # So it is (size-2)-sphere, with radius^2 == stdev^2 * size.
+        # https://en.wikipedia.org/wiki/N-sphere
+        sphere_area_ln = math.log(2) + math.log(math.pi) * ((size - 1) / 2.0)
+        sphere_area_ln -= math.lgamma((size - 1) / 2.0)
+        sphere_area_ln += math.log(stdev + 1.0) * (size - 2)
+        sphere_area_ln += math.log(size) * ((size - 2) / 2.0)
+        self.bits += sphere_area_ln / math.log(2)
+
+    def __str__(self):
+        """Return string with human readable description of the group.
+
+        :returns: Readable description.
+        :rtype: str
+        """
+        return "size={size} avg={avg} stdev={stdev} bits={bits}".format(
+            size=self.size, avg=self.avg, stdev=self.stdev, bits=self.bits)
+
+    def __repr__(self):
+        """Return string executable as Python constructor call.
+
+        :returns: Executable constructor call.
+        :rtype: str
+        """
+        return ("BitCountingMetadata(max_value={max_value},size={size}," +
+                "avg={avg},stdev={stdev},prev_avg={prev_avg})").format(
+                    max_value=self.max_value, size=self.size, avg=self.avg,
+                    stdev=self.stdev, prev_avg=self.prev_avg)
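To make the cost model concrete, the bit count of a single-sample group can be followed by hand, and the prev_avg branch shows the bias against too similar consecutive averages. The numbers below are illustrative only:

    import math

    max_value = 10.0
    # Singleton, no previous average: length term plus uniform-average term.
    bits = math.log(1 * 2, 2) + math.log(max_value + 1.0, 2)
    print(bits)  # 1 + log2(11), roughly 4.46 bits

    # With a previous average, a near-identical new average costs more bits:
    same = math.log(max_value * (max_value + 1) / (abs(5.0 - 5.0) + 1), 2)
    far = math.log(max_value * (max_value + 1) / (abs(5.0 - 9.0) + 1), 2)
    print("{0:.2f} vs {1:.2f}".format(same, far))  # ~6.78 vs ~4.46

A group whose average barely differs from its predecessor is expensive to encode, so the partition search prefers either merging such groups or jumping to a clearly different average.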
diff --git a/resources/tools/presentation/new/jumpavg/BitCountingMetadataFactory.py b/resources/tools/presentation/new/jumpavg/BitCountingMetadataFactory.py
new file mode 100644
index 0000000000..5a7b393b55
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/BitCountingMetadataFactory.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+from AvgStdevMetadata import AvgStdevMetadata
+from AvgStdevMetadataFactory import AvgStdevMetadataFactory
+from BitCountingMetadata import BitCountingMetadata
+
+
+class BitCountingMetadataFactory(object):
+    """Class for factory which creates bit counting metadata from data."""
+
+    @staticmethod
+    def find_max_value(values):
+        """Return the max value.
+
+        This is a separate helper method,
+        because the whole set of values is usually larger
+        than the one passed to from_data().
+
+        :param values: Run values to be processed.
+        :type values: Iterable of float or of AvgStdevMetadata
+        :returns: 0.0 or the biggest value found.
+        :rtype: float
+        """
+        max_value = 0.0
+        for value in values:
+            if isinstance(value, AvgStdevMetadata):
+                value = value.avg
+            if value > max_value:
+                max_value = value
+        return max_value
+
+    def __init__(self, max_value, prev_avg=None):
+        """Construct the factory instance with given arguments.
+
+        :param max_value: Maximal expected value.
+        :param prev_avg: Population average of the previous group.
+            If None, no previous average is taken into account.
+            If not None, the given previous average is used to discourage
+            consecutive groups with similar averages
+            (opposite triangle distribution is assumed).
+        :type max_value: float
+        :type prev_avg: float or None
+        """
+        self.max_value = max_value
+        self.prev_avg = prev_avg
+
+    def from_avg_stdev_metadata(self, metadata):
+        """Return new metadata object by adding bits to existing metadata.
+
+        :param metadata: Metadata to count bits for.
+        :type metadata: AvgStdevMetadata
+        :returns: The metadata with bits counted.
+        :rtype: BitCountingMetadata
+        """
+        return BitCountingMetadata(
+            max_value=self.max_value, size=metadata.size,
+            avg=metadata.avg, stdev=metadata.stdev, prev_avg=self.prev_avg)
+
+    def from_data(self, values):
+        """Return new metadata object fitting the values.
+
+        :param values: Run values to be processed.
+        :type values: Iterable of float or of AvgStdevMetadata
+        :returns: The metadata matching the values.
+        :rtype: BitCountingMetadata
+        """
+        metadata = AvgStdevMetadataFactory.from_data(values)
+        return self.from_avg_stdev_metadata(metadata)
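Since the bit count is a pure function of (max_value, size, avg, stdev, prev_avg), summarizing first and adding bits later is equivalent to calling from_data() directly. A quick check (the values are made up):

    from AvgStdevMetadataFactory import AvgStdevMetadataFactory
    from BitCountingMetadataFactory import BitCountingMetadataFactory

    values = [3.0, 3.2, 2.9]
    factory = BitCountingMetadataFactory(
        BitCountingMetadataFactory.find_max_value(values))
    direct = factory.from_data(values)
    two_pass = factory.from_avg_stdev_metadata(
        AvgStdevMetadataFactory.from_data(values))
    assert repr(direct) == repr(two_pass)  # identical metadata either way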
diff --git a/resources/tools/presentation/new/jumpavg/ClassifiedBitCountingMetadata.py b/resources/tools/presentation/new/jumpavg/ClassifiedBitCountingMetadata.py
new file mode 100644
index 0000000000..9a7277bc3e
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/ClassifiedBitCountingMetadata.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from BitCountingMetadata import BitCountingMetadata
+
+
+class ClassifiedBitCountingMetadata(BitCountingMetadata):
+    """Class for metadata which includes classification."""
+
+    def __init__(
+            self, max_value, size=0, avg=0.0, stdev=0.0, prev_avg=None,
+            classification=None):
+        """Delegate to ancestor constructors and set classification.
+
+        :param max_value: Maximal expected value.
+        :param size: Number of values participating in this group.
+        :param avg: Population average of the participating sample values.
+        :param stdev: Population standard deviation of the sample values.
+        :param prev_avg: Population average of the previous group.
+            If None, no previous average is taken into account.
+            If not None, the given previous average is used to discourage
+            consecutive groups with similar averages
+            (opposite triangle distribution is assumed).
+        :param classification: Arbitrary object classifying this group.
+        :type max_value: float
+        :type size: int
+        :type avg: float
+        :type stdev: float
+        :type prev_avg: float or None
+        :type classification: object
+        """
+        super(ClassifiedBitCountingMetadata, self).__init__(
+            max_value, size, avg, stdev, prev_avg)
+        self.classification = classification
+
+    def __str__(self):
+        """Return string with human readable description of the group.
+
+        :returns: Readable description.
+        :rtype: str
+        """
+        # str(super(...)) describes the proxy, not the proxied object.
+        super_str = super(ClassifiedBitCountingMetadata, self).__str__()
+        return super_str + " classification={classification}".format(
+            classification=self.classification)
+
+    def __repr__(self):
+        """Return string executable as Python constructor call.
+
+        :returns: Executable constructor call.
+        :rtype: str
+        """
+        return ("ClassifiedBitCountingMetadata(max_value={max_value}," +
+                "size={size},avg={avg},stdev={stdev},prev_avg={prev_avg}," +
+                "classification={cls})").format(
+                    max_value=self.max_value, size=self.size, avg=self.avg,
+                    stdev=self.stdev, prev_avg=self.prev_avg,
+                    cls=self.classification)
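The classification rides along in both the human readable and the executable forms; str() simply appends it to the ancestor's description. For example (the numbers and label are illustrative):

    from ClassifiedBitCountingMetadata import ClassifiedBitCountingMetadata

    meta = ClassifiedBitCountingMetadata(
        max_value=10.0, size=5, avg=4.0, stdev=0.5, classification="normal")
    print(str(meta))
    # size=5 avg=4.0 stdev=0.5 bits=... classification=normal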
diff --git a/resources/tools/presentation/new/jumpavg/ClassifiedMetadataFactory.py b/resources/tools/presentation/new/jumpavg/ClassifiedMetadataFactory.py
new file mode 100644
index 0000000000..39b157f26b
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/ClassifiedMetadataFactory.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+from ClassifiedBitCountingMetadata import ClassifiedBitCountingMetadata
+
+
+class ClassifiedMetadataFactory(object):
+    """Class for factory which adds classification to bit counting metadata."""
+
+    @staticmethod
+    def with_classification(metadata, classification):
+        """Return new metadata object with added classification.
+
+        TODO: Is there a way to add classification to any metadata,
+        without messing up constructors and __repr__()?
+
+        FIXME: Factories take raw resources. Find a name for the thing
+        which takes semi-finished products. Transformer?
+
+        :param metadata: Existing metadata without classification.
+        :param classification: Arbitrary object classifying this group.
+        :type metadata: BitCountingMetadata
+        :type classification: object
+        :returns: The metadata with added classification.
+        :rtype: ClassifiedBitCountingMetadata
+        """
+        return ClassifiedBitCountingMetadata(
+            max_value=metadata.max_value, size=metadata.size, avg=metadata.avg,
+            stdev=metadata.stdev, prev_avg=metadata.prev_avg,
+            classification=classification)
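Wrapping existing metadata does not change its cost, it only labels it; the label vocabulary used by BitCountingClassifier is "normal", "regression" and "progression". A hedged sketch (the run values are made up):

    from BitCountingMetadataFactory import BitCountingMetadataFactory
    from ClassifiedMetadataFactory import ClassifiedMetadataFactory

    factory = BitCountingMetadataFactory(max_value=10.0)
    plain = factory.from_data([4.1, 4.3, 4.2])
    tagged = ClassifiedMetadataFactory.with_classification(
        plain, "progression")
    assert tagged.bits == plain.bits  # same information content, now labelled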
diff --git a/resources/tools/presentation/new/jumpavg/RunGroup.py b/resources/tools/presentation/new/jumpavg/RunGroup.py
new file mode 100644
index 0000000000..808e02b792
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/RunGroup.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class RunGroup(object):
+
+    def __init__(self, metadata, values):
+        """Create the group from metadata and values.
+
+        :param metadata: Metadata object to associate with the group.
+        :param values: The runs belonging to this group.
+        :type metadata: AbstractGroupMetadata
+        :type values: Iterable of float or of AvgStdevMetadata
+        """
+        self.metadata = metadata
+        self.values = values
diff --git a/resources/tools/presentation/new/jumpavg/__init__.py b/resources/tools/presentation/new/jumpavg/__init__.py
new file mode 100644
index 0000000000..f9fc83a1fe
--- /dev/null
+++ b/resources/tools/presentation/new/jumpavg/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for directory resources/tools/presentation/new/jumpavg
+"""
diff --git a/resources/tools/presentation/new/pal.py b/resources/tools/presentation/new/pal.py
new file mode 100644
index 0000000000..013c921124
--- /dev/null
+++ b/resources/tools/presentation/new/pal.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""CSIT Presentation and analytics layer.
+"""
+
+import sys
+import argparse
+import logging
+
+from errors import PresentationError
+from environment import Environment, clean_environment
+from specification_parser import Specification
+from input_data_parser import InputData
+from generator_tables import generate_tables
+from generator_plots import generate_plots
+from generator_files import generate_files
+from static_content import prepare_static_content
+from generator_report import generate_report
+from generator_CPTA import generate_cpta
+
+
+def parse_args():
+    """Parse arguments from cmd line.
+
+    :returns: Parsed arguments.
+    :rtype: argparse.Namespace
+    """
+
+    parser = argparse.ArgumentParser(description=__doc__,
+                                     formatter_class=argparse.
+ RawDescriptionHelpFormatter) + parser.add_argument("-s", "--specification", + required=True, + type=argparse.FileType('r'), + help="Specification YAML file.") + parser.add_argument("-r", "--release", + default="master", + type=str, + help="Release string of the product.") + parser.add_argument("-l", "--logging", + choices=["DEBUG", "INFO", "WARNING", + "ERROR", "CRITICAL"], + default="ERROR", + help="Logging level.") + parser.add_argument("-f", "--force", + action='store_true', + help="Force removing the old build(s) if present.") + + return parser.parse_args() + + +def main(): + """Main function.""" + + log_levels = {"NOTSET": logging.NOTSET, + "DEBUG": logging.DEBUG, + "INFO": logging.INFO, + "WARNING": logging.WARNING, + "ERROR": logging.ERROR, + "CRITICAL": logging.CRITICAL} + + args = parse_args() + logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', + datefmt='%Y/%m/%d %H:%M:%S', + level=log_levels[args.logging]) + + logging.info("Application started.") + try: + spec = Specification(args.specification) + spec.read_specification() + except PresentationError: + logging.critical("Finished with error.") + return 1 + + if spec.output["output"] not in ("report", "CPTA"): + logging.critical("The output '{0}' is not supported.". + format(spec.output["output"])) + return 1 + + ret_code = 1 + try: + env = Environment(spec.environment, args.force) + env.set_environment() + + prepare_static_content(spec) + + data = InputData(spec) + data.download_and_parse_data(repeat=2) + + generate_tables(spec, data) + generate_plots(spec, data) + generate_files(spec, data) + + if spec.output["output"] == "report": + generate_report(args.release, spec) + logging.info("Successfully finished.") + elif spec.output["output"] == "CPTA": + sys.stdout.write(generate_cpta(spec, data)) + logging.info("Successfully finished.") + ret_code = 0 + + except (KeyError, ValueError, PresentationError) as err: + logging.info("Finished with an error.") + logging.critical(str(err)) + except Exception as err: + logging.info("Finished with an unexpected error.") + logging.critical(str(err)) + finally: + if spec is not None: + clean_environment(spec.environment) + return ret_code + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/resources/tools/presentation/new/requirements.txt b/resources/tools/presentation/new/requirements.txt new file mode 100644 index 0000000000..a33848d681 --- /dev/null +++ b/resources/tools/presentation/new/requirements.txt @@ -0,0 +1,11 @@ +Sphinx +sphinx-rtd-theme +robotframework==2.9.2 +sphinxcontrib-programoutput +PyYAML +pytz +python-dateutil +numpy +pandas +plotly +PTable diff --git a/resources/tools/presentation/new/run_cpta.sh b/resources/tools/presentation/new/run_cpta.sh new file mode 100755 index 0000000000..6199703dbd --- /dev/null +++ b/resources/tools/presentation/new/run_cpta.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +set -x + +# set default values in config array +typeset -A DIR + +DIR[WORKING]=_tmp + +# Install system dependencies +sudo apt-get -y update +sudo apt-get -y install libxml2 libxml2-dev libxslt-dev build-essential \ + zlib1g-dev unzip + +# Create working directories +mkdir ${DIR[WORKING]} + +# Create virtual environment +virtualenv ${DIR[WORKING]}/env +. ${DIR[WORKING]}/env/bin/activate + +# Install python dependencies: +pip install -r requirements.txt + +export PYTHONPATH=`pwd`:`pwd`/jumpavg + +STATUS=$(python pal.py \ + --specification specification_CPTA.yaml \ + --logging INFO \ + --force) +RETURN_STATUS=$? 
+ +echo ${STATUS} +exit ${RETURN_STATUS} diff --git a/resources/tools/presentation/new/run_report.sh b/resources/tools/presentation/new/run_report.sh new file mode 100755 index 0000000000..d294640fd1 --- /dev/null +++ b/resources/tools/presentation/new/run_report.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -x + +RELEASE=$1 + +# set default values in config array +typeset -A CFG +typeset -A DIR + +DIR[WORKING]=_tmp +CFG[BLD_LATEX]=1 + +# Install system dependencies +sudo apt-get -y update +sudo apt-get -y install libxml2 libxml2-dev libxslt-dev build-essential \ + zlib1g-dev unzip + +if [[ ${CFG[BLD_LATEX]} -eq 1 ]] ; +then + sudo apt-get -y install xvfb texlive-latex-recommended \ + texlive-fonts-recommended texlive-fonts-extra texlive-latex-extra latexmk wkhtmltopdf inkscape + sudo sed -i.bak 's/^\(main_memory\s=\s\).*/\110000000/' /usr/share/texlive/texmf-dist/web2c/texmf.cnf +fi + +# Create working directories +mkdir ${DIR[WORKING]} + +# Create virtual environment +virtualenv ${DIR[WORKING]}/env +. ${DIR[WORKING]}/env/bin/activate + +# Install python dependencies: +pip install -r requirements.txt + +export PYTHONPATH=`pwd` + +python pal.py \ + --specification specification.yaml \ + --release ${RELEASE} \ + --logging INFO \ + --force + +RETURN_STATUS=$(echo $?) +exit ${RETURN_STATUS} diff --git a/resources/tools/presentation/new/specification.yaml b/resources/tools/presentation/new/specification.yaml new file mode 100644 index 0000000000..97e616a15d --- /dev/null +++ b/resources/tools/presentation/new/specification.yaml @@ -0,0 +1,3900 @@ +- + type: "environment" + + paths: + # Top level directories: + ## Working directory + DIR[WORKING]: "_tmp" + ## Build directories + DIR[BUILD,HTML]: "_build" + DIR[BUILD,LATEX]: "_build_latex" + + # Static .rst files + DIR[RST]: "../../../docs/report" + + # Working directories + ## Input data files (.zip, .xml) + DIR[WORKING,DATA]: "{DIR[WORKING]}/data" + ## Static source files from git + DIR[WORKING,SRC]: "{DIR[WORKING]}/src" + DIR[WORKING,SRC,STATIC]: "{DIR[WORKING,SRC]}/_static" + + # Static html content + DIR[STATIC]: "{DIR[BUILD,HTML]}/_static" + DIR[STATIC,VPP]: "{DIR[STATIC]}/vpp" + DIR[STATIC,DPDK]: "{DIR[STATIC]}/dpdk" + DIR[STATIC,ARCH]: "{DIR[STATIC]}/archive" + + # Detailed test results + DIR[DTR]: "{DIR[WORKING,SRC]}/detailed_test_results" + DIR[DTR,PERF,DPDK]: "{DIR[DTR]}/dpdk_performance_results" + DIR[DTR,PERF,VPP]: "{DIR[DTR]}/vpp_performance_results" + DIR[DTR,MRR,VPP]: "{DIR[DTR]}/vpp_mrr_results" + DIR[DTR,PERF,COT]: "{DIR[DTR]}/cot_performance_results" + DIR[DTR,PERF,HC]: "{DIR[DTR]}/honeycomb_performance_results" + DIR[DTR,FUNC,VPP]: "{DIR[DTR]}/vpp_functional_results" + DIR[DTR,FUNC,VPP,CENTOS]: "{DIR[DTR]}/vpp_functional_results_centos" + DIR[DTR,FUNC,HC]: "{DIR[DTR]}/honeycomb_functional_results" + DIR[DTR,FUNC,NSHSFC]: "{DIR[DTR]}/nshsfc_functional_results" + DIR[DTR,PERF,VPP,IMPRV]: "{DIR[WORKING,SRC]}/vpp_performance_tests/performance_improvements" + + # Detailed test configurations + DIR[DTC]: "{DIR[WORKING,SRC]}/test_configuration" + DIR[DTC,PERF,VPP]: "{DIR[DTC]}/vpp_performance_configuration" + DIR[DTC,MRR,VPP]: "{DIR[DTC]}/vpp_mrr_configuration" + DIR[DTC,FUNC,VPP]: "{DIR[DTC]}/vpp_functional_configuration" + DIR[DTC,FUNC,VPP,CENTOS]: "{DIR[DTC]}/vpp_functional_configuration_centos" + + # Detailed tests operational data + DIR[DTO]: "{DIR[WORKING,SRC]}/test_operational_data" + DIR[DTO,PERF,VPP]: "{DIR[DTO]}/vpp_performance_operational_data" + + # .css patch file to fix tables generated by Sphinx + DIR[CSS_PATCH_FILE]: 
"{DIR[STATIC]}/theme_overrides.css" + DIR[CSS_PATCH_FILE2]: "{DIR[WORKING,SRC,STATIC]}/theme_overrides.css" + + urls: + URL[JENKINS,CSIT]: "https://jenkins.fd.io/view/csit/job" + URL[JENKINS,HC]: "https://jenkins.fd.io/view/hc2vpp/job" + URL[NEXUS]: "https://docs.fd.io/csit" + DIR[NEXUS]: "report/_static/archive" + + make-dirs: + # List the directories which are created while preparing the environment. + # All directories MUST be defined in "paths" section. + - "DIR[WORKING,DATA]" + - "DIR[STATIC,VPP]" + - "DIR[STATIC,DPDK]" + - "DIR[STATIC,ARCH]" + - "DIR[BUILD,LATEX]" + - "DIR[WORKING,SRC]" + - "DIR[WORKING,SRC,STATIC]" + + remove-dirs: + # List the directories which are deleted while cleaning the environment. + # All directories MUST be defined in "paths" section. + #- "DIR[BUILD,HTML]" + - "DIR[WORKING,DATA]" + + build-dirs: + # List the directories where the results (build) is stored. + # All directories MUST be defined in "paths" section. + - "DIR[BUILD,HTML]" + - "DIR[BUILD,LATEX]" + +- + type: "configuration" + data-sets: + plot-vpp-http-server-performance: + csit-vpp-perf-1804-all: + - 39 # wrk + - 40 # wrk + - 41 # wrk + - 42 # wrk + - 43 # wrk + - 44 # wrk + - 45 # wrk + - 46 # wrk + - 47 # wrk + - 48 # wrk +# TODO: Add the data sources +# vpp-meltdown-impact: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# csit-vpp-perf-1710-all: +# - 11l +# - 12 +# - 13 +# TODO: Add the data sources +# vpp-spectre-impact: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# csit-vpp-perf-1710-all: +# - 11 +# - 12 +# - 13 + vpp-performance-changes: + csit-vpp-perf-1710-all: + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + csit-vpp-perf-1801-all: + - 124 # sel + - 127 # sel + - 128 # sel + - 141 # sel + - 142 # sel + - 143 # sel + - 145 # sel + - 146 # sel + - 162 # sel + - 163 # sel + - 167 # sel + - 172 # sel acl only + csit-vpp-perf-1804-all: + - 21 # sel + - 22 # sel + - 23 # sel + - 24 # sel + - 27 # sel + - 28 # sel + - 29 # sel + - 30 # sel + - 31 # sel + - 35 # sel + vpp-performance-changes-mrr: + csit-vpp-perf-check-1801: + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 11 + - 12 + - 13 + csit-vpp-perf-check-1804: + - 6 # mrr - sel + - 13 # mrr - sel + - 14 # mrr - sel + - 15 # mrr - sel + - 16 # mrr - sel + - 17 # mrr - sel + - 19 # mrr - sel + - 20 # mrr - sel + - 21 # mrr - sel + - 22 # mrr - sel + plot-throughput-speedup-analysis: + csit-vpp-perf-1804-all: + - 19 # full + - 20 # full + - 25 # full + - 49 # full + - 21 # sel + - 22 # sel + - 23 # sel + - 24 # sel + - 27 # sel + - 28 # sel + - 29 # sel + - 30 # sel + - 31 # sel + - 35 # sel + plot-ligato-throughput-speedup-analysis: + csit-ligato-perf-1804-all: + - 5 # sel + - 6 # sel + - 7 # sel + - 8 # sel + - 9 # sel + - 10 # sel + - 11 # sel + - 12 # sel + - 13 # sel + - 14 # sel +# performance-improvements: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# - 14 +# - 15 +# - 16 +# - 17 +# - 18 +# - 19 +# - 21 +# csit-vpp-perf-1710-all: +# - 11 +# - 12 +# - 13 +# - 14 +# - 15 +# - 16 +# - 17 +# - 18 +# - 19 +# - 20 +# csit-vpp-perf-1801-all: +# - 124 +# - 127 +# - 128 +# csit-ligato-perf-1710-all: +# - 5 +# - 7 +# - 8 +# - 9 +# - 10 +# - 11 +# - 12 +# - 13 +# - 16 +# - 17 +# csit-ligato-perf-1801-all: +# - 16 # sel +# - 17 # sel +# - 18 # sel +# - 19 # sel +# - 20 # sel +# - 21 # sel +# - 22 # sel +# - 23 # sel +# - 24 # sel + vpp-perf-results: + csit-vpp-perf-1804-all: + - 19 # full + - 20 # full + - 25 # full + - 49 # full + vpp-func-results: + csit-vpp-functional-1804-ubuntu1604-virl: + - 229 + 
vpp-func-results-centos: + csit-vpp-functional-1804-centos7-virl: + - 238 + vpp-mrr-results: + csit-vpp-perf-check-1804: + - 5 # mrr - full + ligato-perf-results: + csit-ligato-perf-1804-all: + - 4 # full + dpdk-perf-results: + csit-dpdk-perf-1804-all: + - 13 + hc-func-results: + csit-hc2vpp-verify-func-1804-ubuntu1604: + - 3 + nsh-func-results: + csit-nsh_sfc-verify-func-1804-ubuntu1604-virl: + - 7 + plot-vpp-throughput-latency: + csit-vpp-perf-1804-all: + - 19 # full + - 20 # full + - 25 # full + - 49 # full + - 21 # sel + - 22 # sel + - 23 # sel + - 24 # sel + - 27 # sel + - 28 # sel + - 29 # sel + - 30 # sel + - 31 # sel + - 35 # sel + plot-dpdk-throughput-latency: + csit-dpdk-perf-1804-all: + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + plot-ligato-throughput-latency: + csit-ligato-perf-1804-all: + - 5 # sel + - 6 # sel + - 7 # sel + - 8 # sel + - 9 # sel + - 10 # sel + - 11 # sel + - 12 # sel + - 13 # sel + - 14 # sel + + plot-layouts: + + plot-cps: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + rangemode: "tozero" + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Connections Per Second [cps]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + + plot-rps: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + rangemode: "tozero" + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Requests Per Second [rps]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + + plot-throughput: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Packets Per Second [pps]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + + plot-latency: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True 
+ tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + title: "Indexed Test Cases" + zeroline: False + yaxis: + gridcolor: "rgb(238, 238, 238)'" + hoverformat: "" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + title: "Latency min/avg/max [uSec]" + zeroline: False + boxmode: "group" + boxgroupgap: 0.5 + autosize: False + margin: + t: 50 + b: 20 + l: 50 + r: 20 + showlegend: True + legend: + orientation: "h" + width: 700 + height: 1000 + + plot-throughput-speedup-analysis: + xaxis: + autorange: True + autotick: False + fixedrange: False + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + tickmode: "linear" + tickangle: 270 + zeroline: False + yaxis: + title: "Throughput speedup factor" + gridcolor: "rgb(238, 238, 238)" + hoverformat: ".4s" + linecolor: "rgb(238, 238, 238)" + linewidth: 1 + range: [] + showgrid: True + showline: True + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + zeroline: False + legend: + orientation: "h" + xanchor: "center" + yanchor: "top" + x: 0.5 + y: 1 + bgcolor: "rgba(255, 255, 255, 0)" + bordercolor: "rgba(255, 255, 255, 0)" + barmode: "group" + bargap: 0.15 + bargroupgap: 0.1 + autosize: False + margin: + 't': 50 + 'b': 300 + 'l': 50 + 'r': 20 + showlegend: True + width: 700 + height: 1000 + +- + type: "static" + src-path: "{DIR[RST]}" + dst-path: "{DIR[WORKING,SRC]}" + +- + type: "input" # Ignored in debug mode + general: + file-name: "robot-plugin.zip" + file-format: ".zip" + download-path: "{job}/{build}/robot/report/*zip*/{filename}" + extract: "robot-plugin/output.xml" + builds: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# - 14 +# - 15 +# - 16 +# - 17 +# - 18 +# - 19 +# - 21 + csit-vpp-perf-1710-all: + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + csit-vpp-perf-1801-all: +# - 122 # full +# - 126 # full +# - 129 # full +# - 140 # full + - 124 # sel + - 127 # sel + - 128 # sel + - 141 # sel + - 142 # sel + - 143 # sel + - 145 # sel + - 146 # sel + - 162 # sel + - 163 # sel + - 167 # sel + - 172 # sel acl only + csit-vpp-perf-1804-all: + - 19 # full + - 20 # full + - 25 # full + - 49 # full + - 21 # sel + - 22 # sel + - 23 # sel + - 24 # sel + - 27 # sel + - 28 # sel + - 29 # sel + - 30 # sel + - 31 # sel + - 35 # sel + - 39 # wrk + - 40 # wrk + - 41 # wrk + - 42 # wrk + - 43 # wrk + - 44 # wrk + - 45 # wrk + - 46 # wrk + - 47 # wrk + - 48 # wrk + csit-vpp-perf-check-1801: + - 3 # mrr + - 4 # mrr + - 5 # mrr + - 6 # mrr + - 7 # mrr + - 8 # mrr + - 9 # mrr + - 11 # mrr + - 12 # mrr + - 13 # mrr + csit-vpp-perf-check-1804: + - 5 # mrr - full + - 6 # mrr - sel + - 13 # mrr - sel + - 14 # mrr - sel + - 15 # mrr - sel + - 16 # mrr - sel + - 17 # mrr - sel + - 19 # mrr - sel + - 20 # mrr - sel + - 21 # mrr - sel + - 22 # mrr - sel +# csit-ligato-perf-1710-all: +# - 5 +# - 7 +# - 8 +# - 9 +# - 10 +# - 11 +# - 12 +# - 13 +# - 16 +# - 17 +# csit-ligato-perf-1801-all: +# - 16 # sel +# - 17 # sel +# - 18 # sel +# - 19 # sel +# - 20 # sel +# - 21 # sel +# - 22 # sel +# - 23 # sel +# - 24 # sel +# - 25 # full + csit-ligato-perf-1804-all: + - 4 # full + - 5 # sel + - 6 # sel + - 7 # sel + - 8 # sel + - 9 # sel + - 10 # sel + - 11 # sel + - 12 # sel + - 13 # sel + - 14 # sel + csit-dpdk-perf-1804-all: + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + csit-vpp-functional-1804-ubuntu1604-virl: 
+ - 229 + csit-vpp-functional-1804-centos7-virl: + - 238 + csit-nsh_sfc-verify-func-1804-ubuntu1604-virl: + - 7 + csit-hc2vpp-verify-func-1804-ubuntu1604: + - 3 + +- + type: "output" + output: "report" + format: + html: + - full + pdf: + - minimal + +################################################################################ +### T A B L E S ### +################################################################################ + +#- +# type: "table" +# title: "Performance Impact of Meltdown Patches" +# algorithm: "table_performance_comparison" +# output-file-ext: ".csv" +## TODO: specify dir +# output-file: "{DIR[STATIC,VPP]}/meltdown-impact" +# reference: +# title: "No Meltdown" +## TODO: specify data sources +# data: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# compare: +# title: "Meltdown Patches Applied" +## TODO: specify data sources +# data: +# csit-vpp-perf-1710-all: +# - 11 +# - 12 +# - 13 +# data: +# "vpp-meltdown-impact" +# filter: "all" +# parameters: +# - "name" +# - "parent" +# - "throughput" +# # Number of the best and the worst tests presented in the table. Use 0 (zero) +# # to present all tests. +# nr-of-tests-shown: 20 +# +#- +# type: "table" +# title: "Performance Impact of Spectre Patches" +# algorithm: "table_performance_comparison" +# output-file-ext: ".csv" +## TODO: specify dir +# output-file: "{DIR[STATIC,VPP]}/meltdown-spectre-impact" +# reference: +# title: "No Spectre" +## TODO: specify data sources +# data: +# csit-vpp-perf-1707-all: +# - 9 +# - 10 +# - 13 +# compare: +# title: "Spectre Patches Applied" +## TODO: specify data sources +# data: +# csit-vpp-perf-1710-all: +# - 11 +# - 12 +# - 13 +# data: +# "vpp-spectre-impact" +# filter: "all" +# parameters: +# - "name" +# - "parent" +# - "throughput" +# # Number of the best and the worst tests presented in the table. Use 0 (zero) +# # to present all tests. +# nr-of-tests-shown: 20 + +- + type: "table" + title: "VPP Performance Changes" + algorithm: "table_performance_comparison" + output-file-ext: ".csv" + output-file: "{DIR[STATIC,VPP]}/performance-changes" + history: + - + title: "rls1710" + data: + csit-vpp-perf-1710-all: + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + reference: + title: "rls1801" + data: + csit-vpp-perf-1801-all: + - 124 # sel + - 127 # sel + - 128 # sel + - 141 # sel + - 142 # sel + - 143 # sel + - 145 # sel + - 146 # sel + - 162 # sel + - 163 # sel + - 167 # sel + - 172 # sel acl only + compare: + title: "rls1804" + data: + csit-vpp-perf-1804-all: + - 21 # sel + - 22 # sel + - 23 # sel + - 24 # sel + - 27 # sel + - 28 # sel + - 29 # sel + - 30 # sel + - 31 # sel + - 35 # sel + data: "vpp-performance-changes" + filter: "all" + parameters: + - "name" + - "parent" + - "throughput" + # Number of the best and the worst tests presented in the table. Use 0 (zero) + # to present all tests. 
+ nr-of-tests-shown: 20 + outlier-const: 1.5 + +- + type: "table" + title: "VPP Performance Changes - MRR" + algorithm: "table_performance_comparison_mrr" + output-file-ext: ".csv" + output-file: "{DIR[STATIC,VPP]}/performance-changes-mrr" + reference: + title: "Release 1801" + data: + csit-vpp-perf-check-1801: + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 11 + - 12 + - 13 + compare: + title: "Release 1804" + data: + csit-vpp-perf-check-1804: + - 6 # mrr - sel + - 13 # mrr - sel + - 14 # mrr - sel + - 15 # mrr - sel + - 16 # mrr - sel + - 17 # mrr - sel + - 19 # mrr - sel + - 20 # mrr - sel + - 21 # mrr - sel + - 22 # mrr - sel + data: "vpp-performance-changes-mrr" + filter: "all" + parameters: + - "name" + - "parent" + - "result" + # Number of the best and the worst tests presented in the table. Use 0 (zero) + # to present all tests. + nr-of-tests-shown: 20 + outlier-const: 1.5 + +#- +# type: "table" +# title: "Performance improvements" +# algorithm: "table_performance_improvements" +# template: "{DIR[DTR,PERF,VPP,IMPRV]}/tmpl_performance_improvements.csv" +# output-file-ext: ".csv" +# output-file: "{DIR[DTR,PERF,VPP,IMPRV]}/performance_improvements" +# columns: +# - +# title: "Test Name" +# data: "template 1" +# - +# title: "16.09 mean [Mpps]" +# data: "template 2" +# - +# title: "17.01 mean [Mpps]" +# data: "template 3" +# - +# title: "17.04 mean [Mpps]" +# data: "template 4" +# - +# title: "17.07 mean [Mpps]" +# data: "data csit-vpp-perf-1707-all mean" +# - +# title: "17.10 mean [Mpps]" +# data: "data csit-vpp-perf-1710-all csit-ligato-perf-1710-all mean" +# - +# title: "18.01 mean [Mpps]" +# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all mean" +# - +# title: "18.01 stdev [Mpps]" +# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all stdev" +# - +# title: "17.10 to 18.01 change [%]" +# data: "operation relative_change 5 6" +# rows: "generated" +# data: +# "performance-improvements" +# filter: "template" +# parameters: +# - "throughput" + +- + type: "table" + title: "Detailed Test Results - VPP Performance Results" + algorithm: "table_merged_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,PERF,VPP]}/vpp_performance_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data msg" + rows: "generated" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "name" + - "parent" + - "doc" + - "msg" + +- + type: "table" + title: "Test configuration - VPP Performance Test Configs" + algorithm: "table_merged_details" + output-file-ext: ".csv" + output-file: "{DIR[DTC,PERF,VPP]}/vpp_test_configuration" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case" + data: "data vat-history" + rows: "generated" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "parent" + - "name" + - "vat-history" + +- + type: "table" + title: "Test Operational Data - VPP Performance Operational Data" + algorithm: "table_merged_details" + output-file-ext: ".csv" + output-file: "{DIR[DTO,PERF,VPP]}/vpp_test_operational" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP Operational Data - Outputs of 'show runtime' at NDR packet rate" + data: "data show-run" + rows: "generated" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "parent" + - "name" + - "show-run" + +- + type: "table" + title: "Detailed Test 
Results - VPP MRR Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data msg" + rows: "generated" + data: + "vpp-mrr-results" + filter: "'MRR'" + parameters: + - "name" + - "parent" + - "doc" + - "msg" + +- + type: "table" + title: "Test configuration - VPP MRR Test Configs" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_test_configuration" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case" + data: "data vat-history" + rows: "generated" + data: + "vpp-mrr-results" + filter: "'MRR'" + parameters: + - "parent" + - "name" + - "vat-history" + +- + type: "table" + title: "Detailed Test Results - VPP Functional Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,FUNC,VPP]}/vpp_functional_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data status" + rows: "generated" + data: + "vpp-func-results" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "status" + +- + type: "table" + title: "Detailed Test Results - VPP Functional Results - CentOS" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data status" + rows: "generated" + data: + "vpp-func-results-centos" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "status" + +- + type: "table" + title: "Test configuration - VPP Functional Test Configs" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTC,FUNC,VPP]}/vpp_functional_configuration" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case" + data: "data vat-history" + rows: "generated" + data: + "vpp-func-results" + filter: "all" + parameters: + - "parent" + - "name" + - "vat-history" + +- + type: "table" + title: "Test configuration - VPP Functional Test Configs - CentOS" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos" + columns: + - + title: "Name" + data: "data name" + - + title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case" + data: "data vat-history" + rows: "generated" + data: + "vpp-func-results-centos" + filter: "all" + parameters: + - "parent" + - "name" + - "vat-history" + +- + type: "table" + title: "Detailed Test Results - Container Orchestrated Topologies Performance Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data msg" + rows: "generated" + data: + "ligato-perf-results" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "msg" + +- + type: "table" + title: "Detailed Test Results - DPDK Performance Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results" + columns: + - + title: "Name" + data: 
"data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data msg" + rows: "generated" + data: + "dpdk-perf-results" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "msg" + +- + type: "table" + title: "Detailed Test Results - Honeycomb Functional Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,FUNC,HC]}/hc_functional_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data status" + rows: "generated" + data: + "hc-func-results" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "status" + +- + type: "table" + title: "Detailed Test Results - NSH SFC Functional Results" + algorithm: "table_details" + output-file-ext: ".csv" + output-file: "{DIR[DTR,FUNC,NSHSFC]}/nsh_sfc_functional_results" + columns: + - + title: "Name" + data: "data name" + - + title: "Documentation" + data: "data doc" + - + title: "Status" + data: "data status" + rows: "generated" + data: + "nsh-func-results" + filter: "all" + parameters: + - "name" + - "parent" + - "doc" + - "status" + +################################################################################ +### F I L E S ### +################################################################################ + +- + type: "file" + title: "VPP Performance Results" + algorithm: "file_merged_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,PERF,VPP]}/vpp_performance_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,PERF,VPP]}" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "name" + - "doc" + - "level" + - "parent" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Performance Configuration" + algorithm: "file_merged_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTC,PERF,VPP]}/vpp_performance_configuration" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTC,PERF,VPP]}" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "name" + - "doc" + - "level" + - "parent" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Performance Operational Data" + algorithm: "file_merged_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTO,PERF,VPP]}/vpp_performance_operational_data" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTO,PERF,VPP]}" + data: + "vpp-perf-results" + filter: "not 'NDRCHK' and not 'PDRCHK'" + parameters: + - "name" + - "doc" + - "level" + - "parent" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP MRR Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,MRR,VPP]}" + data: + "vpp-mrr-results" + filter: "'MRR'" + parameters: + - "name" + - "doc" + - "level" + - "parent" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP MRR Configuration" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_configuration" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTC,MRR,VPP]}" + data: + "vpp-mrr-results" + filter: "'MRR'" + parameters: + - "name" + - "doc" + - "level" + - "parent" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Functional Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,FUNC,VPP]}/vpp_functional_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,FUNC,VPP]}" + data: + "vpp-func-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Functional Results - CentOS" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,FUNC,VPP,CENTOS]}" + data: + "vpp-func-results-centos" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Functional Configuration" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTC,FUNC,VPP]}/vpp_functional_configuration" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTC,FUNC,VPP]}" + data: + "vpp-func-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "VPP Functional Configuration - CentOS" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTC,FUNC,VPP,CENTOS]}" + data: + "vpp-func-results-centos" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "Container Orchestrated Performance Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,PERF,COT]}" + data: + "ligato-perf-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "DPDK Performance Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,PERF,DPDK]}" + data: + "dpdk-perf-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + chapters: + - "suites" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "Honeycomb Functional Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,FUNC,HC]}/honeycomb_functional_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,FUNC,HC]}" + data: + "hc-func-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + chapters: + - "suites" + data-start-level: 3 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +- + type: "file" + title: "NSH SFC Functional Results" + algorithm: "file_test_results" + output-file-ext: ".rst" + output-file: "{DIR[DTR,FUNC,NSHSFC]}/nshsfc_functional_results" + file-header: "\n.. |br| raw:: html\n\n
    <br />\n\n\n.. |prein| raw:: html\n\n    <pre>\n\n\n.. |preout| raw:: html\n\n    </pre>
\n\n" + dir-tables: "{DIR[DTR,FUNC,NSHSFC]}" + data: + "nsh-func-results" + filter: "all" + parameters: + - "name" + - "doc" + - "level" + chapters: + - "suites" + data-start-level: 2 # 0, 1, 2, ... + chapters-start-level: 2 # 0, 1, 2, ... + +################################################################################ +### P L O T S ### +################################################################################ + +# Plots VPP HTTP Server Performance +- + type: "plot" + title: "VPP HTTP Server Performance" + algorithm: "plot_http_server_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/http-server-performance-cps" + data: + "plot-vpp-http-server-performance" + # Keep this formatting, the filter is enclosed with " (quotation mark) and + # each tag is enclosed with ' (apostrophe). + filter: "'HTTP' and 'TCP_CPS'" + parameters: + - "result" + - "name" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "VPP HTTP Server Performance" + layout: + "plot-cps" + +- + type: "plot" + title: "VPP HTTP Server Performance" + algorithm: "plot_http_server_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/http-server-performance-rps" + data: + "plot-vpp-http-server-performance" + filter: "'HTTP' and 'TCP_RPS'" + parameters: + - "result" + - "name" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "VPP HTTP Server Performance" + layout: + "plot-rps" + +# Plot Throughput Speedup Analysis + +# L2 - 10ge2p1x520 - NDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-l2-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'NDRDISC' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'LXC' and not 'DOCKER'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# L2 - 40ge2p1xl710 - NDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-l2-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'BASE' and 'NDRDISC' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'LXC' and not 'DOCKER'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# L2 - 10ge2p1x520 - PDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-l2-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'LXC' and not 'DOCKER'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv4 - 10ge2p1x520 - NDR +- + 
type: "plot" + title: "TSA: 64B-*-ethip4-ip4(base|scale)*ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ip4-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'IP4FWD' and ('BASE' or 'SCALE') and 'NDRDISC' and not 'VHOST' and not 'FEATURE' and not 'DOT1Q' and not 'IPSEC'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-ethip4-ip4(base|scale)*ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv4 - 40ge2p1xl710 - NDR +- + type: "plot" + title: "TSA: 64B-*-ethip4-ip4(base|scale)*ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ip4-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'IP4FWD' and ('BASE' or 'SCALE') and 'NDRDISC' and not 'VHOST' and not 'FEATURE' and not 'DOT1Q' and not 'IPSEC'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-ethip4-ip4(base|scale)*ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv4 - 10ge2p1x520 - PDR +- + type: "plot" + title: "TSA: 64B-*-ethip4-ip4(base|scale)*pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ip4-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'IP4FWD' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and not 'VHOST' and not 'FEATURE' and not 'DOT1Q' and not 'IPSEC'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-ethip4-ip4(base|scale)*pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv6 - 10ge2p1x520 - NDR +- + type: "plot" + title: "TSA: 78B-*-ethip6-ip6(base|scale)*ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ip6-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'IP6FWD' and ('BASE' or 'SCALE') and 'NDRDISC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6-ip6(base|scale)*ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv6 - 40ge2p1xl710 - NDR +- + type: "plot" + title: "TSA: 78B-*-ethip6-ip6(base|scale)*ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-78B-ip6-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '78B' and 'IP6FWD' and ('BASE' or 'SCALE') and 'NDRDISC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6-ip6(base|scale)*ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IPv6 - 10ge2p1x520 - PDR +- + type: "plot" + title: "TSA: 78B-*-ethip6-ip6(base|scale)*pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ip6-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'IP6FWD' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6-ip6(base|scale)*pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# SRv6 - 10ge2p1x520 - NDR +- + type: "plot" + 
title: "TSA: 78B-*-ethip6-ip6(base|scale)*ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'NDRDISC' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6-ip6(base|scale)*ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# SRv6 - 10ge2p1x520 - PDR +- + type: "plot" + title: "TSA: 78B-*-ethip6-ip6(base|scale)*pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6-ip6(base|scale)*pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IP4_overlay - NDR +- + type: "plot" + title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IP4_overlay - PDR +- + type: "plot" + title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IP6_overlay - NDR +- + type: "plot" + title: "TSA: 78B-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# IP6_overlay - PDR +- + type: "plot" + title: "TSA: 78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# VM VHOST - NDR +- + type: "plot" + 
title: "TSA: 64B-*-.*vhost.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# VM VHOST - PDR +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 
'L2XCFWD')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-.*vhost.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*vhost.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# CRYPTO - NDR +- + type: "plot" + title: "TSA: 64B-*-.*ipsec.*-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*ipsec.*-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# CRYPTO - PDR +- + type: "plot" + title: "TSA: 64B-*-.*ipsec.*-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'PDRDISC' and not 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-.*ipsec.*-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# Container memif - NDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-ndrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + 
"plot-throughput-speedup-analysis" + +# Container memif - PDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-pdrdisc" + data: + "plot-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# Container orchestrated - NDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc" + data: + "plot-ligato-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc" + data: + "plot-ligato-throughput-speedup-analysis" + filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput-speedup-analysis" + +# Container orchestrated - PDR +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc" + data: + "plot-ligato-throughput-speedup-analysis" + filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +- + type: "plot" + title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_throughput_speedup_analysis" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc" + data: + "plot-ligato-throughput-speedup-analysis" + filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + - "tags" + layout: + title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput-speedup-analysis" + +# Plot packets per second + +# VPP L2 sel1 +- + type: "plot" + title: "VPP Performance 
64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-ndrdisc" + data: + "plot-vpp-throughput-latency" + # Keep this formatting, the filter is enclosed with " (quotation mark) and + # each tag is enclosed with ' (apostrophe). + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel1-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel1-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +# VPP L2 sel2 +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel2-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'FEATURE' and ('ACL10' or 'ACL50') and '10k_FLOWS' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + 
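In these packets-per-second box plots, the keys under traces (hoverinfo, boxpoints, whiskerwidth) are plotly Box attributes passed through to the trace, and each box summarizes the repeated NDR/PDR results of one test. A sketch with invented numbers:

    import plotly.graph_objs as go
    import plotly.offline as ploff

    runs = [9.1, 9.3, 8.9, 9.2, 9.0]  # repeated NDR results in Mpps, invented
    box = go.Box(y=runs, name="64B-1t1c-eth-l2xcbase-ndrdisc",
                 hoverinfo="x+y", boxpoints="outliers", whiskerwidth=0)
    ploff.plot(go.Figure(data=[box]), filename="ndr-box.html", auto_open=False)

The latency plots near the end of this file reuse the same Box trace, only with boxmean: False in place of the throughput trace options.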
output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel2-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'FEATURE' and ('ACL10' or 'ACL50') and '10k_FLOWS' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel2-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'FEATURE' and ('ACL10' or 'ACL50') and '10k_FLOWS' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel2-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'FEATURE' and ('ACL10' or 'ACL50') and '10k_FLOWS' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +# VPP IP4 +- + type: "plot" + title: "VPP Performance 64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-ip4-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP4FWD' and not 'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-ip4-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP4FWD' and not 'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-ip4-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 
('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP4FWD' and not 'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-ip4-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP4FWD' and not 'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" + layout: + "plot-throughput" + +# VPP IP6 +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" + layout: + 
"plot-throughput" + +# VPP SRv6 +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6*srv6*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6*srv6*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6*srv6*ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6*srv6*ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6*srv6*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6*srv6*pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6*srv6*pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6*srv6*pdrdisc" + layout: + "plot-throughput" + +# VPP IP4_overlay +- + type: "plot" + title: "VPP Performance 64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + 
output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput" + +# VPP IP6_overlay +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" + layout: + "plot-throughput" + +# VPP VM VHOST +- + 
type: "plot" + title: "VPP Performance 64B-1t1c-.*vhost.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel1-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*vhost.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*vhost.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel1-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*vhost.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-.*vhost.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel1-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*vhost.*-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*vhost.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel1-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*vhost.*-pdrdisc" + layout: + "plot-throughput" + +# VPP VM VHOST SELECTION +- + type: "plot" + title: "VPP Performance 64B-1t1c-.*vhost.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel2-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*vhost.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*vhost.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel2-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*vhost.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-.*vhost.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel2-pdrdisc" + data: + 
"plot-vpp-throughput-latency" + filter: "'64B' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*vhost.*-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*vhost.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel2-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*vhost.*-pdrdisc" + layout: + "plot-throughput" + +# VPP CRYPTO +- + type: "plot" + title: "VPP Performance 64B-1t1c-.*ipsec.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ipsechw-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and '1T1C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*ipsec.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*ipsec.*-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ipsechw-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and '2T2C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*ipsec.*-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-.*ipsec.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ipsechw-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-.*ipsec.*-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-.*ipsec.*-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ipsechw-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-.*ipsec.*-pdrdisc" + layout: + "plot-throughput" + +# DPDK +- + type: "plot" + title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 
'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-pdrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-pdrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-pdrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP4FWD'" + parameters: + - "throughput" + - 
"parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-pdrdisc" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP4FWD'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc" + layout: + "plot-throughput" + +# Plot latency + +# VPP L2 sel1 +- + type: "plot" + title: "VPP Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel1-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel1-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +# VPP L2 sel2 +- + type: "plot" + title: "VPP Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-l2-sel2-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('FEATURE' and 'ACL50' and '10k_FLOWS') and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-l2-sel2-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('FEATURE' and 'ACL50' and '10k_FLOWS') and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +# VPP IP4 +- + type: "plot" + title: "VPP Latency 64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-ip4-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP4FWD' and not 
'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-ip4-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP4FWD' and not 'ACL1' and not 'ACL10' and not '100_FLOWS' and not '100k_FLOWS' and not 'IPSEC' and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-latency" + +# VPP IP6 +- + type: "plot" + title: "VPP Latency 78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" + layout: + "plot-latency" + +# VPP SRv6 +- + type: "plot" + title: "VPP Latency 78B-1t1c-ethip6*srv6*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-1t1c-ethip6*srv6*ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 78B-2t2c-ethip6*srv6*ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-2t2c-ethip6*srv6*ndrdisc" + layout: + "plot-latency" + +# VPP IP4_overlay +- + type: "plot" + title: "VPP Latency 64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ethip4-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP 
Latency 64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ethip4-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'ENCAP' and 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-latency" + +# VPP IP6_overlay +- + type: "plot" + title: "VPP Latency 78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'NDRDISC' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'78B' and 'ENCAP' and 'NDRDISC' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" + layout: + "plot-latency" + +# VPP VM VHOST +- + type: "plot" + title: "VPP Latency 64B-1t1c-.*vhost.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel1-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-.*vhost.*-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-.*vhost.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel1-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-.*vhost.*-ndrdisc" + layout: + "plot-latency" + +# VPP VM VHOST selection +- + type: "plot" + title: "VPP Latency 64B-1t1c-.*vhost.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-vhost-sel2-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-.*vhost.*-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-.*vhost.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-vhost-sel2-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'NDRDISC' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'" + parameters: + - "latency" + - "parent" + 
traces: + boxmean: False + layout: + title: "64B-2t2c-.*vhost.*-ndrdisc" + layout: + "plot-latency" + +# VPP CRYPTO +- + type: "plot" + title: "VPP Latency 64B-1t1c-.*ipsec.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-ipsechw-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and '1T1C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-.*ipsec.*-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-.*ipsec.*-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-ipsechw-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and '2T2C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-.*ipsec.*-ndrdisc" + layout: + "plot-latency" + +# DPDK +- + type: "plot" + title: "DPDK Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc-lat50" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "DPDK Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc-lat50" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "DPDK Latency 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc-lat50" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "DPDK Latency 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc-lat50" + data: + "plot-dpdk-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc" + layout: + "plot-latency" + +# Ligato - Throughput + +# Container memif +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-memif-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 
'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-memif-ndrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-memif-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-memif-pdrdisc" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput" + +# Container orchestrated +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-orchestrated-ndrdisc" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-orchestrated-ndrdisc" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: 
"outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-orchestrated-pdrdisc" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput" + +- + type: "plot" + title: "VPP Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + algorithm: "plot_performance_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-orchestrated-pdrdisc" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "throughput" + - "parent" + traces: + hoverinfo: "x+y" + boxpoints: "outliers" + whiskerwidth: 0 + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc" + layout: + "plot-throughput" + +# Ligato - Latency + +# Container memif +- + type: "plot" + title: "VPP Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-memif-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-memif-ndrdisc-lat50" + data: + "plot-vpp-throughput-latency" + filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-latency" + +# Container orchestrated +- + type: "plot" + title: "VPP Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: "plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-1t1c-container-orchestrated-ndrdisc-lat50" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-latency" + +- + type: "plot" + title: "VPP Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + algorithm: 
"plot_latency_box" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/64B-2t2c-container-orchestrated-ndrdisc-lat50" + data: + "plot-ligato-throughput-latency" + filter: "'64B' and ('BASE' or 'SCALE') and 'NDRDISC' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "latency" + - "parent" + traces: + boxmean: False + layout: + title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc" + layout: + "plot-latency" diff --git a/resources/tools/presentation/new/specification_CPTA.yaml b/resources/tools/presentation/new/specification_CPTA.yaml new file mode 100644 index 0000000000..971d7c60a8 --- /dev/null +++ b/resources/tools/presentation/new/specification_CPTA.yaml @@ -0,0 +1,1287 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is the specification of parameters for "Continuous Performance Trending +# and Analysis" feature provided by PAL. + +- + type: "environment" + + paths: + # Top level directories: + ## Working directory + DIR[WORKING]: "_tmp" + ## Build directories + DIR[BUILD,HTML]: "_build" + ## Static .rst files + DIR[RST]: "../../../../docs/cpta" + + # Static html content + DIR[STATIC]: "{DIR[BUILD,HTML]}/_static" + DIR[STATIC,VPP]: "{DIR[STATIC]}/vpp" + # DIR[STATIC,DPDK]: "{DIR[STATIC]}/dpdk" + DIR[STATIC,ARCH]: "{DIR[STATIC]}/archive" + + # Working directories + ## Input data files (.zip, .xml) + DIR[WORKING,DATA]: "{DIR[WORKING]}/data" + ## Static source files from git + DIR[WORKING,SRC]: "{DIR[WORKING]}/src" + DIR[WORKING,SRC,STATIC]: "{DIR[WORKING,SRC]}/_static" + + # .css patch file + DIR[CSS_PATCH_FILE]: "{DIR[STATIC]}/theme_overrides.css" + DIR[CSS_PATCH_FILE2]: "{DIR[WORKING,SRC,STATIC]}/theme_overrides.css" + + urls: + URL[JENKINS,CSIT]: "https://jenkins.fd.io/view/csit/job" + URL[NEXUS,LOG]: "https://logs.fd.io/production/vex-yul-rot-jenkins-1" + URL[NEXUS]: "https://docs.fd.io/csit" + DIR[NEXUS]: "trending/_static/archive" + + make-dirs: + # List the directories which are created while preparing the environment. + # All directories MUST be defined in "paths" section. + - "DIR[WORKING,DATA]" + - "DIR[WORKING,SRC,STATIC]" + - "DIR[BUILD,HTML]" + - "DIR[STATIC,VPP]" + - "DIR[STATIC,ARCH]" + build-dirs: + # List the directories where the results (build) is stored. + # All directories MUST be defined in "paths" section. 
+ - "DIR[BUILD,HTML]" + +- + type: "configuration" + + data-sets: + plot-performance-trending: + csit-vpp-perf-mrr-daily-master: +# - 15 +# - 20 +# - 25 +# - 30 +# - 35 +# - 40 +# - 45 +# - 50 +# - 55 +# - 60 +# - 65 +# - 70 +# - 75 +# - 80 +# - 85 +# - 90 +# - 95 + start: 15 + end: "lastCompletedBuild" # "lastSuccessfulBuild" # take all from the 'start' + + plot-layouts: + plot-cpta: + title: "" + autosize: False + showlegend: True + width: 1100 + height: 800 + yaxis: + showticklabels: True + tickformat: ".3s" + title: "Throughput [pps]" + hoverformat: ".4s" + range: [] + gridcolor: "rgb(238, 238, 238)" + linecolor: "rgb(238, 238, 238)" + showline: True + zeroline: False + tickcolor: "rgb(238, 238, 238)" + linewidth: 1 + showgrid: True + xaxis: + title: 'csit-vpp-perf-mrr-daily-master-build/vpp-build' + type: "date" + autorange: True + fixedrange: False + showgrid: True + gridcolor: "rgb(238, 238, 238)" + showline: True + linecolor: "rgb(238, 238, 238)" + zeroline: False + linewidth: 1 + showticklabels: True + tickcolor: "rgb(238, 238, 238)" + autotick: True + tickformat: "%m%d" + rangeselector: + buttons: + - count: 14 + label: "2w" + step: "day" + stepmode: "backward" + - count: 1 + label: "1m" + step: "month" + stepmode: "backward" + - count: 2 + label: "2m" + step: "month" + stepmode: "backward" + - count: 3 + label: "3m" + step: "month" + stepmode: "backward" + - step: "all" + # rangeslider: {} + margin: + r: 20 + # b: 200 + t: 5 + l: 70 + legend: + orientation: "h" + # y: -0.5 + xanchor: "center" + traceorder: "normal" # "grouped" does not work: bug https://github.com/plotly/plotly.js/issues/1913 + tracegroupgap: 20 + bordercolor: "rgb(238, 238, 238)" + # borderwidth: 1 + hoverlabel: + namelength: -1 + +- + type: "static" + src-path: "{DIR[RST]}" + dst-path: "{DIR[WORKING,SRC]}" + +- + type: "input" # Ignored in debug mode +# general: +# file-name: "robot-plugin.zip" +# file-format: ".zip" +# download-path: "{job}/{build}/robot/report/*zip*/{filename}" +# extract: "robot-plugin/output.xml" + general: + file-name: "output.xml.gz" + file-format: ".gz" + download-path: "{job}/{build}/archives/{filename}" + extract: "output.xml" + builds: + csit-vpp-perf-mrr-daily-master: +# - 15 +# - 20 +# - 25 +# - 30 +# - 35 +# - 40 +# - 45 +# - 50 +# - 55 +# - 60 +# - 65 +# - 70 +# - 75 +# - 80 +# - 85 +# - 90 +# - 95 + start: 15 + end: "lastCompletedBuild" # take all from the 'start' + +- + type: "output" + output: +# "report" + "CPTA" # Continuous Performance Trending and Analysis + format: + html: + - full + pdf: + - minimal + +################################################################################ +### T A B L E S ### +################################################################################ + +- + type: "table" + title: "Performance trending dashboard" + algorithm: "table_performance_trending_dashboard" + output-file-ext: ".csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c" + data: "plot-performance-trending" + filter: "'MRR' and '1T1C'" + parameters: + - "name" + - "parent" + - "result" + ignore-list: + - "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc01-64b-1t1c-eth-l2bdscale1mmaclrn-ndrdisc" + outlier-const: 1.5 + window: 14 + evaluated-window: 14 + long-trend-window: 180 + +- + type: "table" + title: "Performance trending dashboard" + algorithm: "table_performance_trending_dashboard" + output-file-ext: ".csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t2c" + data: "plot-performance-trending" + filter: "'MRR' and 
'2T2C'" + parameters: + - "name" + - "parent" + - "result" + ignore-list: + - "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc05-64b-2t2c-eth-l2bdscale1mmaclrn-ndrdisc" + outlier-const: 1.5 + window: 14 + evaluated-window: 14 + long-trend-window: 180 + +- + type: "table" + title: "Performance trending dashboard" + algorithm: "table_performance_trending_dashboard" + output-file-ext: ".csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c" + data: "plot-performance-trending" + filter: "'MRR' and '4T4C'" + parameters: + - "name" + - "parent" + - "result" + ignore-list: + - "tests.vpp.perf.l2.10ge2p1x520-eth-l2bdscale1mmaclrn-mrr.tc09-64b-4t4c-eth-l2bdscale1mmaclrn-ndrdisc" + outlier-const: 1.5 + window: 14 + evaluated-window: 14 + long-trend-window: 180 + +- + type: "table" + title: "HTML performance trending dashboard 1t1c" + algorithm: "table_performance_trending_dashboard_html" + input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c.csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-1t1c.rst" + +- + type: "table" + title: "HTML performance trending dashboard 2t2c" + algorithm: "table_performance_trending_dashboard_html" + input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t2c.csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-2t2c.rst" + +- + type: "table" + title: "HTML performance trending dashboard 4t4c" + algorithm: "table_performance_trending_dashboard_html" + input-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.csv" + output-file: "{DIR[STATIC,VPP]}/performance-trending-dashboard-4t4c.rst" + + +################################################################################ +### C P T A ### +################################################################################ + +# Plots VPP Continuous Performance Trending and Analysis +- + type: "cpta" + title: "Continuous Performance Trending and Analysis" + algorithm: "cpta" + output-file-type: ".html" + output-file: "{DIR[STATIC,VPP]}/cpta" + data: "plot-performance-trending" + plots: + +# L2 - x520 + + - title: "VPP 1T1C L2 64B Packet Throughput - Trending" + output-file-name: "l2-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 64B Packet Throughput - Trending" + output-file-name: "l2-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 64B Packet Throughput - Trending" + output-file-name: "l2-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '4T4C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C L2 64B Packet Throughput - Trending" + output-file-name: "l2-feature-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 64B Packet Throughput - Trending" + 
output-file-name: "l2-feature-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 64B Packet Throughput - Trending" + output-file-name: "l2-feature-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '4T4C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + +# L2 - xl710 + + - title: "VPP 1T1C L2 64B Packet Throughput - Trending" + output-file-name: "l2-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 64B Packet Throughput - Trending" + output-file-name: "l2-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 64B Packet Throughput - Trending" + output-file-name: "l2-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '4T4C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + +# L2 - x710 + + - title: "VPP 1T1C L2 64B Packet Throughput - Trending" + output-file-name: "l2-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 64B Packet Throughput - Trending" + output-file-name: "l2-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 64B Packet Throughput - Trending" + output-file-name: "l2-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '4T4C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C L2 64B Packet Throughput - Trending" + output-file-name: "l2-feature-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 64B Packet Throughput - Trending" + output-file-name: "l2-feature-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 64B Packet Throughput - Trending" + output-file-name: "l2-feature-4t4c-x710" + data: 
"plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '4T4C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv4 - x520 + + - title: "VPP 1T1C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '4T4C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '1T1C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '2T2C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'FEATURE' and '4T4C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv4 - xl710 + + - title: "VPP 1T1C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE' or 'FEATURE') and '2T2C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and ('BASE' or 'SCALE' or 'FEATURE') and '4T4C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv4 - x710 + + - title: "VPP 1T1C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: 
"VPP 2T2C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '2T2C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and ('BASE' or 'SCALE') and '4T4C' and 'IP4FWD' and not 'FEATURE' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '1T1C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '2T2C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 64B Packet Throughput - Trending" + output-file-name: "ip4-feature-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'FEATURE' and '4T4C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv4 Tunnels - x520 + + - title: "VPP 1T1C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: "ip4-tunnels-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'MRR' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: "ip4-tunnels-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'MRR' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: "ip4-tunnels-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'MRR' and '4T4C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv4 Tunnels - x710 + + - title: "VPP 1T1C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: "ip4-tunnels-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'ENCAP' and 'MRR' and '1T1C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: "ip4-tunnels-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'ENCAP' and 'MRR' and '2T2C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv4 Tunnels 64B Packet Throughput - Trending" + output-file-name: 
"ip4-tunnels-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'ENCAP' and 'MRR' and '4T4C' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv6 - x520 + + - title: "VPP 1T1C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '4T4C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv6 - xl710 + + - title: "VPP 1T1C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '4T4C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# IPv6 - x710 + + - title: "VPP 1T1C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPv6 78B Packet Throughput - Trending" + output-file-name: "ip6-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '4T4C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - x520, 64B + + - title: "VPP 1T1C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' 
and '64B' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - x520, IMIX + + - title: "VPP 1T1C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and 'IMIX' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and 'IMIX' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'MRR' and 'IMIX' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - xl710, 64B + + - title: "VPP 1T1C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - xl710, IMIX + + - title: "VPP 1T1C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and 'IMIX' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: 
"plot-cpta" + + - title: "VPP 2T2C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and 'IMIX' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'MRR' and 'IMIX' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - x710, 64B + + - title: "VPP 1T1C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif 64B Packet Throughput - Trending" + output-file-name: "container-memif-l2-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and '64B' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# Container memif - x520, IMIX + + - title: "VPP 1T1C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and 'IMIX' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and 'IMIX' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C L2 Container memif IMIX Packet Throughput - Trending" + output-file-name: "container-memif-imix-l2-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'MRR' and 'IMIX' and 'BASE' and '4T4C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x520, ethip4, 64B + + - title: "VPP 1T1C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-2t2c-x520" + data: "plot-performance-trending" + filter: 
"'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '4T4C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x520, ethip4, IMIX + + - title: "VPP 1T1C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '4T4C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x520, eth, 64B + + - title: "VPP 1T1C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x520, eth, IMIX + + - title: "VPP 1T1C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 
'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - xl710, eth, 64B + + - title: "VPP 1T1C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - xl710, eth, IMIX + + - title: "VPP 1T1C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'IMIX' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'IMIX' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-4t4c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and 'IMIX' and 'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x710, ethip4, 64B + + - title: "VPP 1T1C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost ethip4 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-ethip4-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '4T4C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x710, ethip4, IMIX + + - title: "VPP 1T1C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') 
and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost ethip4 IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-ethip4-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '4T4C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x710, eth, 64B + + - title: "VPP 1T1C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth 64B Packet Throughput - Trending" + output-file-name: "vm-vhost-eth-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# VM vhost - x710, eth, IMIX + + - title: "VPP 1T1C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-2t2c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C VM vhost eth IMIX Packet Throughput - Trending" + output-file-name: "vm-vhost-imix-eth-4t4c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '4T4C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# IPSec + + - title: "VPP 1T1C IPSec 64B Packet Throughput - Trending" + output-file-name: "ipsec-1t1c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'IP4FWD' and 'MRR' and '1T1C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C IPSec 64B Packet Throughput - Trending" + output-file-name: "ipsec-2t2c-xl710" + data: "plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'IP4FWD' and 'MRR' and '2T2C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C IPSec 64B Packet Throughput - Trending" + output-file-name: "ipsec-4t4c-xl710" + data: 
"plot-performance-trending" + filter: "'NIC_Intel-XL710' and '64B' and 'IP4FWD' and 'MRR' and '4T4C' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN') and not 'VHOST'" + parameters: + - "result" + layout: "plot-cpta" + +# SRv6 - x520 + + - title: "VPP 1T1C SRv6 78B MRR Trending" + output-file-name: "srv6-78b-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'MRR' and '1T1C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C SRv6 78B MRR Trending" + output-file-name: "srv6-78b-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'MRR' and '2T2C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C SRv6 78B MRR Trending" + output-file-name: "srv6-78b-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '78B' and 'MRR' and '4T4C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C SRv6 IMIX MRR Trending" + output-file-name: "srv6-imix-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '1T1C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C SRv6 IMIX MRR Trending" + output-file-name: "srv6-imix-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '2T2C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C SRv6 IMIX MRR Trending" + output-file-name: "srv6-imix-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '4T4C' and 'SRv6'" + parameters: + - "result" + layout: "plot-cpta" + +# Link Bonding - x520 + + - title: "VPP 1T1C Link Bonding 64B MRR Trending" + output-file-name: "lb-64b-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '1T1C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C Link Bonding 64B MRR Trending" + output-file-name: "lb-64b-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '2T2C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C Link Bonding 64B MRR Trending" + output-file-name: "lb-64b-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '4T4C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 1T1C Link Bonding IMIX MRR Trending" + output-file-name: "lb-imix-1t1c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '1T1C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 2T2C Link Bonding IMIX MRR Trending" + output-file-name: "lb-imix-2t2c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '2T2C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + + - title: "VPP 4T4C Link Bonding IMIX MRR Trending" + output-file-name: "lb-imix-4t4c-x520" + data: "plot-performance-trending" + filter: "'NIC_Intel-X520-DA2' and 'IMIX' and 'MRR' and '4T4C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" + +# Link Bonding - x710 + + - title: "VPP 1T1C Link Bonding 64B MRR Trending" + output-file-name: "lb-64b-1t1c-x710" + data: "plot-performance-trending" + filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '1T1C' and 'LBOND'" + parameters: + - "result" + layout: "plot-cpta" 
+# Link Bonding - x710
+
+    - title: "VPP 1T1C Link Bonding 64B MRR Trending"
+      output-file-name: "lb-64b-1t1c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '1T1C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
+
+    - title: "VPP 2T2C Link Bonding 64B MRR Trending"
+      output-file-name: "lb-64b-2t2c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '2T2C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
+
+    - title: "VPP 4T4C Link Bonding 64B MRR Trending"
+      output-file-name: "lb-64b-4t4c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and '64B' and 'MRR' and '4T4C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
+
+    - title: "VPP 1T1C Link Bonding IMIX MRR Trending"
+      output-file-name: "lb-imix-1t1c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '1T1C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
+
+    - title: "VPP 2T2C Link Bonding IMIX MRR Trending"
+      output-file-name: "lb-imix-2t2c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '2T2C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
+
+    - title: "VPP 4T4C Link Bonding IMIX MRR Trending"
+      output-file-name: "lb-imix-4t4c-x710"
+      data: "plot-performance-trending"
+      filter: "'NIC_Intel-X710' and 'IMIX' and 'MRR' and '4T4C' and 'LBOND'"
+      parameters:
+      - "result"
+      layout: "plot-cpta"
diff --git a/resources/tools/presentation/new/specification_parser.py b/resources/tools/presentation/new/specification_parser.py
new file mode 100644
index 0000000000..ebd84530a3
--- /dev/null
+++ b/resources/tools/presentation/new/specification_parser.py
@@ -0,0 +1,626 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Specification
+
+Parsing of the specification YAML file.
+"""
+
+
+import logging
+from yaml import load, YAMLError
+from pprint import pformat
+
+from errors import PresentationError
+from utils import get_last_successful_build_number
+from utils import get_last_completed_build_number
+
+
+class Specification(object):
+    """Specification of the Presentation and Analytics Layer (PAL).
+
+    - based on the specification YAML file
+    - the presentation and analytics layer is model driven
+    """
+
+    # Tags are used in the specification YAML file and replaced while the
+    # file is parsed.
+    TAG_OPENER = "{"
+    TAG_CLOSER = "}"
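+    # A tag has the form "{NAME}"; during parsing it is replaced by the
+    # value stored under the key "NAME" (see _replace_tags below).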
+
+    def __init__(self, cfg_file):
+        """Initialization.
+
+        :param cfg_file: File handle of the specification YAML file.
+        :type cfg_file: BinaryIO
+        """
+        self._cfg_file = cfg_file
+        self._cfg_yaml = None
+
+        self._specification = {"environment": dict(),
+                               "configuration": dict(),
+                               "static": dict(),
+                               "input": dict(),
+                               "output": dict(),
+                               "tables": list(),
+                               "plots": list(),
+                               "files": list(),
+                               "cpta": dict()}
+
+    @property
+    def specification(self):
+        """Getter - specification.
+
+        :returns: Specification.
+        :rtype: dict
+        """
+        return self._specification
+
+    @property
+    def environment(self):
+        """Getter - environment.
+
+        :returns: Environment specification.
+        :rtype: dict
+        """
+        return self._specification["environment"]
+
+    @property
+    def configuration(self):
+        """Getter - configuration.
+
+        :returns: Configuration of PAL.
+        :rtype: dict
+        """
+        return self._specification["configuration"]
+
+    @property
+    def static(self):
+        """Getter - static content.
+
+        :returns: Static content specification.
+        :rtype: dict
+        """
+        return self._specification["static"]
+
+    @property
+    def is_debug(self):
+        """Getter - debug mode.
+
+        :returns: True if debug mode is on, otherwise False.
+        :rtype: bool
+        """
+
+        try:
+            return self.environment["configuration"]["CFG[DEBUG]"] == 1
+        except KeyError:
+            return False
+
+    @property
+    def input(self):
+        """Getter - specification - inputs.
+        - jobs and builds.
+
+        :returns: Inputs.
+        :rtype: dict
+        """
+        return self._specification["input"]
+
+    @property
+    def builds(self):
+        """Getter - builds defined in the specification.
+
+        :returns: Builds defined in the specification.
+        :rtype: dict
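+
+        Example of the returned structure (shape as built by _parse_input):
+            {"<job name>": [{"build": 101, "status": None}, ...]}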
+        """
+        return self.input["builds"]
+
+    @property
+    def output(self):
+        """Getter - specification - output formats and versions to be
+        generated.
+        - formats: html, pdf
+        - versions: full, ...
+
+        :returns: Outputs to be generated.
+        :rtype: dict
+        """
+        return self._specification["output"]
+
+    @property
+    def tables(self):
+        """Getter - tables to be generated.
+
+        :returns: List of specifications of tables to be generated.
+        :rtype: list
+        """
+        return self._specification["tables"]
+
+    @property
+    def plots(self):
+        """Getter - plots to be generated.
+
+        :returns: List of specifications of plots to be generated.
+        :rtype: list
+        """
+        return self._specification["plots"]
+
+    @property
+    def files(self):
+        """Getter - files to be generated.
+
+        :returns: List of specifications of files to be generated.
+        :rtype: list
+        """
+        return self._specification["files"]
+
+    @property
+    def cpta(self):
+        """Getter - Continuous Performance Trending and Analysis to be
+        generated.
+
+        :returns: Specification of the Continuous Performance Trending and
+            Analysis to be generated.
+        :rtype: dict
+        """
+        return self._specification["cpta"]
+
+    def set_input_state(self, job, build_nr, state):
+        """Set the state of the given build of the given job.
+
+        :param job: Job name.
+        :param build_nr: Build number.
+        :param state: The new state.
+        :type job: str
+        :type build_nr: int
+        :type state: str
+        :raises PresentationError: If the job or the build is not defined in
+            the specification file.
+        """
+
+        try:
+            for build in self._specification["input"]["builds"][job]:
+                if build["build"] == build_nr:
+                    build["status"] = state
+                    break
+            else:
+                raise PresentationError("Build '{}' is not defined for job "
+                                        "'{}' in the specification file.".
+                                        format(build_nr, job))
+        except KeyError:
+            raise PresentationError("Job '{}' and build '{}' are not defined "
+                                    "in the specification file.".
+                                    format(job, build_nr))
+
+    def set_input_file_name(self, job, build_nr, file_name):
+        """Set the file name for the given build of the given job.
+
+        :param job: Job name.
+        :param build_nr: Build number.
+        :param file_name: The new file name.
+        :type job: str
+        :type build_nr: int
+        :type file_name: str
+        :raises PresentationError: If the job or the build is not defined in
+            the specification file.
+        """
+
+        try:
+            for build in self._specification["input"]["builds"][job]:
+                if build["build"] == build_nr:
+                    build["file-name"] = file_name
+                    break
+            else:
+                raise PresentationError("Build '{}' is not defined for job "
+                                        "'{}' in the specification file.".
+                                        format(build_nr, job))
+        except KeyError:
+            raise PresentationError("Job '{}' and build '{}' are not defined "
+                                    "in the specification file.".
+                                    format(job, build_nr))
+
+    def _get_build_number(self, job, build_type):
+        """Get the number of a symbolically named build of the given job:
+        - lastSuccessfulBuild
+        - lastCompletedBuild
+
+        :param job: Job name.
+        :param build_type: Build type:
+            - lastSuccessfulBuild
+            - lastCompletedBuild
+        :type job: str
+        :type build_type: str
+        :raises PresentationError: If it is not possible to get the build
+            number.
+        :returns: The build number.
+        :rtype: int
+        """
+
+        if build_type == "lastSuccessfulBuild":
+            ret_code, build_nr, _ = get_last_successful_build_number(
+                self.environment["urls"]["URL[JENKINS,CSIT]"], job)
+        elif build_type == "lastCompletedBuild":
+            ret_code, build_nr, _ = get_last_completed_build_number(
+                self.environment["urls"]["URL[JENKINS,CSIT]"], job)
+        else:
+            raise PresentationError("Not supported build type: '{0}'".
+                                    format(build_type))
+        if ret_code != 0:
+            raise PresentationError("Not possible to get the build number.")
+        try:
+            return int(build_nr)
+        except ValueError as err:
+            raise PresentationError("Not possible to get the build number.\n"
+                                    "Reason: {0}".format(err))
+
+    def _get_type_index(self, item_type):
+        """Get the index of the given item type (environment, input, output,
+        ...) in the specification YAML file.
+
+        :param item_type: Item type: Top level items in the specification
+            YAML file, e.g.: environment, input, output.
+        :type item_type: str
+        :returns: Index of the given item type, or None if not present.
+        :rtype: int
+        """
+
+        index = 0
+        for item in self._cfg_yaml:
+            if item["type"] == item_type:
+                return index
+            index += 1
+        return None
+
+    def _find_tag(self, text):
+        """Find the first tag in the given text. The tag is enclosed by the
+        TAG_OPENER and TAG_CLOSER.
+
+        :param text: Text to be searched.
+        :type text: str
+        :returns: The tag, or None if not found.
+        :rtype: str
+        """
+        try:
+            start = text.index(self.TAG_OPENER)
+            end = text.index(self.TAG_CLOSER, start + 1) + 1
+            return text[start:end]
+        except ValueError:
+            return None
+
+    def _replace_tags(self, data, src_data=None):
+        """Replace tag(s) in the data by their values.
+
+        :param data: The data where the tags will be replaced by their values.
+        :param src_data: Data where the tags are defined. It is a dictionary
+            where the key is the tag and the value is the tag value. If not
+            given, 'data' is used instead.
+        :type data: str or dict
+        :type src_data: dict
+        :returns: Data with the tags replaced.
+        :rtype: str or dict
+        :raises PresentationError: If it is not possible to replace the tag
+            or the data is not of a supported type (str, dict).
+        """
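+
+        # Illustrative example: with src_data = {"DIR[WORKING,DATA]": "_tmp"},
+        # the string "{DIR[WORKING,DATA]}/file" becomes "_tmp/file".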
+ """ + + logging.info("Parsing specification file: environment ...") + + idx = self._get_type_index("environment") + if idx is None: + return None + + try: + self._specification["environment"]["configuration"] = \ + self._cfg_yaml[idx]["configuration"] + except KeyError: + self._specification["environment"]["configuration"] = None + + try: + self._specification["environment"]["paths"] = \ + self._replace_tags(self._cfg_yaml[idx]["paths"]) + except KeyError: + self._specification["environment"]["paths"] = None + + try: + self._specification["environment"]["urls"] = \ + self._replace_tags(self._cfg_yaml[idx]["urls"]) + except KeyError: + self._specification["environment"]["urls"] = None + + try: + self._specification["environment"]["make-dirs"] = \ + self._cfg_yaml[idx]["make-dirs"] + except KeyError: + self._specification["environment"]["make-dirs"] = None + + try: + self._specification["environment"]["remove-dirs"] = \ + self._cfg_yaml[idx]["remove-dirs"] + except KeyError: + self._specification["environment"]["remove-dirs"] = None + + try: + self._specification["environment"]["build-dirs"] = \ + self._cfg_yaml[idx]["build-dirs"] + except KeyError: + self._specification["environment"]["build-dirs"] = None + + logging.info("Done.") + + def _parse_configuration(self): + """Parse configuration of PAL in the specification YAML file. + """ + + logging.info("Parsing specification file: configuration ...") + + idx = self._get_type_index("configuration") + if idx is None: + logging.warning("No configuration information in the specification " + "file.") + return None + + try: + self._specification["configuration"] = self._cfg_yaml[idx] + + except KeyError: + raise PresentationError("No configuration defined.") + + # Data sets: Replace ranges by lists + for set_name, data_set in self.configuration["data-sets"].items(): + for job, builds in data_set.items(): + if builds: + if isinstance(builds, dict): + build_nr = builds.get("end", None) + try: + build_nr = int(build_nr) + except ValueError: + # defined as a range + build_nr = self._get_build_number(job, build_nr) + builds = [x for x in range(builds["start"], build_nr+1)] + self.configuration["data-sets"][set_name][job] = builds + logging.info("Done.") + + def _parse_input(self): + """Parse input specification in the specification YAML file. + + :raises: PresentationError if there are no data to process. + """ + + logging.info("Parsing specification file: input ...") + + idx = self._get_type_index("input") + if idx is None: + raise PresentationError("No data to process.") + + try: + for key, value in self._cfg_yaml[idx]["general"].items(): + self._specification["input"][key] = value + self._specification["input"]["builds"] = dict() + + for job, builds in self._cfg_yaml[idx]["builds"].items(): + if builds: + if isinstance(builds, dict): + build_nr = builds.get("end", None) + try: + build_nr = int(build_nr) + except ValueError: + # defined as a range + build_nr = self._get_build_number(job, build_nr) + builds = [x for x in range(builds["start"], build_nr+1)] + self._specification["input"]["builds"][job] = list() + for build in builds: + self._specification["input"]["builds"][job]. \ + append({"build": build, "status": None}) + + else: + logging.warning("No build is defined for the job '{}'. " + "Trying to continue without it.". + format(job)) + except KeyError: + raise PresentationError("No data to process.") + + logging.info("Done.") + + def _parse_output(self): + """Parse output specification in the specification YAML file. 
+        for set_name, data_set in self.configuration["data-sets"].items():
+            for job, builds in data_set.items():
+                if builds:
+                    if isinstance(builds, dict):
+                        build_nr = builds.get("end", None)
+                        try:
+                            build_nr = int(build_nr)
+                        except ValueError:
+                            # "end" is a symbolic build name, not a number
+                            build_nr = self._get_build_number(job, build_nr)
+                        builds = [x for x in range(builds["start"],
+                                                   build_nr + 1)]
+                        self.configuration["data-sets"][set_name][job] = \
+                            builds
+        logging.info("Done.")
+
+    def _parse_input(self):
+        """Parse the input specification in the specification YAML file.
+
+        :raises PresentationError: If there are no data to process.
+        """
+
+        logging.info("Parsing specification file: input ...")
+
+        idx = self._get_type_index("input")
+        if idx is None:
+            raise PresentationError("No data to process.")
+
+        try:
+            for key, value in self._cfg_yaml[idx]["general"].items():
+                self._specification["input"][key] = value
+            self._specification["input"]["builds"] = dict()
+
+            for job, builds in self._cfg_yaml[idx]["builds"].items():
+                if builds:
+                    if isinstance(builds, dict):
+                        build_nr = builds.get("end", None)
+                        try:
+                            build_nr = int(build_nr)
+                        except ValueError:
+                            # "end" is a symbolic build name, not a number
+                            build_nr = self._get_build_number(job, build_nr)
+                        builds = [x for x in range(builds["start"],
+                                                   build_nr + 1)]
+                    self._specification["input"]["builds"][job] = list()
+                    for build in builds:
+                        self._specification["input"]["builds"][job]. \
+                            append({"build": build, "status": None})
+                else:
+                    logging.warning("No build is defined for the job '{}'. "
+                                    "Trying to continue without it.".
+                                    format(job))
+        except KeyError:
+            raise PresentationError("No data to process.")
+
+        logging.info("Done.")
+
+    def _parse_output(self):
+        """Parse the output specification in the specification YAML file.
+
+        :raises PresentationError: If there is no output defined.
+        """
+
+        logging.info("Parsing specification file: output ...")
+
+        idx = self._get_type_index("output")
+        if idx is None:
+            raise PresentationError("No output defined.")
+
+        try:
+            self._specification["output"] = self._cfg_yaml[idx]
+        except (KeyError, IndexError):
+            raise PresentationError("No output defined.")
+
+        logging.info("Done.")
+
+    def _parse_static(self):
+        """Parse the specification of the static content in the specification
+        YAML file.
+        """
+
+        logging.info("Parsing specification file: static content ...")
+
+        idx = self._get_type_index("static")
+        if idx is None:
+            logging.warning("No static content specified.")
+            return None
+
+        for key, value in self._cfg_yaml[idx].items():
+            if isinstance(value, str):
+                try:
+                    self._cfg_yaml[idx][key] = self._replace_tags(
+                        value, self._specification["environment"]["paths"])
+                except KeyError:
+                    pass
+
+        self._specification["static"] = self._cfg_yaml[idx]
+
+        logging.info("Done.")
+
+    def _parse_elements(self):
+        """Parse the elements (tables, plots) specification in the
+        specification YAML file.
+        """
+
+        logging.info("Parsing specification file: elements ...")
+
+        count = 1
+        for element in self._cfg_yaml:
+            try:
+                element["output-file"] = self._replace_tags(
+                    element["output-file"],
+                    self._specification["environment"]["paths"])
+            except KeyError:
+                pass
+
+            try:
+                element["input-file"] = self._replace_tags(
+                    element["input-file"],
+                    self._specification["environment"]["paths"])
+            except KeyError:
+                pass
+
+            # Add data sets to the elements:
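+            # For example, data: "plot-performance-trending" (a name used by
+            # the CPTA plots above) is replaced by the job/builds dict of
+            # that name from the configuration section's data-sets.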
+            if isinstance(element.get("data", None), str):
+                data_set = element["data"]
+                try:
+                    element["data"] = \
+                        self.configuration["data-sets"][data_set]
+                except KeyError:
+                    raise PresentationError("Data set {0} is not defined in "
+                                            "the configuration section.".
+                                            format(data_set))
+
+            if element["type"] == "table":
+                logging.info("  {:3d} Processing a table ...".format(count))
+                try:
+                    element["template"] = self._replace_tags(
+                        element["template"],
+                        self._specification["environment"]["paths"])
+                except KeyError:
+                    pass
+                self._specification["tables"].append(element)
+                count += 1
+
+            elif element["type"] == "plot":
+                logging.info("  {:3d} Processing a plot ...".format(count))
+
+                # Add the layout to the plots:
+                layout = element["layout"].get("layout", None)
+                if layout is not None:
+                    element["layout"].pop("layout")
+                    try:
+                        for key, val in (self.configuration["plot-layouts"]
+                                         [layout].items()):
+                            element["layout"][key] = val
+                    except KeyError:
+                        raise PresentationError("Layout {0} is not defined "
+                                                "in the configuration "
+                                                "section.".format(layout))
+                self._specification["plots"].append(element)
+                count += 1
+
+            elif element["type"] == "file":
+                logging.info("  {:3d} Processing a file ...".format(count))
+                try:
+                    element["dir-tables"] = self._replace_tags(
+                        element["dir-tables"],
+                        self._specification["environment"]["paths"])
+                except KeyError:
+                    pass
+                self._specification["files"].append(element)
+                count += 1
+
+            elif element["type"] == "cpta":
+                logging.info("  {:3d} Processing Continuous Performance "
+                             "Trending and Analysis ...".format(count))
+
+                for plot in element["plots"]:
+                    # Add the layout to the plots:
+                    layout = plot.get("layout", None)
+                    if layout is not None:
+                        try:
+                            plot["layout"] = \
+                                self.configuration["plot-layouts"][layout]
+                        except KeyError:
+                            raise PresentationError(
+                                "Layout {0} is not defined in the "
+                                "configuration section.".format(layout))
+                    # Add data sets:
+                    if isinstance(plot.get("data", None), str):
+                        data_set = plot["data"]
+                        try:
+                            plot["data"] = \
+                                self.configuration["data-sets"][data_set]
+                        except KeyError:
+                            raise PresentationError(
+                                "Data set {0} is not defined in "
+                                "the configuration section.".
+                                format(data_set))
+                self._specification["cpta"] = element
+                count += 1
+
+        logging.info("Done.")
+
+    def read_specification(self):
+        """Parse the specification in the specification YAML file.
+
+        :raises PresentationError: If an error occurred while parsing the
+            specification file.
+        """
+        try:
+            self._cfg_yaml = load(self._cfg_file)
+        except YAMLError as err:
+            raise PresentationError(msg="An error occurred while parsing the "
+                                        "specification file.",
+                                    details=str(err))
+
+        self._parse_env()
+        self._parse_configuration()
+        self._parse_input()
+        self._parse_output()
+        self._parse_static()
+        self._parse_elements()
+
+        logging.debug("Specification: \n{}".
+                      format(pformat(self._specification)))
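+
+
+# Example usage (a sketch; the file name is illustrative):
+#     spec = Specification(open("specification.yaml"))
+#     spec.read_specification()
+#     print(spec.environment["paths"]["DIR[WORKING,DATA]"])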
+ """ + + src = spec.static.get("src-path", None) + dst = spec.static.get("dst-path", None) + if src is None or dst is None: + logging.warning("No static content specified, skipping") + return + + # Copy all the static content to the build directory: + logging.info("Copying the static content ...") + logging.info(" Source: {0}".format(src)) + logging.info(" Destination: {0}".format(dst)) + + try: + if isdir(dst): + rmtree(dst) + + copytree(src, dst) + + makedirs(spec.environment["paths"]["DIR[WORKING,SRC,STATIC]"]) + + except (Error, OSError) as err: + raise PresentationError("Not possible to process the static content.", + str(err)) + + logging.info("Done.") diff --git a/resources/tools/presentation/new/utils.py b/resources/tools/presentation/new/utils.py new file mode 100644 index 0000000000..83f4f6249b --- /dev/null +++ b/resources/tools/presentation/new/utils.py @@ -0,0 +1,291 @@ +# Copyright (c) 2018 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""General purpose utilities. +""" + +import multiprocessing +import subprocess +import numpy as np +import pandas as pd +import logging + +from os import walk, makedirs, environ +from os.path import join, isdir +from shutil import move, Error +from math import sqrt + +from errors import PresentationError +from jumpavg.BitCountingClassifier import BitCountingClassifier + + +def mean(items): + """Calculate mean value from the items. + + :param items: Mean value is calculated from these items. + :type items: list + :returns: MEan value. + :rtype: float + """ + + return float(sum(items)) / len(items) + + +def stdev(items): + """Calculate stdev from the items. + + :param items: Stdev is calculated from these items. + :type items: list + :returns: Stdev. + :rtype: float + """ + + avg = mean(items) + variance = [(x - avg) ** 2 for x in items] + stddev = sqrt(mean(variance)) + return stddev + + +def relative_change(nr1, nr2): + """Compute relative change of two values. + + :param nr1: The first number. + :param nr2: The second number. + :type nr1: float + :type nr2: float + :returns: Relative change of nr1. + :rtype: float + """ + + return float(((nr2 - nr1) / nr1) * 100) + + +def get_files(path, extension=None, full_path=True): + """Generates the list of files to process. + + :param path: Path to files. + :param extension: Extension of files to process. If it is the empty string, + all files will be processed. + :param full_path: If True, the files with full path are generated. + :type path: str + :type extension: str + :type full_path: bool + :returns: List of files to process. + :rtype: list + """ + + file_list = list() + for root, _, files in walk(path): + for filename in files: + if extension: + if filename.endswith(extension): + if full_path: + file_list.append(join(root, filename)) + else: + file_list.append(filename) + else: + file_list.append(join(root, filename)) + + return file_list + + +def get_rst_title_char(level): + """Return character used for the given title level in rst files. 
+    """
+
+    return float(((nr2 - nr1) / nr1) * 100)
+
+
+def get_files(path, extension=None, full_path=True):
+    """Generate the list of files to process.
+
+    :param path: Path to files.
+    :param extension: Extension of files to process. If it is the empty
+        string, all files will be processed.
+    :param full_path: If True, the files with full path are generated.
+    :type path: str
+    :type extension: str
+    :type full_path: bool
+    :returns: List of files to process.
+    :rtype: list
+    """
+
+    file_list = list()
+    for root, _, files in walk(path):
+        for filename in files:
+            if extension:
+                if filename.endswith(extension):
+                    if full_path:
+                        file_list.append(join(root, filename))
+                    else:
+                        file_list.append(filename)
+            else:
+                file_list.append(join(root, filename))
+
+    return file_list
+
+
+def get_rst_title_char(level):
+    """Return the character used for the given title level in rst files.
+
+    :param level: Level of the title.
+    :type level: int
+    :returns: Character used for the given title level in rst files.
+    :rtype: str
+    """
+    chars = ('=', '-', '`', "'", '.', '~', '*', '+', '^')
+    if level < len(chars):
+        return chars[level]
+    else:
+        return chars[-1]
+
+
+def execute_command(cmd):
+    """Execute the command in a subprocess and log the stdout and stderr.
+
+    :param cmd: Command to execute.
+    :type cmd: str
+    :returns: Return code, stdout and stderr of the executed command.
+    :rtype: tuple(int, str, str)
+    """
+
+    env = environ.copy()
+    proc = subprocess.Popen(
+        [cmd],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        shell=True,
+        env=env)
+
+    stdout, stderr = proc.communicate()
+
+    if stdout:
+        logging.info(stdout)
+    if stderr:
+        logging.info(stderr)
+
+    if proc.returncode != 0:
+        logging.error("  Command execution failed.")
+    return proc.returncode, stdout, stderr
+
+
+def get_last_successful_build_number(jenkins_url, job_name):
+    """Get the number of the last successful build of the given job.
+
+    :param jenkins_url: Jenkins URL.
+    :param job_name: Job name.
+    :type jenkins_url: str
+    :type job_name: str
+    :returns: Return code, stdout (the build number) and stderr of the
+        fetching command.
+    :rtype: tuple(int, str, str)
+    """
+
+    url = "{}/{}/lastSuccessfulBuild/buildNumber".format(jenkins_url,
+                                                         job_name)
+    cmd = "wget -qO- {url}".format(url=url)
+
+    return execute_command(cmd)
+
+
+def get_last_completed_build_number(jenkins_url, job_name):
+    """Get the number of the last completed build of the given job.
+
+    :param jenkins_url: Jenkins URL.
+    :param job_name: Job name.
+    :type jenkins_url: str
+    :type job_name: str
+    :returns: Return code, stdout (the build number) and stderr of the
+        fetching command.
+    :rtype: tuple(int, str, str)
+    """
+
+    url = "{}/{}/lastCompletedBuild/buildNumber".format(jenkins_url, job_name)
+    cmd = "wget -qO- {url}".format(url=url)
+
+    return execute_command(cmd)
+
+
+def archive_input_data(spec):
+    """Archive the input data used to generate the report.
+
+    :param spec: Specification read from the specification file.
+    :type spec: Specification
+    :raises PresentationError: If it is not possible to archive the input
+        data.
+    """
+
+    logging.info("  Archiving the input data files ...")
+
+    extension = spec.input["file-format"]
+    data_files = get_files(spec.environment["paths"]["DIR[WORKING,DATA]"],
+                           extension=extension)
+    dst = spec.environment["paths"]["DIR[STATIC,ARCH]"]
+    logging.info("    Destination: {0}".format(dst))
+
+    try:
+        if not isdir(dst):
+            makedirs(dst)
+
+        for data_file in data_files:
+            logging.info("    Moving the file: {0} ...".format(data_file))
+            move(data_file, dst)
+
+    except (Error, OSError) as err:
+        raise PresentationError("Not possible to archive the input data.",
+                                str(err))
+
+    logging.info("  Done.")
+
+
+def classify_anomalies(data):
+    """Process the data and return anomalies and trending values.
+
+    Gathers data into groups with a common trend value.
+    Decorates the first value in each group as an outlier, regression,
+    normal or progression.
+
+    :param data: Full data set with unavailable samples replaced by nan.
+    :type data: pandas.Series
+    :returns: Classification and trend values.
+    :rtype: 2-tuple, list of strings and list of floats
+    """
+    bare_data = [sample for _, sample in data.iteritems()
+                 if not np.isnan(sample)]
+    # TODO: Put an analogous iterator into the jumpavg library.
+    groups = BitCountingClassifier.classify(bare_data)
+    groups.reverse()  # Just to use .pop() for FIFO.
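+    # Walk the original series again: the first available sample of each
+    # group carries the group's classification, every other available sample
+    # is "normal", and unavailable (nan) samples are marked "outlier".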
+    classification = []
+    avgs = []
+    active_group = None
+    values_left = 0
+    avg = 0.0
+    for _, sample in data.iteritems():
+        if np.isnan(sample):
+            classification.append("outlier")
+            avgs.append(sample)
+            continue
+        if values_left < 1 or active_group is None:
+            values_left = 0
+            while values_left < 1:  # Skip empty groups.
+                active_group = groups.pop()
+                values_left = len(active_group.values)
+            avg = active_group.metadata.avg
+            classification.append(active_group.metadata.classification)
+            avgs.append(avg)
+            values_left -= 1
+            continue
+        classification.append("normal")
+        avgs.append(avg)
+        values_left -= 1
+    return classification, avgs
+
+
+class Worker(multiprocessing.Process):
+    """Worker class used to process tasks in separate parallel processes.
+    """
+
+    def __init__(self, work_queue, data_queue, func):
+        """Initialization.
+
+        :param work_queue: Queue with items to process.
+        :param data_queue: Shared memory between processes. Queue which keeps
+            the result data. This data is then read by the main process and
+            used in further processing.
+        :param func: Function which is executed by the worker.
+        :type work_queue: multiprocessing.JoinableQueue
+        :type data_queue: multiprocessing.Manager().Queue()
+        :type func: Callable object
+        """
+        super(Worker, self).__init__()
+        self._work_queue = work_queue
+        self._data_queue = data_queue
+        self._func = func
+
+    def run(self):
+        """Method representing the process's activity.
+        """
+
+        while True:
+            try:
+                self.process(self._work_queue.get())
+            finally:
+                self._work_queue.task_done()
+
+    def process(self, item_to_process):
+        """Method executed by the worker.
+
+        :param item_to_process: Data to be processed by the function.
+        :type item_to_process: tuple
+        """
+        self._func(self.pid, self._data_queue, *item_to_process)
diff --git a/resources/tools/presentation/run_cpta.sh b/resources/tools/presentation/run_cpta.sh
index 954f17d53c..15a144d401 100755
--- a/resources/tools/presentation/run_cpta.sh
+++ b/resources/tools/presentation/run_cpta.sh
@@ -2,6 +2,8 @@
 
 set -x
 
+( cd new ; ./run_cpta.sh )
+
 # set default values in config array
 typeset -A DIR
 
@@ -30,5 +32,7 @@ STATUS=$(python pal.py \
     --force)
 RETURN_STATUS=$?
 
+mv new/_build _build/new
+
 echo ${STATUS}
 exit ${RETURN_STATUS}
-- 
2.16.6