diff --git a/src/tools/vppapigen/vppapigen.py b/src/tools/vppapigen/vppapigen.py
index b17ad6d..2b0ce99 100755
--- a/src/tools/vppapigen/vppapigen.py
+++ b/src/tools/vppapigen/vppapigen.py
@@ -1,16 +1,19 @@
 #!/usr/bin/env python3
 
-import ply.lex as lex
-import ply.yacc as yacc
 import sys
 import argparse
 import keyword
 import logging
 import binascii
 import os
-import sys
+from subprocess import Popen, PIPE
+import ply.lex as lex
+import ply.yacc as yacc
 
-log = logging.getLogger('vppapigen')
+assert sys.version_info >= (3, 5), "Unsupported Python version: {}".format(
+    sys.version
+)
+log = logging.getLogger("vppapigen")
 
 # Ensure we don't leave temporary files around
 sys.dont_write_bytecode = True
@@ -26,11 +29,10 @@ seen_imports = {}
 
 
 def global_type_add(name, obj):
-    '''Add new type to the dictionary of types '''
-    type_name = 'vl_api_' + name + '_t'
+    """Add new type to the dictionary of types"""
+    type_name = "vl_api_" + name + "_t"
     if type_name in global_types:
-        raise KeyError("Attempted redefinition of {!r} with {!r}.".format(
-            name, obj))
+        raise KeyError("Attempted redefinition of {!r} with {!r}.".format(name, obj))
     global_types[type_name] = obj
 
 
@@ -42,107 +44,118 @@ def exception_handler(exception_type, exception, traceback):
 #
 # Lexer
 #
-class VPPAPILexer(object):
+class VPPAPILexer:
     def __init__(self, filename):
         self.filename = filename
 
     reserved = {
-        'service': 'SERVICE',
-        'rpc': 'RPC',
-        'returns': 'RETURNS',
-        'null': 'NULL',
-        'stream': 'STREAM',
-        'events': 'EVENTS',
-        'define': 'DEFINE',
-        'typedef': 'TYPEDEF',
-        'enum': 'ENUM',
-        'typeonly': 'TYPEONLY',
-        'manual_print': 'MANUAL_PRINT',
-        'manual_endian': 'MANUAL_ENDIAN',
-        'dont_trace': 'DONT_TRACE',
-        'autoreply': 'AUTOREPLY',
-        'option': 'OPTION',
-        'u8': 'U8',
-        'u16': 'U16',
-        'u32': 'U32',
-        'u64': 'U64',
-        'i8': 'I8',
-        'i16': 'I16',
-        'i32': 'I32',
-        'i64': 'I64',
-        'f64': 'F64',
-        'bool': 'BOOL',
-        'string': 'STRING',
-        'import': 'IMPORT',
-        'true': 'TRUE',
-        'false': 'FALSE',
-        'union': 'UNION',
+        "service": "SERVICE",
+        "rpc": "RPC",
+        "returns": "RETURNS",
+        "null": "NULL",
+        "stream": "STREAM",
+        "events": "EVENTS",
+        "define": "DEFINE",
+        "typedef": "TYPEDEF",
+        "enum": "ENUM",
+        "enumflag": "ENUMFLAG",
+        "typeonly": "TYPEONLY",
+        "manual_print": "MANUAL_PRINT",
+        "manual_endian": "MANUAL_ENDIAN",
+        "dont_trace": "DONT_TRACE",
+        "autoreply": "AUTOREPLY",
+        "autoendian": "AUTOENDIAN",
+        "option": "OPTION",
+        "u8": "U8",
+        "u16": "U16",
+        "u32": "U32",
+        "u64": "U64",
+        "i8": "I8",
+        "i16": "I16",
+        "i32": "I32",
+        "i64": "I64",
+        "f64": "F64",
+        "bool": "BOOL",
+        "string": "STRING",
+        "import": "IMPORT",
+        "true": "TRUE",
+        "false": "FALSE",
+        "union": "UNION",
+        "counters": "COUNTERS",
+        "paths": "PATHS",
+        "units": "UNITS",
+        "severity": "SEVERITY",
+        "type": "TYPE",
+        "description": "DESCRIPTION",
     }
 
-    tokens = ['STRING_LITERAL',
-              'ID', 'NUM'] + list(reserved.values())
+    tokens = ["STRING_LITERAL", "COMMENT", "ID", "NUM"] + list(reserved.values())
 
-    t_ignore_LINE_COMMENT = '//.*'
+    t_ignore_LINE_COMMENT = "//.*"
 
     def t_FALSE(self, t):
-        r'false'
+        r"false"
         t.value = False
         return t
 
     def t_TRUE(self, t):
-        r'false'
+        r"true"
         t.value = True
         return t
 
     def t_NUM(self, t):
-        r'0[xX][0-9a-fA-F]+|-?\d+\.?\d*'
-        base = 16 if t.value.startswith('0x') else 10
-        if '.' in t.value:
+        r"0[xX][0-9a-fA-F]+|-?\d+\.?\d*"
+        base = 16 if t.value.startswith("0x") else 10
+        if "." in t.value:
             t.value = float(t.value)
         else:
             t.value = int(t.value, base)
         return t
 
     def t_ID(self, t):
-        r'[a-zA-Z_][a-zA-Z_0-9]*'
+        r"[a-zA-Z_][a-zA-Z_0-9]*"
         # Check for reserved words
-        t.type = VPPAPILexer.reserved.get(t.value, 'ID')
+        t.type = VPPAPILexer.reserved.get(t.value, "ID")
         return t
 
     # C string
     def t_STRING_LITERAL(self, t):
-        r'\"([^\\\n]|(\\.))*?\"'
-        t.value = str(t.value).replace("\"", "")
+        r"\"([^\\\n]|(\\.))*?\" "
+        t.value = str(t.value).replace('"', "")
         return t
 
     # C or C++ comment (ignore)
-    def t_comment(self, t):
-        r'(/\*(.|\n)*?\*/)|(//.*)'
-        t.lexer.lineno += t.value.count('\n')
+    def t_COMMENT(self, t):
+        r"(/\*(.|\n)*?\*/)|(//.*)"
+        t.lexer.lineno += t.value.count("\n")
+        return t
 
     # Error handling rule
     def t_error(self, t):
-        raise ParseError("Illegal character '{}' ({})"
-                         "in {}: line {}".format(t.value[0],
-                                                 hex(ord(t.value[0])),
-                                                 self.filename,
-                                                 t.lexer.lineno))
-        t.lexer.skip(1)
+        raise ParseError(
+            "Illegal character '{}' ({}) "
+            "in {}: line {}".format(
+                t.value[0], hex(ord(t.value[0])), self.filename, t.lexer.lineno
+            )
+        )
 
     # Define a rule so we can track line numbers
     def t_newline(self, t):
-        r'\n+'
+        r"\n+"
         t.lexer.lineno += len(t.value)
 
     literals = ":{}[];=.,"
 
     # A string containing ignored characters (spaces and tabs)
-    t_ignore = ' \t'
+    t_ignore = " \t"
 
 
-def crc_block_combine(block, crc):
-    s = str(block).encode()
-    return binascii.crc32(s, crc) & 0xffffffff
+def vla_mark_length_field(block):
+    if isinstance(block[-1], Array):
+        lengthfield = block[-1].lengthfield
+        for b in block:
+            if b.fieldname == lengthfield:
+                b.is_lengthfield = True
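A minimal sketch of what vla_mark_length_field() does, assuming the Field and Array classes defined further down in this file (an Array built with a string length records that name as its lengthfield):

    block = [Field("u32", "count"), Array("u32", "entries", "count")]
    vla_mark_length_field(block)
    # The trailing Array names "count" as its length field, so the matching
    # Field is tagged: block[0].is_lengthfield is now True.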
 
 
 def vla_is_last_check(name, block):
@@ -152,35 +165,49 @@ def vla_is_last_check(name, block):
             vla = True
             if i + 1 < len(block):
                 raise ValueError(
-                    'VLA field "{}" must be the last field in message "{}"'
-                    .format(b.fieldname, name))
-        elif b.fieldtype.startswith('vl_api_'):
+                    'VLA field "{}" must be the last field in message "{}"'.format(
+                        b.fieldname, name
+                    )
+                )
+        elif b.fieldtype.startswith("vl_api_"):
             if global_types[b.fieldtype].vla:
                 vla = True
                 if i + 1 < len(block):
                     raise ValueError(
                         'VLA field "{}" must be the last '
-                        'field in message "{}"'
-                        .format(b.fieldname, name))
-        elif b.fieldtype == 'string' and b.length == 0:
+                        'field in message "{}"'.format(b.fieldname, name)
+                    )
+        elif b.fieldtype == "string" and b.length == 0:
             vla = True
             if i + 1 < len(block):
                 raise ValueError(
                     'VLA field "{}" must be the last '
-                    'field in message "{}"'
-                    .format(b.fieldname, name))
+                    'field in message "{}"'.format(b.fieldname, name)
+                )
     return vla
 
 
-class Service():
-    def __init__(self, caller, reply, events=None, stream=False):
+class Processable:
+    type = "<Invalid>"
+
+    def process(self, result):  # -> Dict
+        result[self.type].append(self)
+
+
+class Service(Processable):
+    type = "Service"
+
+    def __init__(self, caller, reply, events=None, stream_message=None, stream=False):
         self.caller = caller
         self.reply = reply
         self.stream = stream
+        self.stream_message = stream_message
         self.events = [] if events is None else events
 
 
-class Typedef():
+class Typedef(Processable):
+    type = "Typedef"
+
     def __init__(self, name, flags, block):
         self.name = name
         self.flags = flags
@@ -189,20 +216,25 @@ class Typedef():
         self.manual_print = False
         self.manual_endian = False
         for f in flags:
-            if f == 'manual_print':
+            if f == "manual_print":
                 self.manual_print = True
-            elif f == 'manual_endian':
+            elif f == "manual_endian":
                 self.manual_endian = True
-
         global_type_add(name, self)
 
         self.vla = vla_is_last_check(name, block)
+        vla_mark_length_field(self.block)
+
+    def process(self, result):
+        result["types"].append(self)
 
     def __repr__(self):
         return self.name + str(self.flags) + str(self.block)
 
 
-class Using():
+class Using(Processable):
+    type = "Using"
+
     def __init__(self, name, flags, alias):
         self.name = name
         self.vla = False
@@ -213,35 +245,45 @@ class Using():
         self.manual_print = False
         self.manual_endian = False
         for f in flags:
-            if f == 'manual_print':
+            if f == "manual_print":
                 self.manual_print = True
-            elif f == 'manual_endian':
+            elif f == "manual_endian":
                 self.manual_endian = True
 
         if isinstance(alias, Array):
-            a = {'type': alias.fieldtype,
-                 'length': alias.length}
+            a = {"type": alias.fieldtype, "length": alias.length}
         else:
-            a = {'type': alias.fieldtype}
+            a = {"type": alias.fieldtype}
         self.alias = a
-        self.crc = str(alias).encode()
+        self.using = alias
+
+        #
+        # Should have been:
+        #  self.crc = str(alias).encode()
+        # but to be backwards compatible use the block ([])
+        #
+        self.crc = str(self.block).encode()
         global_type_add(name, self)
 
+    def process(self, result):  # -> Dict
+        result["types"].append(self)
+
     def __repr__(self):
         return self.name + str(self.alias)
 
 
-class Union():
+class Union(Processable):
+    type = "Union"
+
     def __init__(self, name, flags, block):
-        self.type = 'Union'
         self.manual_print = False
         self.manual_endian = False
         self.name = name
 
         for f in flags:
-            if f == 'manual_print':
+            if f == "manual_print":
                 self.manual_print = True
-            elif f == 'manual_endian':
+            elif f == "manual_endian":
                 self.manual_endian = True
 
         self.block = block
@@ -250,12 +292,17 @@ class Union():
 
         global_type_add(name, self)
 
+    def process(self, result):
+        result["types"].append(self)
+
     def __repr__(self):
         return str(self.block)
 
 
-class Define():
-    def __init__(self, name, flags, block):
+class Define(Processable):
+    type = "Define"
+
+    def __init__(self, name, flags, block, comment=None):
         self.name = name
         self.flags = flags
         self.block = block
@@ -263,56 +310,111 @@ class Define():
         self.manual_print = False
         self.manual_endian = False
         self.autoreply = False
-        self.singular = False
+        self.autoendian = 0
         self.options = {}
+        self.comment = comment
         for f in flags:
-            if f == 'dont_trace':
+            if f == "dont_trace":
                 self.dont_trace = True
-            elif f == 'manual_print':
+            elif f == "manual_print":
                 self.manual_print = True
-            elif f == 'manual_endian':
+            elif f == "manual_endian":
                 self.manual_endian = True
-            elif f == 'autoreply':
+            elif f == "autoreply":
                 self.autoreply = True
+            elif f == "autoendian":
+                self.autoendian = 1
 
+        remove = []
         for b in block:
             if isinstance(b, Option):
-                if b[1] == 'singular' and b[2] == 'true':
-                    self.singular = True
-                else:
-                    self.options[b.option] = b.value
-                block.remove(b)
+                self.options[b.option] = b.value
+                remove.append(b)
 
+        block = [x for x in block if x not in remove]
+        self.block = block
         self.vla = vla_is_last_check(name, block)
+        vla_mark_length_field(self.block)
+
         self.crc = str(block).encode()
 
+    def autoreply_block(self, name, parent):
+        block = [Field("u32", "context"), Field("i32", "retval")]
+        # inherit the parent's options
+        for k, v in parent.options.items():
+            block.append(Option(k, v))
+        return Define(name + "_reply", [], block)
+
+    def process(self, result):  # -> Dict
+        tname = self.__class__.__name__
+        result[tname].append(self)
+        if self.autoreply:
+            result[tname].append(self.autoreply_block(self.name, self))
+
     def __repr__(self):
         return self.name + str(self.flags) + str(self.block)
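An illustrative sketch of the autoreply machinery above (the message name is invented): a Define flagged autoreply causes process() to emit an implicit <name>_reply message built by autoreply_block(), carrying context/retval plus any options inherited from the request:

    req = Define(
        "show_widgets",
        ["autoreply"],
        [Field("u32", "client_index"), Field("u32", "context")],
    )
    rep = req.autoreply_block(req.name, req)
    # rep.name == "show_widgets_reply"; rep.block starts with u32 context and
    # i32 retval, followed by any Option entries copied from the request.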
 
 
-class Enum():
-    def __init__(self, name, block, enumtype='u32'):
+class Enum(Processable):
+    type = "Enum"
+
+    def __init__(self, name, block, enumtype="u32"):
         self.name = name
         self.enumtype = enumtype
         self.vla = False
+        self.manual_print = False
+
+        count = -1
+        block2 = []
+        block3 = []
+        bc_set = False
 
-        count = 0
-        for i, b in enumerate(block):
-            if type(b) is list:
-                count = b[1]
+        for b in block:
+            if "value" in b:
+                count = b["value"]
             else:
                 count += 1
-                block[i] = [b, count]
-
-        self.block = block
-        self.crc = str(block).encode()
+            block2.append([b["id"], count])
+            try:
+                if b["option"]["backwards_compatible"]:
+                    pass
+                bc_set = True
+            except KeyError:
+                block3.append([b["id"], count])
+                if bc_set:
+                    raise ValueError(
+                        "Backward compatible enum must "
+                        "be last {!r} {!r}".format(name, b["id"])
+                    )
+        self.block = block2
+        self.crc = str(block3).encode()
         global_type_add(name, self)
 
+    def process(self, result):
+        result["types"].append(self)
+
     def __repr__(self):
         return self.name + str(self.block)
 
 
-class Import():
+class EnumFlag(Enum):
+    type = "EnumFlag"
+
+    def __init__(self, name, block, enumtype="u32"):
+        super(EnumFlag, self).__init__(name, block, enumtype)
+
+        for b in self.block:
+            if bin(b[1])[2:].count("1") > 1:
+                raise TypeError(
+                    "%s is not a flag enum.  No element in a "
+                    "flag enum may have more than a "
+                    "single bit set." % self.name
+                )
+
+
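A small sketch of the single-bit constraint enforced by EnumFlag above (the names are invented); EnumFlag reuses Enum's value assignment, so every member must have exactly one bit set:

    EnumFlag(
        "example_flags",
        [{"id": "EXAMPLE_A", "value": 1}, {"id": "EXAMPLE_B", "value": 2}],
    )
    # Fine: 0x1 and 0x2 are single-bit values.
    # EnumFlag("example_bad", [{"id": "EXAMPLE_AB", "value": 3}]) would raise
    # TypeError, since 3 has two bits set.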
+class Import(Processable):
+    type = "Import"
+    _initialized = False
 
     def __new__(cls, *args, **kwargs):
         if args[0] not in seen_imports:
@@ -322,38 +424,36 @@ class Import():
 
         return seen_imports[args[0]]
 
-    def __init__(self, filename):
+    def __init__(self, filename, revision):
         if self._initialized:
             return
-        else:
-            self.filename = filename
-            # Deal with imports
-            parser = VPPAPI(filename=filename)
-            dirlist = dirlist_get()
-            f = filename
-            for dir in dirlist:
-                f = os.path.join(dir, filename)
-                if os.path.exists(f):
-                    break
-            if sys.version[0] == '2':
-                with open(f) as fd:
-                    self.result = parser.parse_file(fd, None)
-            else:
-                with open(f, encoding='utf-8') as fd:
-                    self.result = parser.parse_file(fd, None)
-            self._initialized = True
+        self.filename = filename
+        # Deal with imports
+        parser = VPPAPI(filename=filename, revision=revision)
+        dirlist = dirlist_get()
+        f = filename
+        for dir in dirlist:
+            f = os.path.join(dir, filename)
+            if os.path.exists(f):
+                break
+        self.result = parser.parse_filename(f, None)
+        self._initialized = True
 
     def __repr__(self):
         return self.filename
 
 
-class Option():
-    def __init__(self, option, value):
-        self.type = 'Option'
+class Option(Processable):
+    type = "Option"
+
+    def __init__(self, option, value=None):
         self.option = option
         self.value = value
         self.crc = str(option).encode()
 
+    def process(self, result):  # -> Dict
+        result[self.type][self.option] = self.value
+
     def __repr__(self):
         return str(self.option)
 
@@ -361,9 +461,10 @@ class Option():
         return self.option[index]
 
 
-class Array():
+class Array(Processable):
+    type = "Array"
+
     def __init__(self, fieldtype, name, length, modern_vla=False):
-        self.type = 'Array'
         self.fieldtype = fieldtype
         self.fieldname = name
         self.modern_vla = modern_vla
@@ -377,22 +478,26 @@ class Array():
             self.vla = False
 
     def __repr__(self):
-        return str([self.fieldtype, self.fieldname, self.length,
-                    self.lengthfield])
+        return str([self.fieldtype, self.fieldname, self.length, self.lengthfield])
+
 
+class Field(Processable):
+    type = "Field"
 
-class Field():
     def __init__(self, fieldtype, name, limit=None):
-        self.type = 'Field'
+        # limit field has been expanded to an options dict.
+
         self.fieldtype = fieldtype
+        self.is_lengthfield = False
 
-        if self.fieldtype == 'string':
-            raise ValueError("The string type {!r} is an "
-                             "array type ".format(name))
+        if self.fieldtype == "string":
+            raise ValueError("The string type {!r} is an " "array type ".format(name))
 
         if name in keyword.kwlist:
-            raise ValueError("Fieldname {!r} is a python keyword and is not "
-                             "accessible via the python API. ".format(name))
+            raise ValueError(
+                "Fieldname {!r} is a python keyword and is not "
+                "accessible via the python API. ".format(name)
+            )
         self.fieldname = name
         self.limit = limit
 
@@ -400,13 +505,35 @@ class Field():
         return str([self.fieldtype, self.fieldname])
 
 
-class Coord(object):
-    """ Coordinates of a syntactic element. Consists of:
-            - File name
-            - Line number
-            - (optional) column number, for the Lexer
+class Counter(Processable):
+    type = "Counter"
+
+    def __init__(self, path, counter):
+        self.name = path
+        self.block = counter
+
+    def process(self, result):  # -> Dict
+        result["Counters"].append(self)
+
+
+class Paths(Processable):
+    type = "Paths"
+
+    def __init__(self, pathset):
+        self.paths = pathset
+
+    def __repr__(self):
+        return "%s(paths=%s)" % (self.__class__.__name__, self.paths)
+
+
+class Coord:
+    """Coordinates of a syntactic element. Consists of:
+    - File name
+    - Line number
+    - (optional) column number, for the Lexer
     """
-    __slots__ = ('file', 'line', 'column', '__weakref__')
+
+    __slots__ = ("file", "line", "column", "__weakref__")
 
     def __init__(self, file, line, column=None):
         self.file = file
@@ -427,13 +554,15 @@ class ParseError(Exception):
 #
 # Grammar rules
 #
-class VPPAPIParser(object):
+class VPPAPIParser:
     tokens = VPPAPILexer.tokens
 
-    def __init__(self, filename, logger):
+    def __init__(self, filename, logger, revision=None):
         self.filename = filename
         self.logger = logger
         self.fields = []
+        self.revision = revision
+        self.last_comment = None
 
     def _parse_error(self, msg, coord):
         raise ParseError("%s: %s" % (coord, msg))
@@ -443,65 +572,121 @@ class VPPAPIParser(object):
             self.logger.warning("%s: %s" % (coord, msg))
 
     def _coord(self, lineno, column=None):
-        return Coord(
-                file=self.filename,
-                line=lineno, column=column)
+        return Coord(file=self.filename, line=lineno, column=column)
 
     def _token_coord(self, p, token_idx):
-        """ Returns the coordinates for the YaccProduction object 'p' indexed
-            with 'token_idx'. The coordinate includes the 'lineno' and
-            'column'. Both follow the lex semantic, starting from 1.
+        """Returns the coordinates for the YaccProduction object 'p' indexed
+        with 'token_idx'. The coordinate includes the 'lineno' and
+        'column'. Both follow the lex semantic, starting from 1.
         """
-        last_cr = p.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx))
+        last_cr = p.lexer.lexdata.rfind("\n", 0, p.lexpos(token_idx))
         if last_cr < 0:
             last_cr = -1
-        column = (p.lexpos(token_idx) - (last_cr))
+        column = p.lexpos(token_idx) - (last_cr)
         return self._coord(p.lineno(token_idx), column)
 
     def p_slist(self, p):
-        '''slist : stmt
-                 | slist stmt'''
+        """slist : stmt
+        | slist stmt"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_stmt(self, p):
-        '''stmt : define
-                | typedef
-                | option
-                | import
-                | enum
-                | union
-                | service'''
+        """stmt : define
+        | typedef
+        | option
+        | import
+        | enum
+        | enumflag
+        | union
+        | service
+        | paths
+        | comment
+        | counters"""
         p[0] = p[1]
 
     def p_import(self, p):
-        '''import : IMPORT STRING_LITERAL ';' '''
-        p[0] = Import(p[2])
+        """import : IMPORT STRING_LITERAL ';'"""
+        p[0] = Import(p[2], revision=self.revision)
+
+    def p_path_elements(self, p):
+        """path_elements : path_element
+        | path_elements path_element"""
+        if len(p) == 2:
+            p[0] = p[1]
+        else:
+            if type(p[1]) is dict:
+                p[0] = [p[1], p[2]]
+            else:
+                p[0] = p[1] + [p[2]]
+
+    def p_path_element(self, p):
+        """path_element : STRING_LITERAL STRING_LITERAL ';'"""
+        p[0] = {"path": p[1], "counter": p[2]}
+
+    def p_paths(self, p):
+        """paths : PATHS '{' path_elements '}' ';'"""
+        p[0] = Paths(p[3])
+
+    def p_counters(self, p):
+        """counters : COUNTERS ID '{' counter_elements '}' ';'"""
+        p[0] = Counter(p[2], p[4])
+
+    def p_counter_elements(self, p):
+        """counter_elements : counter_element
+        | counter_elements counter_element"""
+        if len(p) == 2:
+            p[0] = [p[1]]
+        else:
+            if type(p[1]) is dict:
+                p[0] = [p[1], p[2]]
+            else:
+                p[0] = p[1] + [p[2]]
+
+    def p_counter_element(self, p):
+        """counter_element : ID '{' counter_statements '}' ';'"""
+        p[0] = {**{"name": p[1]}, **p[3]}
+
+    def p_counter_statements(self, p):
+        """counter_statements : counter_statement
+        | counter_statements counter_statement"""
+        if len(p) == 2:
+            p[0] = p[1]
+        else:
+            p[0] = {**p[1], **p[2]}
+
+    def p_counter_statement(self, p):
+        """counter_statement : SEVERITY ID ';'
+        | UNITS STRING_LITERAL ';'
+        | DESCRIPTION STRING_LITERAL ';'
+        | TYPE ID ';'"""
+        p[0] = {p[1]: p[2]}
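For illustration, the counters/paths grammar above accepts fragments like the one below (counter and path names are invented); a sketch feeding such a fragment through the VPPAPI wrapper defined later in this file:

    src = """
    counters interface {
        rx_errors {
            severity error;
            type counter64;
            units "packets";
            description "receive errors";
        };
    };
    paths {
        "/if/rx-errors" "rx_errors";
    };
    """
    objs = VPPAPI(filename="example.api", logger=log).parse_string(src)
    # objs is a list holding a Counter("interface", ...) and a Paths(...) object.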
 
     def p_service(self, p):
-        '''service : SERVICE '{' service_statements '}' ';' '''
+        """service : SERVICE '{' service_statements '}' ';'"""
         p[0] = p[3]
 
     def p_service_statements(self, p):
-        '''service_statements : service_statement
-                        | service_statements service_statement'''
+        """service_statements : service_statement
+        | service_statements service_statement"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_service_statement(self, p):
-        '''service_statement : RPC ID RETURNS NULL ';'
-                             | RPC ID RETURNS ID ';'
-                             | RPC ID RETURNS STREAM ID ';'
-                             | RPC ID RETURNS ID EVENTS event_list ';' '''
+        """service_statement : RPC ID RETURNS NULL ';'
+        | RPC ID RETURNS ID ';'
+        | RPC ID RETURNS STREAM ID ';'
+        | RPC ID RETURNS ID EVENTS event_list ';'"""
         if p[2] == p[4]:
             # Verify that caller and reply differ
             self._parse_error(
-                'Reply ID ({}) should not be equal to Caller ID'.format(p[2]),
-                self._token_coord(p, 1))
+                "Reply ID ({}) should not be equal to Caller ID".format(p[2]),
+                self._token_coord(p, 1),
+            )
         if len(p) == 8:
             p[0] = Service(p[2], p[4], p[6])
         elif len(p) == 7:
@@ -509,359 +694,430 @@ class VPPAPIParser(object):
         else:
             p[0] = Service(p[2], p[4])
 
+    def p_service_statement2(self, p):
+        """service_statement : RPC ID RETURNS ID STREAM ID ';'"""
+        p[0] = Service(p[2], p[4], stream_message=p[6], stream=True)
+
     def p_event_list(self, p):
-        '''event_list : events
-                      | event_list events '''
+        """event_list : events
+        | event_list events"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_event(self, p):
-        '''events : ID
-                  | ID ',' '''
+        """events : ID
+        | ID ','"""
         p[0] = p[1]
 
     def p_enum(self, p):
-        '''enum : ENUM ID '{' enum_statements '}' ';' '''
+        """enum : ENUM ID '{' enum_statements '}' ';'"""
         p[0] = Enum(p[2], p[4])
 
     def p_enum_type(self, p):
-        ''' enum : ENUM ID ':' enum_size '{' enum_statements '}' ';' '''
+        """enum : ENUM ID ':' enum_size '{' enum_statements '}' ';'"""
         if len(p) == 9:
             p[0] = Enum(p[2], p[6], enumtype=p[4])
         else:
             p[0] = Enum(p[2], p[4])
 
+    def p_enumflag(self, p):
+        """enumflag : ENUMFLAG ID '{' enum_statements '}' ';'"""
+        p[0] = EnumFlag(p[2], p[4])
+
+    def p_enumflag_type(self, p):
+        """enumflag : ENUMFLAG ID ':' enumflag_size '{' enum_statements '}' ';'"""  # noqa : E502
+        if len(p) == 9:
+            p[0] = EnumFlag(p[2], p[6], enumtype=p[4])
+        else:
+            p[0] = EnumFlag(p[2], p[4])
+
     def p_enum_size(self, p):
-        ''' enum_size : U8
-                      | U16
-                      | U32 '''
+        """enum_size : U8
+        | U16
+        | U32
+        | I8
+        | I16
+        | I32"""
+        p[0] = p[1]
+
+    def p_enumflag_size(self, p):
+        """enumflag_size : U8
+        | U16
+        | U32"""
         p[0] = p[1]
 
     def p_define(self, p):
-        '''define : DEFINE ID '{' block_statements_opt '}' ';' '''
+        """define : DEFINE ID '{' block_statements_opt '}' ';'"""
         self.fields = []
-        p[0] = Define(p[2], [], p[4])
+        p[0] = Define(p[2], [], p[4], self.last_comment)
+        self.last_comment = None
 
     def p_define_flist(self, p):
-        '''define : flist DEFINE ID '{' block_statements_opt '}' ';' '''
+        """define : flist DEFINE ID '{' block_statements_opt '}' ';'"""
         # Legacy typedef
-        if 'typeonly' in p[1]:
-            self._parse_error('legacy typedef. use typedef: {} {}[{}];'
-                              .format(p[1], p[2], p[4]),
-                              self._token_coord(p, 1))
+        if "typeonly" in p[1]:
+            self._parse_error(
+                "legacy typedef. use typedef: {} {}[{}];".format(p[1], p[2], p[4]),
+                self._token_coord(p, 1),
+            )
         else:
-            p[0] = Define(p[3], p[1], p[5])
+            p[0] = Define(p[3], p[1], p[5], self.last_comment)
+            self.last_comment = None
 
     def p_flist(self, p):
-        '''flist : flag
-                 | flist flag'''
+        """flist : flag
+        | flist flag"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_flag(self, p):
-        '''flag : MANUAL_PRINT
-                | MANUAL_ENDIAN
-                | DONT_TRACE
-                | TYPEONLY
-                | AUTOREPLY'''
+        """flag : MANUAL_PRINT
+        | MANUAL_ENDIAN
+        | DONT_TRACE
+        | TYPEONLY
+        | AUTOENDIAN
+        | AUTOREPLY"""
         if len(p) == 1:
             return
         p[0] = p[1]
 
     def p_typedef(self, p):
-        '''typedef : TYPEDEF ID '{' block_statements_opt '}' ';' '''
+        """typedef : TYPEDEF ID '{' block_statements_opt '}' ';'"""
         p[0] = Typedef(p[2], [], p[4])
 
     def p_typedef_flist(self, p):
-        '''typedef : flist TYPEDEF ID '{' block_statements_opt '}' ';' '''
+        """typedef : flist TYPEDEF ID '{' block_statements_opt '}' ';'"""
         p[0] = Typedef(p[3], p[1], p[5])
 
     def p_typedef_alias(self, p):
-        '''typedef : TYPEDEF declaration '''
+        """typedef : TYPEDEF declaration"""
         p[0] = Using(p[2].fieldname, [], p[2])
 
     def p_typedef_alias_flist(self, p):
-        '''typedef : flist TYPEDEF declaration '''
+        """typedef : flist TYPEDEF declaration"""
         p[0] = Using(p[3].fieldname, p[1], p[3])
 
     def p_block_statements_opt(self, p):
-        '''block_statements_opt : block_statements '''
+        """block_statements_opt : block_statements"""
         p[0] = p[1]
 
     def p_block_statements(self, p):
-        '''block_statements : block_statement
-                            | block_statements block_statement'''
+        """block_statements : block_statement
+        | block_statements block_statement"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_block_statement(self, p):
-        '''block_statement : declaration
-                           | option '''
+        """block_statement : declaration
+        | option"""
         p[0] = p[1]
 
     def p_enum_statements(self, p):
-        '''enum_statements : enum_statement
-                           | enum_statements enum_statement'''
+        """enum_statements : enum_statement
+        | enum_statements enum_statement"""
         if len(p) == 2:
             p[0] = [p[1]]
         else:
             p[0] = p[1] + [p[2]]
 
     def p_enum_statement(self, p):
-        '''enum_statement : ID '=' NUM ','
-                          | ID ',' '''
-        if len(p) == 5:
-            p[0] = [p[1], p[3]]
+        """enum_statement : ID '=' NUM ','
+        | ID ','
+        | ID '[' field_options ']' ','
+        | ID '=' NUM '[' field_options ']' ','"""
+        if len(p) == 3:
+            p[0] = {"id": p[1]}
+        elif len(p) == 5:
+            p[0] = {"id": p[1], "value": p[3]}
+        elif len(p) == 6:
+            p[0] = {"id": p[1], "option": p[3]}
+        elif len(p) == 8:
+            p[0] = {"id": p[1], "value": p[3], "option": p[5]}
         else:
-            p[0] = p[1]
+            self._parse_error("ERROR", self._token_coord(p, 1))
 
     def p_field_options(self, p):
-        '''field_options : field_option
-                           | field_options field_option'''
+        """field_options : field_option
+        | field_options field_option"""
         if len(p) == 2:
             p[0] = p[1]
         else:
             p[0] = {**p[1], **p[2]}
 
     def p_field_option(self, p):
-        '''field_option : ID
-                        | ID '=' assignee ','
-                        | ID '=' assignee
+        """field_option : ID
+        | ID '=' assignee ','
+        | ID '=' assignee
 
-        '''
+        """
         if len(p) == 2:
             p[0] = {p[1]: None}
         else:
             p[0] = {p[1]: p[3]}
 
+    def p_variable_name(self, p):
+        """variable_name : ID
+        | TYPE
+        | SEVERITY
+        | DESCRIPTION
+        | COUNTERS
+        | PATHS
+        """
+        p[0] = p[1]
+
+    def p_comment(self, p):
+        """comment : COMMENT"""
+        self.last_comment = p[1]
+        p[0] = []
+
     def p_declaration(self, p):
-        '''declaration : type_specifier ID ';'
-                       | type_specifier ID '[' field_options ']' ';' '''
+        """declaration : type_specifier variable_name ';'
+        | type_specifier variable_name '[' field_options ']' ';'
+        """
         if len(p) == 7:
             p[0] = Field(p[1], p[2], p[4])
         elif len(p) == 4:
             p[0] = Field(p[1], p[2])
         else:
-            self._parse_error('ERROR', self._token_coord(p, 1))
+            self._parse_error("ERROR", self._token_coord(p, 1))
         self.fields.append(p[2])
 
     def p_declaration_array_vla(self, p):
-        '''declaration : type_specifier ID '[' ']' ';' '''
+        """declaration : type_specifier variable_name '[' ']' ';'"""
         p[0] = Array(p[1], p[2], 0, modern_vla=True)
 
     def p_declaration_array(self, p):
-        '''declaration : type_specifier ID '[' NUM ']' ';'
-                       | type_specifier ID '[' ID ']' ';' '''
+        """declaration : type_specifier variable_name '[' NUM ']' ';'
+        | type_specifier variable_name '[' ID ']' ';'"""
 
         if len(p) != 7:
             return self._parse_error(
-                'array: %s' % p.value,
-                self._coord(lineno=p.lineno))
+                "array: %s" % p.value, self._coord(lineno=p.lineno)
+            )
 
         # Make this error later
         if type(p[4]) is int and p[4] == 0:
             # XXX: Line number is wrong
-            self._parse_warning('Old Style VLA: {} {}[{}];'
-                                .format(p[1], p[2], p[4]),
-                                self._token_coord(p, 1))
+            self._parse_warning(
+                "Old Style VLA: {} {}[{}];".format(p[1], p[2], p[4]),
+                self._token_coord(p, 1),
+            )
 
         if type(p[4]) is str and p[4] not in self.fields:
             # Verify that length field exists
-            self._parse_error('Missing length field: {} {}[{}];'
-                              .format(p[1], p[2], p[4]),
-                              self._token_coord(p, 1))
+            self._parse_error(
+                "Missing length field: {} {}[{}];".format(p[1], p[2], p[4]),
+                self._token_coord(p, 1),
+            )
         p[0] = Array(p[1], p[2], p[4])
 
     def p_option(self, p):
-        '''option : OPTION ID '=' assignee ';' '''
-        p[0] = Option(p[2], p[4])
+        """option : OPTION ID '=' assignee ';'
+        | OPTION ID ';'"""
+        if len(p) == 4:
+            p[0] = Option(p[2])
+        else:
+            p[0] = Option(p[2], p[4])
 
     def p_assignee(self, p):
-        '''assignee : NUM
-                    | TRUE
-                    | FALSE
-                    | STRING_LITERAL '''
+        """assignee : NUM
+        | TRUE
+        | FALSE
+        | STRING_LITERAL"""
         p[0] = p[1]
 
     def p_type_specifier(self, p):
-        '''type_specifier : U8
-                          | U16
-                          | U32
-                          | U64
-                          | I8
-                          | I16
-                          | I32
-                          | I64
-                          | F64
-                          | BOOL
-                          | STRING'''
+        """type_specifier : U8
+        | U16
+        | U32
+        | U64
+        | I8
+        | I16
+        | I32
+        | I64
+        | F64
+        | BOOL
+        | STRING"""
         p[0] = p[1]
 
     # Do a second pass later to verify that user defined types are defined
     def p_typedef_specifier(self, p):
-        '''type_specifier : ID '''
+        """type_specifier : ID"""
         if p[1] not in global_types:
-            self._parse_error('Undefined type: {}'.format(p[1]),
-                              self._token_coord(p, 1))
+            self._parse_error(
+                "Undefined type: {}".format(p[1]), self._token_coord(p, 1)
+            )
         p[0] = p[1]
 
     def p_union(self, p):
-        '''union : UNION ID '{' block_statements_opt '}' ';' '''
+        """union : UNION ID '{' block_statements_opt '}' ';'"""
         p[0] = Union(p[2], [], p[4])
 
     def p_union_flist(self, p):
-        '''union : flist UNION ID '{' block_statements_opt '}' ';' '''
+        """union : flist UNION ID '{' block_statements_opt '}' ';'"""
         p[0] = Union(p[3], p[1], p[5])
 
     # Error rule for syntax errors
     def p_error(self, p):
         if p:
-            self._parse_error(
-                'before: %s' % p.value,
-                self._coord(lineno=p.lineno))
+            if p.type == "COMMENT":
+                self.parser.errok()
+                return
+            self._parse_error("before: %s" % p.value, self._coord(lineno=p.lineno))
         else:
-            self._parse_error('At end of input', self.filename)
+            self._parse_error("At end of input", self.filename)
 
+    def build(self, **kwargs):
+        self.parser = yacc.yacc(module=self, **kwargs)
 
-class VPPAPI(object):
 
-    def __init__(self, debug=False, filename='', logger=None):
+class VPPAPI:
+    def __init__(self, debug=False, filename="", logger=None, revision=None):
         self.lexer = lex.lex(module=VPPAPILexer(filename), debug=debug)
-        self.parser = yacc.yacc(module=VPPAPIParser(filename, logger),
-                                write_tables=False, debug=debug)
+        self.parser = VPPAPIParser(filename, logger, revision=revision)
+        self.parser.build(write_tables=False, debug=debug)
         self.logger = logger
+        self.revision = revision
+        self.filename = filename
 
     def parse_string(self, code, debug=0, lineno=1):
         self.lexer.lineno = lineno
-        return self.parser.parse(code, lexer=self.lexer, debug=debug)
+        return self.parser.parser.parse(code, lexer=self.lexer, debug=debug)
 
-    def parse_file(self, fd, debug=0):
+    def parse_fd(self, fd, debug=0):
         data = fd.read()
         return self.parse_string(data, debug=debug)
 
-    def autoreply_block(self, name):
-        block = [Field('u32', 'context'),
-                 Field('i32', 'retval')]
-        return Define(name + '_reply', [], block)
+    def parse_filename(self, filename, debug=0):
+        if self.revision:
+            git_show = "git show {}:{}".format(self.revision, filename)
+            proc = Popen(git_show.split(), stdout=PIPE, encoding="utf-8")
+            try:
+                data, errs = proc.communicate()
+                if proc.returncode != 0:
+                    print(
+                        "File not found: {}:{}".format(self.revision, filename),
+                        file=sys.stderr,
+                    )
+                    sys.exit(2)
+                return self.parse_string(data, debug=debug)
+            except Exception:
+                sys.exit(3)
+        else:
+            try:
+                with open(filename, encoding="utf-8") as fd:
+                    return self.parse_fd(fd, None)
+            except FileNotFoundError:
+                print("File not found: {}".format(filename), file=sys.stderr)
+                sys.exit(2)
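A usage sketch for the revision support above: when a revision is given, the source is read with 'git show <revision>:<path>' instead of from the working tree (the tag and path here are only examples):

    parser = VPPAPI(filename="src/vnet/interface.api", logger=log, revision="v21.06")
    objs = parser.parse_filename("src/vnet/interface.api")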
 
     def process(self, objs):
         s = {}
-        s['Option'] = {}
-        s['Define'] = []
-        s['Service'] = []
-        s['types'] = []
-        s['Import'] = []
+        s["Option"] = {}
+        s["Define"] = []
+        s["Service"] = []
+        s["types"] = []
+        s["Import"] = []
+        s["Counters"] = []
+        s["Paths"] = []
         crc = 0
         for o in objs:
-            tname = o.__class__.__name__
             try:
-                crc = binascii.crc32(o.crc, crc) & 0xffffffff
+                crc = binascii.crc32(o.crc, crc) & 0xFFFFFFFF
             except AttributeError:
                 pass
-            if isinstance(o, Define):
-                s[tname].append(o)
-                if o.autoreply:
-                    s[tname].append(self.autoreply_block(o.name))
-            elif isinstance(o, Option):
-                s[tname][o.option] = o.value
-            elif type(o) is list:
+
+            if type(o) is list:
                 for o2 in o:
                     if isinstance(o2, Service):
-                        s['Service'].append(o2)
-            elif (isinstance(o, Enum) or
-                  isinstance(o, Typedef) or
-                  isinstance(o, Using) or
-                  isinstance(o, Union)):
-                s['types'].append(o)
+                        o2.process(s)
             else:
-                if tname not in s:
-                    raise ValueError('Unknown class type: {} {}'
-                                     .format(tname, o))
-                s[tname].append(o)
-
-        msgs = {d.name: d for d in s['Define']}
-        svcs = {s.caller: s for s in s['Service']}
-        replies = {s.reply: s for s in s['Service']}
+                o.process(s)
+
+        msgs = {d.name: d for d in s["Define"]}
+        svcs = {s.caller: s for s in s["Service"]}
+        replies = {s.reply: s for s in s["Service"]}
         seen_services = {}
 
-        s['file_crc'] = crc
+        s["file_crc"] = crc
 
         for service in svcs:
             if service not in msgs:
                 raise ValueError(
-                    'Service definition refers to unknown message'
-                    ' definition: {}'.format(service))
-            if svcs[service].reply != 'null' and \
-               svcs[service].reply not in msgs:
-                raise ValueError('Service definition refers to unknown message'
-                                 ' definition in reply: {}'
-                                 .format(svcs[service].reply))
+                    "Service definition refers to unknown message"
+                    " definition: {}".format(service)
+                )
+            if svcs[service].reply != "null" and svcs[service].reply not in msgs:
+                raise ValueError(
+                    "Service definition refers to unknown message"
+                    " definition in reply: {}".format(svcs[service].reply)
+                )
             if service in replies:
-                raise ValueError('Service definition refers to message'
-                                 ' marked as reply: {}'.format(service))
+                raise ValueError(
+                    "Service definition refers to message"
+                    " marked as reply: {}".format(service)
+                )
             for event in svcs[service].events:
                 if event not in msgs:
-                    raise ValueError('Service definition refers to unknown '
-                                     'event: {} in message: {}'
-                                     .format(event, service))
+                    raise ValueError(
+                        "Service definition refers to unknown "
+                        "event: {} in message: {}".format(event, service)
+                    )
                 seen_services[event] = True
 
         # Create services implicitly
         for d in msgs:
             if d in seen_services:
                 continue
-            if msgs[d].singular is True:
-                continue
-            if d.endswith('_reply'):
+            if d.endswith("_reply"):
                 if d[:-6] in svcs:
                     continue
                 if d[:-6] not in msgs:
-                    raise ValueError('{} missing calling message'
-                                     .format(d))
+                    raise ValueError("{} missing calling message".format(d))
                 continue
-            if d.endswith('_dump'):
+            if d.endswith("_dump"):
                 if d in svcs:
                     continue
-                if d[:-5]+'_details' in msgs:
-                    s['Service'].append(Service(d, d[:-5]+'_details',
-                                                stream=True))
+                if d[:-5] + "_details" in msgs:
+                    s["Service"].append(Service(d, d[:-5] + "_details", stream=True))
                 else:
-                    raise ValueError('{} missing details message'
-                                     .format(d))
+                    raise ValueError("{} missing details message".format(d))
                 continue
 
-            if d.endswith('_details'):
-                if d[:-8]+'_dump' not in msgs:
-                    raise ValueError('{} missing dump message'
-                                     .format(d))
-                continue
+            if d.endswith("_details"):
+                if d[:-8] + "_get" in msgs:
+                    if d[:-8] + "_get" in svcs:
+                        continue
+                    raise ValueError(
+                        "{} should be in a stream service".format(d[:-8] + "_get")
+                    )
+                if d[:-8] + "_dump" in msgs:
+                    continue
+                raise ValueError("{} missing dump or get message".format(d))
 
             if d in svcs:
                 continue
-            if d+'_reply' in msgs:
-                s['Service'].append(Service(d, d+'_reply'))
+            if d + "_reply" in msgs:
+                s["Service"].append(Service(d, d + "_reply"))
             else:
                 raise ValueError(
-                    '{} missing reply message ({}) or service definition'
-                    .format(d, d+'_reply'))
+                    "{} missing reply message ({}) or service definition".format(
+                        d, d + "_reply"
+                    )
+                )
 
         return s
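A sketch of the implicit-service rules applied by process() above, using invented message names; with no explicit service block, requests are paired with replies and dump/details streams purely by naming convention:

    src = """
    define widget_enable { u32 client_index; u32 context; bool enable; };
    define widget_enable_reply { u32 context; i32 retval; };
    define widget_dump { u32 client_index; u32 context; };
    define widget_details { u32 context; u32 index; };
    """
    parser = VPPAPI(filename="example.api", logger=log)
    s = parser.process(parser.parse_string(src))
    # s["Service"] now contains widget_enable -> widget_enable_reply and
    # widget_dump -> widget_details with stream=True.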
 
-    def process_imports(self, objs, in_import, result):
-        imported_objs = []
+    def process_imports(self, objs, in_import, result):  # -> List
         for o in objs:
             # Only allow the following object types from imported file
-            if in_import and not (isinstance(o, Enum) or
-                                  isinstance(o, Union) or
-                                  isinstance(o, Typedef) or
-                                  isinstance(o, Import) or
-                                  isinstance(o, Using)):
+            if in_import and not isinstance(o, (Enum, Import, Typedef, Union, Using)):
                 continue
             if isinstance(o, Import):
                 result.append(o)
@@ -874,7 +1130,7 @@ class VPPAPI(object):
 # Add message ids to each message.
 def add_msg_id(s):
     for o in s:
-        o.block.insert(0, Field('u16', '_vl_msg_id'))
+        o.block.insert(0, Field("u16", "_vl_msg_id"))
     return s
 
 
@@ -894,12 +1150,12 @@ def dirlist_get():
 def foldup_blocks(block, crc):
     for b in block:
         # Look up CRC in user defined types
-        if b.fieldtype.startswith('vl_api_'):
+        if b.fieldtype.startswith("vl_api_"):
             # Recursively
             t = global_types[b.fieldtype]
             try:
-                crc = crc_block_combine(t.block, crc)
-                return foldup_blocks(t.block, crc)
+                crc = binascii.crc32(t.crc, crc) & 0xFFFFFFFF
+                crc = foldup_blocks(t.block, crc)
             except AttributeError:
                 pass
     return crc
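The fold above now mixes each referenced type's own CRC into the running value and keeps iterating over the remaining fields (the old helper returned after the first nested type). The combination step itself is plain chained crc32, e.g. (byte strings are illustrative):

    crc = binascii.crc32(b"define foo") & 0xFFFFFFFF
    crc = binascii.crc32(b"vl_api_bar_t crc", crc) & 0xFFFFFFFF
    crc = binascii.crc32(b"vl_api_baz_t crc", crc) & 0xFFFFFFFF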
@@ -907,124 +1163,181 @@ def foldup_blocks(block, crc):
 
 def foldup_crcs(s):
     for f in s:
-        f.crc = foldup_blocks(f.block,
-                              binascii.crc32(f.crc) & 0xffffffff)
-
-
-#
-# Main
-#
-def main():
-    if sys.version_info < (3, 5,):
-        log.exception('vppapigen requires a supported version of python. '
-                      'Please use version 3.5 or greater. '
-                      'Using {}'.format(sys.version))
-        return 1
-
-    cliparser = argparse.ArgumentParser(description='VPP API generator')
-    cliparser.add_argument('--pluginpath', default=""),
-    cliparser.add_argument('--includedir', action='append'),
-    cliparser.add_argument('--outputdir', action='store'),
-    cliparser.add_argument('--input',
-                           type=argparse.FileType('r', encoding='UTF-8'),
-                           default=sys.stdin)
-    cliparser.add_argument('--output', nargs='?',
-                           type=argparse.FileType('w', encoding='UTF-8'),
-                           default=sys.stdout)
-
-    cliparser.add_argument('output_module', nargs='?', default='C')
-    cliparser.add_argument('--debug', action='store_true')
-    cliparser.add_argument('--show-name', nargs=1)
-    args = cliparser.parse_args()
-
-    dirlist_add(args.includedir)
-    if not args.debug:
+        f.crc = foldup_blocks(f.block, binascii.crc32(f.crc) & 0xFFFFFFFF)
+
+
+def run_vppapigen(
+    input_file=None,
+    output=sys.stdout,
+    includedir=None,
+    debug=False,
+    show_name=None,
+    output_module="C",
+    outputdir=None,
+    pluginpath="",
+    git_revision=None,
+):
+    # reset globals
+    dirlist.clear()
+    global_types.clear()
+    seen_imports.clear()
+
+    dirlist_add(includedir)
+    if not debug:
         sys.excepthook = exception_handler
 
     # Filename
-    if args.show_name:
-        filename = args.show_name[0]
-    elif args.input != sys.stdin:
-        filename = args.input.name
+    if show_name:
+        filename = show_name[0]
+    elif input_file:
+        filename = input_file
     else:
-        filename = ''
+        filename = ""
 
-    if args.debug:
+    if debug:
         logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
     else:
         logging.basicConfig()
 
-    parser = VPPAPI(debug=args.debug, filename=filename, logger=log)
-    parsed_objects = parser.parse_file(args.input, log)
+    #
+    # Generate representation
+    #
+    from importlib.machinery import SourceFileLoader
+
+    # Default path
+    if not pluginpath:
+        cand = []
+        cand.append(os.path.dirname(os.path.realpath(__file__)))
+        cand.append(os.path.dirname(os.path.realpath(__file__)) + "/../share/vpp/")
+        for c in cand:
+            c += "/"
+            if os.path.isfile("{}vppapigen_{}.py".format(c, output_module.lower())):
+                pluginpath = c
+                break
+    else:
+        pluginpath = pluginpath + "/"
+    if pluginpath == "":
+        log.exception("Output plugin not found")
+        return 1
+    module_path = "{}vppapigen_{}.py".format(pluginpath, output_module.lower())
+
+    try:
+        plugin = SourceFileLoader(output_module, module_path).load_module()
+    except Exception as err:
+        log.exception("Error importing output plugin: %s, %s", module_path, err)
+        return 1
+
+    parser = VPPAPI(debug=debug, filename=filename, logger=log, revision=git_revision)
+
+    try:
+        if not input_file:
+            parsed_objects = parser.parse_fd(sys.stdin, log)
+        else:
+            parsed_objects = parser.parse_filename(input_file, log)
+    except ParseError as e:
+        print("Parse error: ", e, file=sys.stderr)
+        sys.exit(1)
 
     # Build a list of objects. Hash of lists.
     result = []
 
-    if args.output_module == 'C':
-        s = parser.process(parsed_objects)
-    else:
+    # if the variable is not set in the plugin, assume it to be false.
+    try:
+        plugin.process_imports
+    except AttributeError:
+        plugin.process_imports = False
+
+    if plugin.process_imports:
         result = parser.process_imports(parsed_objects, False, result)
         s = parser.process(result)
+    else:
+        s = parser.process(parsed_objects)
+        imports = parser.process_imports(parsed_objects, False, result)
+        s["imported"] = parser.process(imports)
 
     # Add msg_id field
-    s['Define'] = add_msg_id(s['Define'])
+    s["Define"] = add_msg_id(s["Define"])
 
     # Fold up CRCs
-    foldup_crcs(s['Define'])
+    foldup_crcs(s["Define"])
 
     #
     # Debug
-    if args.debug:
+    if debug:
         import pprint
+
         pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
-        for t in s['Define']:
+        for t in s["Define"]:
             pp.pprint([t.name, t.flags, t.block])
-        for t in s['types']:
+        for t in s["types"]:
             pp.pprint([t.name, t.block])
 
-    #
-    # Generate representation
-    #
-    from importlib.machinery import SourceFileLoader
-
-    # Default path
-    pluginpath = ''
-    if not args.pluginpath:
-        cand = []
-        cand.append(os.path.dirname(os.path.realpath(__file__)))
-        cand.append(os.path.dirname(os.path.realpath(__file__)) +
-                    '/../share/vpp/')
-        for c in cand:
-            c += '/'
-            if os.path.isfile('{}vppapigen_{}.py'
-                              .format(c, args.output_module.lower())):
-                pluginpath = c
-                break
+    result = plugin.run(outputdir, filename, s)
+    if result:
+        if isinstance(output, str):
+            with open(output, "w", encoding="UTF-8") as f:
+                print(result, file=f)
+        else:
+            print(result, file=output)
     else:
-        pluginpath = args.pluginpath + '/'
-    if pluginpath == '':
-        log.exception('Output plugin not found')
+        log.exception("Running plugin failed: %s %s", filename, result)
         return 1
-    module_path = '{}vppapigen_{}.py'.format(pluginpath,
-                                             args.output_module.lower())
+    return 0
 
-    try:
-        plugin = SourceFileLoader(args.output_module,
-                                  module_path).load_module()
-    except Exception as err:
-        log.exception('Error importing output plugin: {}, {}'
-                      .format(module_path, err))
-        return 1
 
-    result = plugin.run(args, filename, s)
-    if result:
-        print(result, file=args.output)
-    else:
-        log.exception('Running plugin failed: {} {}'
-                      .format(filename, result))
+def run_kw_vppapigen(kwargs):
+    return run_vppapigen(**kwargs)
+
+
+#
+# Main
+#
+def main():
+    if sys.version_info < (
+        3,
+        5,
+    ):
+        log.exception(
+            "vppapigen requires a supported version of python. "
+            "Please use version 3.5 or greater. "
+            "Using %s",
+            sys.version,
+        )
         return 1
-    return 0
+
+    cliparser = argparse.ArgumentParser(description="VPP API generator")
+    cliparser.add_argument("--pluginpath", default="")
+    cliparser.add_argument("--includedir", action="append")
+    cliparser.add_argument("--outputdir", action="store")
+    cliparser.add_argument("--input")
+    cliparser.add_argument(
+        "--output",
+        nargs="?",
+        type=argparse.FileType("w", encoding="UTF-8"),
+        default=sys.stdout,
+    )
+
+    cliparser.add_argument("output_module", nargs="?", default="C")
+    cliparser.add_argument("--debug", action="store_true")
+    cliparser.add_argument("--show-name", nargs=1)
+    cliparser.add_argument(
+        "--git-revision", help="Git revision to use for opening files"
+    )
+    args = cliparser.parse_args()
+
+    return run_vppapigen(
+        includedir=args.includedir,
+        debug=args.debug,
+        outputdir=args.outputdir,
+        show_name=args.show_name,
+        input_file=args.input,
+        output_module=args.output_module,
+        pluginpath=args.pluginpath,
+        git_revision=args.git_revision,
+        output=args.output,
+    )
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     sys.exit(main())
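Typical invocations of the refactored entry point (the paths and output module below are only examples); run_vppapigen() mirrors the CLI flags, which makes it callable from tests or other tooling:

    # Command line:
    #   ./vppapigen.py --includedir src --input src/vnet/interface.api JSON \
    #       --output interface.api.json
    #
    # Equivalent programmatic call:
    rv = run_vppapigen(
        input_file="src/vnet/interface.api",
        includedir=["src"],
        output_module="JSON",
        output="interface.api.json",
    )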