VPP-470: Introduce VxLAN-GPE as transport for iOAM.
author    Vengada Govindan <[email protected]>
Wed, 12 Oct 2016 12:54:09 +0000 (05:54 -0700)
committer Vengada Govindan <[email protected]>
Fri, 9 Dec 2016 06:15:02 +0000 (06:15 +0000)
See the Jira ticket for more details.
- New plugins created to (a) add VxLAN-GPE as a transport for iOAM and
  (b) provide export infrastructure for VxLAN-GPE.

Change-Id: Ife50c7434f53d17a4783062310f73d063d53494c
Signed-off-by: Vengada Govindan <[email protected]>
37 files changed:
plugins/ioam-plugin/Makefile.am
plugins/ioam-plugin/ioam/encap/ip6_ioam_trace.c
plugins/ioam-plugin/ioam/export-common/ioam_export.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_test.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_thread.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_node.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/export/ioam_export.c
plugins/ioam-plugin/ioam/export/ioam_export.h [deleted file]
plugins/ioam-plugin/ioam/export/ioam_export_thread.c
plugins/ioam-plugin/ioam/export/node.c
plugins/ioam-plugin/ioam/lib-trace/trace_api.c
plugins/ioam-plugin/ioam/lib-trace/trace_util.c
plugins/ioam-plugin/ioam/lib-trace/trace_util.h
plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_decap.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_encap.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_pop.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe.api [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_api.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h [new file with mode: 0644]
plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_test.c [new file with mode: 0644]
vnet/vnet/ip/ip6_hop_by_hop.h
vnet/vnet/vxlan-gpe/decap.c
vnet/vnet/vxlan-gpe/encap.c
vnet/vnet/vxlan-gpe/vxlan_gpe.c
vnet/vnet/vxlan-gpe/vxlan_gpe.h
vnet/vnet/vxlan-gpe/vxlan_gpe_packet.h
vpp-api-test/vat/api_format.c

diff --git a/plugins/ioam-plugin/Makefile.am b/plugins/ioam-plugin/Makefile.am
index 6e17959..990f6da 100644
@@ -68,7 +68,7 @@ vppapitestplugins_LTLIBRARIES = ioam_pot_test_plugin.la
 vppplugins_LTLIBRARIES = ioam_pot_plugin.la
 
 ########################################
-# iOAM trace export
+# iOAM trace export for IPv6
 ########################################
 
 ioam_export_plugin_la_SOURCES =   \
@@ -96,10 +96,10 @@ vppplugins_LTLIBRARIES += ioam_export_plugin.la
 ########################################
 # iOAM Trace
 ########################################
-ioam_trace_plugin_la_SOURCES =                 \
-       ioam/lib-trace/trace_util.c                     \
+libioam_trace_plugin_la_SOURCES =              \
+       ioam/lib-trace/trace_util.c             \
        ioam/encap/ip6_ioam_trace.c             \
-       ioam/lib-trace/trace_util.h                     \
+       ioam/lib-trace/trace_util.h             \
        ioam/lib-trace/trace_api.c
 
 BUILT_SOURCES +=                               \
@@ -108,17 +108,78 @@ BUILT_SOURCES +=                          \
 
 noinst_HEADERS +=                       \
   ioam/export/ioam_export_all_api_h.h   \
-  ioam/lib-trace/trace_all_api_h.h                     \
-  ioam/lib-trace/trace_msg_enum.h                      \
-  ioam/lib-trace/trace.api.h                   \
+  ioam/lib-trace/trace_all_api_h.h     \
+  ioam/lib-trace/trace_msg_enum.h      \
+  ioam/lib-trace/trace.api.h           \
   ioam/lib-trace/trace_util.h
 
 ioam_trace_test_plugin_la_SOURCES =            \
-       ioam/lib-trace/trace_test.c                     \
+       ioam/lib-trace/trace_test.c             \
        ioam/lib-trace/trace_plugin.api.h
 
 vppapitestplugins_LTLIBRARIES += ioam_trace_test_plugin.la
-vppplugins_LTLIBRARIES += ioam_trace_plugin.la
+vppplugins_LTLIBRARIES += libioam_trace_plugin.la
+
+########################################
+# VxLAN-GPE
+########################################
+libioam_vxlan_gpe_plugin_la_SOURCES =                  \
+       ioam/lib-vxlan-gpe/ioam_encap.c                 \
+       ioam/lib-vxlan-gpe/ioam_decap.c                 \
+       ioam/lib-vxlan-gpe/ioam_pop.c                   \
+       ioam/lib-vxlan-gpe/vxlan_gpe_api.c              \
+       ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c       \
+    ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
+
+BUILT_SOURCES +=                               \
+       ioam/lib-vxlan-gpe/vxlan_gpe.api.h              \
+       ioam/lib-vxlan-gpe/vxlan_gpe.api.json
+
+noinst_HEADERS +=                       \
+  ioam/export/ioam_export_all_api_h.h   \
+  ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h                     \
+  ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h                      \
+  ioam/lib-vxlan-gpe/vxlan_gpe.api.h                   \
+  ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h                     \
+  ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h                    \
+  ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
+
+ioam_vxlan_gpe_test_plugin_la_SOURCES =                \
+       ioam/lib-vxlan-gpe/vxlan_gpe_test.c                     \
+       ioam/lib-vxlan-gpe/vxlan_gpe_plugin.api.h
+
+libioam_vxlan_gpe_plugin_la_LIBADD = libioam_trace_plugin.la
+
+vppapitestplugins_LTLIBRARIES += ioam_vxlan_gpe_test_plugin.la
+vppplugins_LTLIBRARIES += libioam_vxlan_gpe_plugin.la
+
+########################################
+# iOAM export for VxLAN-GPE
+########################################
+
+libvxlan_gpe_ioam_export_plugin_la_SOURCES =   \
+ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c         \
+ioam/export-vxlan-gpe/vxlan_gpe_node.c                \
+ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api.h     \
+ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_thread.c
+
+BUILT_SOURCES +=                               \
+       ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api.h               \
+       ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api.json
+
+noinst_HEADERS +=                                            \
+  ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h       \
+  ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h        \
+  ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api.h
+
+vxlan_gpe_ioam_export_test_plugin_la_SOURCES =          \
+  ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_test.c      \
+  ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_plugin.api.h
+
+libvxlan_gpe_ioam_export_plugin_la_LIBADD = libioam_vxlan_gpe_plugin.la
+
+vppapitestplugins_LTLIBRARIES += vxlan_gpe_ioam_export_test_plugin.la
+vppplugins_LTLIBRARIES += libvxlan_gpe_ioam_export_plugin.la
 
 ########################################
 # iOAM E2E plugin
diff --git a/plugins/ioam-plugin/ioam/encap/ip6_ioam_trace.c b/plugins/ioam-plugin/ioam/encap/ip6_ioam_trace.c
index 16e1081..e63db6e 100644
@@ -38,6 +38,15 @@ typedef union
   u32 as_u32[2];
 } time_u64_t;
 
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct {
+  ip6_hop_by_hop_option_t hdr;
+  u8 ioam_trace_type;
+  u8 data_list_elts_left;
+  u32 elts[0]; /* Variable type. So keep it generic */
+}) ioam_trace_option_t;
+/* *INDENT-ON* */
+
 
 extern ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main;
 extern ip6_main_t ip6_main;
@@ -130,7 +139,7 @@ format_ioam_data_list_element (u8 * s, va_list * args)
 
 
 int
-ioam_trace_get_sizeof_handler (u32 * result)
+ip6_ioam_trace_get_sizeof_handler (u32 * result)
 {
   u16 size = 0;
   u8 trace_data_size = 0;
@@ -290,12 +299,6 @@ ip6_hbh_ioam_trace_data_list_trace_handler (u8 * s,
   int elt_index = 0;
 
   trace = (ioam_trace_option_t *) opt;
-#if 0
-  s =
-    format (s, "  Trace Type 0x%x , %d elts left ts msb(s) 0x%x\n",
-           trace->ioam_trace_type, trace->data_list_elts_left,
-           t->timestamp_msbs);
-#endif
   s =
     format (s, "  Trace Type 0x%x , %d elts left\n", trace->ioam_trace_type,
            trace->data_list_elts_left);
@@ -385,6 +388,45 @@ ip6_hop_by_hop_ioam_trace_init (vlib_main_t * vm)
   return (0);
 }
 
+int
+ip6_trace_profile_cleanup (void)
+{
+  ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+  hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] = 0;
+
+  return 0;
+
+}
+
+
+int
+ip6_trace_profile_setup (void)
+{
+  u32 trace_size = 0;
+  ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
+
+  trace_profile *profile = NULL;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      ip6_ioam_trace_stats_increment_counter (IP6_IOAM_TRACE_PROFILE_MISS, 1);
+      return (-1);
+    }
+
+
+  if (ip6_ioam_trace_get_sizeof_handler (&trace_size) < 0)
+    return (-1);
+
+  hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] = trace_size;
+
+  return (0);
+}
+
+
 VLIB_INIT_FUNCTION (ip6_hop_by_hop_ioam_trace_init);
 
 /*
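
The ip6_trace_profile_setup()/ip6_trace_profile_cleanup() pair added above registers (and clears) the size of the iOAM trace hop-by-hop option once a trace profile has been configured. A minimal caller sketch follows; the extern declaration, function name and warning text are illustrative and not part of this patch:

    /* Sketch only: activate the IPv6 iOAM trace option after a trace
     * profile has been configured through the lib-trace API. */
    #include <vppinfra/error.h>

    extern int ip6_trace_profile_setup (void);  /* defined in ip6_ioam_trace.c above */

    static void
    example_activate_ip6_trace (void)
    {
      if (ip6_trace_profile_setup () < 0)
        clib_warning ("no valid iOAM trace profile; trace option not enabled");
    }
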
diff --git a/plugins/ioam-plugin/ioam/export-common/ioam_export.h b/plugins/ioam-plugin/ioam/export-common/ioam_export.h
new file mode 100644
index 0000000..82559c9
--- /dev/null
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ioam_export_h__
+#define __included_ioam_export_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ip/ip_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/udp.h>
+#include <vnet/flow/ipfix_packet.h>
+
+#include <vppinfra/pool.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+#include <vlib/threads.h>
+
+typedef struct ioam_export_buffer
+{
+  /* Allocated buffer */
+  u32 buffer_index;
+  u64 touched_at;
+  u8 records_in_this_buffer;
+} ioam_export_buffer_t;
+
+
+typedef struct
+{
+  /* API message ID base */
+  u16 msg_id_base;
+
+  /* TODO: to support multiple collectors all this has to be grouped and create a vector here */
+  u8 *record_header;
+  u32 sequence_number;
+  u32 domain_id;
+
+  /* ipfix collector, our ip address */
+  ip4_address_t ipfix_collector;
+  ip4_address_t src_address;
+
+  /* Pool of ioam_export_buffer_t */
+  ioam_export_buffer_t *buffer_pool;
+  /* Vector of per thread ioam_export_buffer_t to buffer pool index */
+  u32 *buffer_per_thread;
+  /* Lock per thread to swap buffers between worker and timer process */
+  volatile u32 **lockp;
+
+  /* time scale transform */
+  u32 unix_time_0;
+  f64 vlib_time_0;
+
+  /* convenience */
+  vlib_main_t *vlib_main;
+  vnet_main_t *vnet_main;
+  ethernet_main_t *ethernet_main;
+  u32 ip4_lookup_node_index;
+
+  uword my_hbh_slot;
+  u32 export_process_node_index;
+} ioam_export_main_t;
+
+ioam_export_main_t ioam_export_main;
+ioam_export_main_t vxlan_gpe_ioam_export_main;
+
+vlib_node_registration_t export_node;
+
+#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
+/*
+ *  Number of records in a buffer
+ * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
+ */
+#define DEFAULT_EXPORT_RECORDS 7
+
+always_inline ioam_export_buffer_t *
+ioam_export_get_my_buffer (ioam_export_main_t * em, u32 thread_id)
+{
+
+  if (vec_len (em->buffer_per_thread) > thread_id)
+    return (pool_elt_at_index
+           (em->buffer_pool, em->buffer_per_thread[thread_id]));
+  return (0);
+}
+
+inline static int
+ioam_export_buffer_add_header (ioam_export_main_t * em, vlib_buffer_t * b0)
+{
+  clib_memcpy (b0->data, em->record_header, vec_len (em->record_header));
+  b0->current_data = 0;
+  b0->current_length = vec_len (em->record_header);
+  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+  return (1);
+}
+
+inline static int
+ioam_export_init_buffer (ioam_export_main_t * em, vlib_main_t * vm,
+                        ioam_export_buffer_t * eb)
+{
+  vlib_buffer_t *b = 0;
+
+  if (!eb)
+    return (-1);
+  /* TODO: Perhaps buffer init from template here */
+  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
+    return (-2);
+  eb->records_in_this_buffer = 0;
+  eb->touched_at = vlib_time_now (vm);
+  b = vlib_get_buffer (vm, eb->buffer_index);
+  (void) ioam_export_buffer_add_header (em, b);
+  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
+  vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
+  return (1);
+}
+
+inline static void
+ioam_export_thread_buffer_free (ioam_export_main_t * em)
+{
+  vlib_main_t *vm = em->vlib_main;
+  ioam_export_buffer_t *eb = 0;
+  int i;
+  for (i = 0; i < vec_len (em->buffer_per_thread); i++)
+    {
+      eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
+      if (eb)
+       vlib_buffer_free (vm, &(eb->buffer_index), 1);
+    }
+  for (i = 0; i < vec_len (em->lockp); i++)
+    clib_mem_free ((void *) em->lockp[i]);
+  vec_free (em->buffer_per_thread);
+  pool_free (em->buffer_pool);
+  vec_free (em->lockp);
+  em->buffer_per_thread = 0;
+  em->buffer_pool = 0;
+  em->lockp = 0;
+}
+
+inline static int
+ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
+{
+  int no_of_threads = vec_len (vlib_worker_threads);
+  int i;
+  ioam_export_buffer_t *eb = 0;
+  vlib_node_t *ip4_lookup_node;
+
+  pool_alloc_aligned (em->buffer_pool,
+                     no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (em->buffer_per_thread,
+                       no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (em->lockp, no_of_threads - 1, CLIB_CACHE_LINE_BYTES);
+  ip4_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+  em->ip4_lookup_node_index = ip4_lookup_node->index;
+  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
+    {
+      return (-1);
+    }
+  for (i = 0; i < no_of_threads; i++)
+    {
+      eb = 0;
+      pool_get_aligned (em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
+      memset (eb, 0, sizeof (*eb));
+      em->buffer_per_thread[i] = eb - em->buffer_pool;
+      if (ioam_export_init_buffer (em, vm, eb) != 1)
+       {
+         ioam_export_thread_buffer_free (em);
+         return (-2);
+       }
+      em->lockp[i] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+                                            CLIB_CACHE_LINE_BYTES);
+      memset ((void *) em->lockp[i], 0, CLIB_CACHE_LINE_BYTES);
+    }
+  return (1);
+}
+
+#define IPFIX_IOAM_EXPORT_ID 272
+
+/* Used to build the rewrite */
+/* data set packet */
+typedef struct
+{
+  ipfix_message_header_t h;
+  ipfix_set_header_t s;
+} ipfix_data_packet_t;
+
+typedef struct
+{
+  ip4_header_t ip4;
+  udp_header_t udp;
+  ipfix_data_packet_t ipfix;
+} ip4_ipfix_data_packet_t;
+
+
+inline static void
+ioam_export_header_cleanup (ioam_export_main_t * em,
+                           ip4_address_t * collector_address,
+                           ip4_address_t * src_address)
+{
+  vec_free (em->record_header);
+  em->record_header = 0;
+}
+
+inline static int
+ioam_export_header_create (ioam_export_main_t * em,
+                          ip4_address_t * collector_address,
+                          ip4_address_t * src_address)
+{
+  ip4_header_t *ip;
+  udp_header_t *udp;
+  ipfix_message_header_t *h;
+  ipfix_set_header_t *s;
+  u8 *rewrite = 0;
+  ip4_ipfix_data_packet_t *tp;
+
+
+  /* allocate rewrite space */
+  vec_validate_aligned (rewrite,
+                       sizeof (ip4_ipfix_data_packet_t) - 1,
+                       CLIB_CACHE_LINE_BYTES);
+
+  tp = (ip4_ipfix_data_packet_t *) rewrite;
+  ip = (ip4_header_t *) & tp->ip4;
+  udp = (udp_header_t *) (ip + 1);
+  h = (ipfix_message_header_t *) (udp + 1);
+  s = (ipfix_set_header_t *) (h + 1);
+
+  ip->ip_version_and_header_length = 0x45;
+  ip->ttl = 254;
+  ip->protocol = IP_PROTOCOL_UDP;
+  ip->src_address.as_u32 = src_address->as_u32;
+  ip->dst_address.as_u32 = collector_address->as_u32;
+  udp->src_port = clib_host_to_net_u16 (4939 /* $$FIXME */ );
+  udp->dst_port = clib_host_to_net_u16 (4939);
+  /* FIXUP: UDP length */
+  udp->length = clib_host_to_net_u16 (vec_len (rewrite) +
+                                     (DEFAULT_EXPORT_RECORDS *
+                                      DEFAULT_EXPORT_SIZE) - sizeof (*ip));
+
+  /* FIXUP: message header export_time */
+  /* FIXUP: message header sequence_number */
+  h->domain_id = clib_host_to_net_u32 (em->domain_id);
+
+  /*FIXUP: Setid length in octets if records exported are not default */
+  s->set_id_length = ipfix_set_id_length (IPFIX_IOAM_EXPORT_ID,
+                                         (sizeof (*s) +
+                                          (DEFAULT_EXPORT_RECORDS *
+                                           DEFAULT_EXPORT_SIZE)));
+
+  /* FIXUP: h version and length length in octets if records exported are not default */
+  h->version_length = version_length (sizeof (*h) +
+                                     (sizeof (*s) +
+                                      (DEFAULT_EXPORT_RECORDS *
+                                       DEFAULT_EXPORT_SIZE)));
+
+  /* FIXUP: ip length if records exported are not default */
+  /* FIXUP: ip checksum if records exported are not default */
+  ip->length = clib_host_to_net_u16 (vec_len (rewrite) +
+                                    (DEFAULT_EXPORT_RECORDS *
+                                     DEFAULT_EXPORT_SIZE));
+  ip->checksum = ip4_header_checksum (ip);
+  _vec_len (rewrite) = sizeof (ip4_ipfix_data_packet_t);
+  em->record_header = rewrite;
+  return (1);
+}
+
+inline static int
+ioam_export_send_buffer (ioam_export_main_t * em, vlib_main_t * vm,
+                        ioam_export_buffer_t * eb)
+{
+  ip4_header_t *ip;
+  udp_header_t *udp;
+  ipfix_message_header_t *h;
+  ipfix_set_header_t *s;
+  ip4_ipfix_data_packet_t *tp;
+  vlib_buffer_t *b0;
+  u16 new_l0, old_l0;
+  ip_csum_t sum0;
+  vlib_frame_t *nf = 0;
+  u32 *to_next;
+
+  b0 = vlib_get_buffer (vm, eb->buffer_index);
+  tp = vlib_buffer_get_current (b0);
+  ip = (ip4_header_t *) & tp->ip4;
+  udp = (udp_header_t *) (ip + 1);
+  h = (ipfix_message_header_t *) (udp + 1);
+  s = (ipfix_set_header_t *) (h + 1);
+
+  /* FIXUP: message header export_time */
+  h->export_time = clib_host_to_net_u32 ((u32)
+                                        (((f64) em->unix_time_0) +
+                                         (vlib_time_now (em->vlib_main) -
+                                          em->vlib_time_0)));
+
+  /* FIXUP: message header sequence_number */
+  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);
+
+  /* FIXUP: lengths if different from default */
+  if (PREDICT_FALSE (eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS))
+    {
+      s->set_id_length =
+       ipfix_set_id_length (IPFIX_IOAM_EXPORT_ID /* set_id */ ,
+                            b0->current_length - (sizeof (*ip) +
+                                                  sizeof (*udp) +
+                                                  sizeof (*h)));
+      h->version_length =
+       version_length (b0->current_length - (sizeof (*ip) + sizeof (*udp)));
+      sum0 = ip->checksum;
+      old_l0 = ip->length;
+      new_l0 = clib_host_to_net_u16 ((u16) b0->current_length);
+      sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+                            length /* changed member */ );
+      ip->checksum = ip_csum_fold (sum0);
+      ip->length = new_l0;
+      udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+    }
+
+  /* Enqueue pkts to ip4-lookup */
+
+  nf = vlib_get_frame_to_node (vm, em->ip4_lookup_node_index);
+  nf->n_vectors = 0;
+  to_next = vlib_frame_vector_args (nf);
+  nf->n_vectors = 1;
+  to_next[0] = eb->buffer_index;
+  vlib_put_frame_to_node (vm, em->ip4_lookup_node_index, nf);
+  return (1);
+
+}
+
+#define EXPORT_TIMEOUT (20.0)
+#define THREAD_PERIOD (30.0)
+inline static uword
+ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
+                           vlib_node_runtime_t * rt, vlib_frame_t * f,
+                           u32 index)
+{
+  f64 now;
+  f64 timeout = 30.0;
+  uword event_type;
+  uword *event_data = 0;
+  int i;
+  ioam_export_buffer_t *eb = 0, *new_eb = 0;
+  u32 *vec_buffer_indices = 0;
+  u32 *vec_buffer_to_be_sent = 0;
+  u32 *thread_index = 0;
+  u32 new_pool_index = 0;
+
+  em->export_process_node_index = index;
+  /* Wait for Godot... */
+  vlib_process_wait_for_event_or_clock (vm, 1e9);
+  event_type = vlib_process_get_events (vm, &event_data);
+  if (event_type != 1)
+    clib_warning ("bogus kickoff event received, %d", event_type);
+  vec_reset_length (event_data);
+
+  while (1)
+    {
+      vlib_process_wait_for_event_or_clock (vm, timeout);
+      event_type = vlib_process_get_events (vm, &event_data);
+      switch (event_type)
+       {
+       case 2:         /* Stop and Wait for kickoff again */
+         timeout = 1e9;
+         break;
+       case 1:         /* kickoff : Check for unsent buffers */
+         timeout = THREAD_PERIOD;
+         break;
+       case ~0:                /* timeout */
+         break;
+       }
+      vec_reset_length (event_data);
+      now = vlib_time_now (vm);
+      /*
+       * Create buffers for threads that are not active enough
+       * to send out the export records
+       */
+      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
+       {
+         /* If the worker thread is processing export records ignore further checks */
+         if (*em->lockp[i] == 1)
+           continue;
+         eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
+         if (eb->records_in_this_buffer > 0
+             && now > (eb->touched_at + EXPORT_TIMEOUT))
+           {
+             pool_get_aligned (em->buffer_pool, new_eb,
+                               CLIB_CACHE_LINE_BYTES);
+             memset (new_eb, 0, sizeof (*new_eb));
+             if (ioam_export_init_buffer (em, vm, new_eb) == 1)
+               {
+                 new_pool_index = new_eb - em->buffer_pool;
+                 vec_add (vec_buffer_indices, &new_pool_index, 1);
+                 vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
+                          1);
+                 vec_add (thread_index, &i, 1);
+               }
+             else
+               {
+                 pool_put (em->buffer_pool, new_eb);
+                 /*Give up */
+                 goto CLEANUP;
+               }
+           }
+       }
+      if (vec_len (thread_index) != 0)
+       {
+         /*
+          * Now swap the buffers out
+          */
+         for (i = 0; i < vec_len (thread_index); i++)
+           {
+             while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
+               ;
+             em->buffer_per_thread[thread_index[i]] =
+               vec_pop (vec_buffer_indices);
+             *em->lockp[thread_index[i]] = 0;
+           }
+
+         /* Send the buffers */
+         for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
+           {
+             eb =
+               pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
+             ioam_export_send_buffer (em, vm, eb);
+             pool_put (em->buffer_pool, eb);
+           }
+       }
+
+    CLEANUP:
+      /* Free any leftover/unused buffers and everything that was allocated */
+      for (i = 0; i < vec_len (vec_buffer_indices); i++)
+       {
+         new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
+         vlib_buffer_free (vm, &new_eb->buffer_index, 1);
+         pool_put (em->buffer_pool, new_eb);
+       }
+      vec_free (vec_buffer_indices);
+      vec_free (vec_buffer_to_be_sent);
+      vec_free (thread_index);
+    }
+  return 0;                    /* not so much */
+}
+
+#define ioam_export_node_common(EM, VM, N, F, HTYPE, L, V, NEXT)               \
+do {                                                                           \
+  u32 n_left_from, *from, *to_next;                                            \
+  export_next_t next_index;                                                    \
+  u32 pkts_recorded = 0;                                                       \
+  ioam_export_buffer_t *my_buf = 0;                                            \
+  vlib_buffer_t *eb0 = 0;                                                      \
+  u32 ebi0 = 0;                                                                \
+  from = vlib_frame_vector_args (F);                                           \
+  n_left_from = (F)->n_vectors;                                                \
+  next_index = (N)->cached_next_index;                                         \
+  while (__sync_lock_test_and_set ((EM)->lockp[(VM)->cpu_index], 1));          \
+  my_buf = ioam_export_get_my_buffer (EM, (VM)->cpu_index);                    \
+  my_buf->touched_at = vlib_time_now (VM);                                     \
+  while (n_left_from > 0)                                                      \
+    {                                                                          \
+      u32 n_left_to_next;                                                      \
+      vlib_get_next_frame (VM, N, next_index, to_next, n_left_to_next);        \
+      while (n_left_from >= 4 && n_left_to_next >= 2)                          \
+       {                                                                      \
+         u32 next0 = NEXT;                                                    \
+         u32 next1 = NEXT;                                                    \
+         u32 bi0, bi1;                                                        \
+         HTYPE *ip0, *ip1;                                                    \
+         vlib_buffer_t *p0, *p1;                                              \
+         u32 ip_len0, ip_len1;                                                \
+         {                                                                    \
+           vlib_buffer_t *p2, *p3;                                            \
+           p2 = vlib_get_buffer (VM, from[2]);                                \
+           p3 = vlib_get_buffer (VM, from[3]);                                \
+           vlib_prefetch_buffer_header (p2, LOAD);                            \
+           vlib_prefetch_buffer_header (p3, LOAD);                            \
+           CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
+           CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);         \
+         }                                                                    \
+         to_next[0] = bi0 = from[0];                                          \
+         to_next[1] = bi1 = from[1];                                          \
+         from += 2;                                                           \
+         to_next += 2;                                                        \
+         n_left_from -= 2;                                                    \
+         n_left_to_next -= 2;                                                 \
+         p0 = vlib_get_buffer (VM, bi0);                                      \
+         p1 = vlib_get_buffer (VM, bi1);                                      \
+         ip0 = vlib_buffer_get_current (p0);                                  \
+         ip1 = vlib_buffer_get_current (p1);                                  \
+         ip_len0 =                                                            \
+           clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
+         ip_len1 =                                                            \
+           clib_net_to_host_u16 (ip1->L) + sizeof (HTYPE);                    \
+         ebi0 = my_buf->buffer_index;                                         \
+         eb0 = vlib_get_buffer (VM, ebi0);                                    \
+         if (PREDICT_FALSE (eb0 == 0))                                        \
+           goto NO_BUFFER1;                                                   \
+         ip_len0 =                                                            \
+           ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
+         ip_len1 =                                                            \
+           ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1;     \
+         copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
+         eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
+         my_buf->records_in_this_buffer++;                                    \
+         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
+           {                                                                  \
+             ioam_export_send_buffer (EM, VM, my_buf);                        \
+             ioam_export_init_buffer (EM, VM, my_buf);                        \
+           }                                                                  \
+         ebi0 = my_buf->buffer_index;                                         \
+         eb0 = vlib_get_buffer (VM, ebi0);                                    \
+         if (PREDICT_FALSE (eb0 == 0))                                        \
+           goto NO_BUFFER1;                                                   \
+         copy3cachelines (eb0->data + eb0->current_length, ip1, ip_len1);     \
+         eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
+         my_buf->records_in_this_buffer++;                                    \
+         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
+           {                                                                  \
+             ioam_export_send_buffer (EM, VM, my_buf);                        \
+             ioam_export_init_buffer (EM, VM, my_buf);                        \
+           }                                                                  \
+         pkts_recorded += 2;                                                  \
+         if (PREDICT_FALSE (((node)->flags & VLIB_NODE_FLAG_TRACE)))          \
+           {                                                                  \
+             if (p0->flags & VLIB_BUFFER_IS_TRACED)                           \
+               {                                                              \
+                 export_trace_t *t =                                          \
+                   vlib_add_trace (VM, node, p0, sizeof (*t));                \
+                 t->flow_label =                                              \
+                   clib_net_to_host_u32 (ip0->V);                             \
+                 t->next_index = next0;                                       \
+               }                                                              \
+             if (p1->flags & VLIB_BUFFER_IS_TRACED)                           \
+               {                                                              \
+                 export_trace_t *t =                                          \
+                   vlib_add_trace (VM, N, p1, sizeof (*t));                   \
+                 t->flow_label =                                              \
+                   clib_net_to_host_u32 (ip1->V);                             \
+                 t->next_index = next1;                                       \
+               }                                                              \
+           }                                                                  \
+       NO_BUFFER1:                                                            \
+         vlib_validate_buffer_enqueue_x2 (VM, N, next_index,                  \
+                                          to_next, n_left_to_next,            \
+                                          bi0, bi1, next0, next1);            \
+       }                                                                      \
+      while (n_left_from > 0 && n_left_to_next > 0)                            \
+       {                                                                      \
+         u32 bi0;                                                             \
+         vlib_buffer_t *p0;                                                   \
+         u32 next0 = NEXT;                                                    \
+         HTYPE *ip0;                                                          \
+         u32 ip_len0;                                                         \
+         bi0 = from[0];                                                       \
+         to_next[0] = bi0;                                                    \
+         from += 1;                                                           \
+         to_next += 1;                                                        \
+         n_left_from -= 1;                                                    \
+         n_left_to_next -= 1;                                                 \
+         p0 = vlib_get_buffer (VM, bi0);                                      \
+         ip0 = vlib_buffer_get_current (p0);                                  \
+         ip_len0 =                                                            \
+           clib_net_to_host_u16 (ip0->L) + sizeof (HTYPE);                    \
+         ebi0 = my_buf->buffer_index;                                         \
+         eb0 = vlib_get_buffer (VM, ebi0);                                    \
+         if (PREDICT_FALSE (eb0 == 0))                                        \
+           goto NO_BUFFER;                                                    \
+         ip_len0 =                                                            \
+           ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;     \
+         copy3cachelines (eb0->data + eb0->current_length, ip0, ip_len0);     \
+         eb0->current_length += DEFAULT_EXPORT_SIZE;                          \
+         my_buf->records_in_this_buffer++;                                    \
+         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)        \
+           {                                                                  \
+             ioam_export_send_buffer (EM, VM, my_buf);                        \
+             ioam_export_init_buffer (EM, VM, my_buf);                        \
+           }                                                                  \
+         if (PREDICT_FALSE (((N)->flags & VLIB_NODE_FLAG_TRACE)               \
+                            && (p0->flags & VLIB_BUFFER_IS_TRACED)))          \
+           {                                                                  \
+             export_trace_t *t = vlib_add_trace (VM, (N), p0, sizeof (*t));   \
+             t->flow_label =                                                  \
+               clib_net_to_host_u32 (ip0->V);                                 \
+             t->next_index = next0;                                           \
+           }                                                                  \
+         pkts_recorded += 1;                                                  \
+       NO_BUFFER:                                                             \
+         vlib_validate_buffer_enqueue_x1 (VM, N, next_index,                  \
+                                          to_next, n_left_to_next,            \
+                                          bi0, next0);                        \
+       }                                                                      \
+      vlib_put_next_frame (VM, N, next_index, n_left_to_next);                 \
+    }                                                                          \
+  vlib_node_increment_counter (VM, export_node.index,                          \
+                              EXPORT_ERROR_RECORDED, pkts_recorded);          \
+  *(EM)->lockp[(VM)->cpu_index] = 0;                                           \
+} while(0)
+
+#endif /* __included_ioam_export_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
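
For reference, the DEFAULT_EXPORT_SIZE / DEFAULT_EXPORT_RECORDS sizing comment in the header above works out as below when CLIB_CACHE_LINE_BYTES is 64 (an assumption of this sketch, not something the patch fixes):

    /* Standalone sanity check of the sizing comment in ioam_export.h:
     * DEFAULT_EXPORT_SIZE = 3 cache lines = 192 bytes per record, and
     * (1500 - (40 + 8 + 24)) / 192 truncates to 7 records per buffer. */
    #include <assert.h>

    int
    main (void)
    {
      int export_size = 3 * 64;            /* DEFAULT_EXPORT_SIZE, 64-byte cache lines */
      int payload = 1500 - (40 + 8 + 24);  /* MTU minus ip/UDP/IPFIX headers, per the comment */
      assert (payload / export_size == 7); /* DEFAULT_EXPORT_RECORDS */
      return 0;
    }
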
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api
new file mode 100644
index 0000000..7b17c3f
--- /dev/null
@@ -0,0 +1,42 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Define a simple binary API to control the feature */
+
+define vxlan_gpe_ioam_export_enable_disable {
+    /* Client identifier, set from api_main.my_client_index */
+    u32 client_index;
+
+    /* Arbitrary context, so client can match reply to request */
+    u32 context;
+
+    /* Enable / disable the feature */
+    u8 is_disable;
+
+    /* Collector ip address */
+    u8 collector_address[4];
+    u8 src_address[4];
+
+    /* Src ip address */
+};
+
+define vxlan_gpe_ioam_export_enable_disable_reply {
+    /* From the request */
+    u32 context;
+
+    /* Return value, zero means all OK */
+    i32 retval;
+};
\ No newline at end of file
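
When the API generator processes this file (see the vl_typedefs includes in vxlan_gpe_ioam_export.c below), the request message becomes a packed C struct roughly shaped as follows; this is a hedged sketch of the generated layout, not the generated header itself:

    /* Approximate shape of the generated request message (sketch). */
    #include <vppinfra/types.h>

    typedef struct
    {
      u16 _vl_msg_id;            /* message id, set by the sender */
      u32 client_index;
      u32 context;
      u8 is_disable;
      u8 collector_address[4];
      u8 src_address[4];
    } __attribute__ ((packed)) vl_api_vxlan_gpe_ioam_export_enable_disable_t;
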
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.c
new file mode 100644
index 0000000..6c46ee4
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_ioam_export.c - ioam export API / debug CLI handling
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <ioam/export-common/ioam_export.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* define message IDs */
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_api_version
+
+/*
+ * A handy macro to set up a message reply.
+ * Assumes that the following variables are available:
+ * mp - pointer to request message
+ * rmp - pointer to reply message type
+ * rv - return value
+ */
+
+#define REPLY_MACRO(t)                                          \
+do {                                                            \
+    unix_shared_memory_queue_t * q =                            \
+    vl_api_client_index_to_input_queue (mp->client_index);      \
+    if (!q)                                                     \
+        return;                                                 \
+                                                                \
+    rmp = vl_msg_api_alloc (sizeof (*rmp));                     \
+    rmp->_vl_msg_id = ntohs((t)+sm->msg_id_base);               \
+    rmp->context = mp->context;                                 \
+    rmp->retval = ntohl(rv);                                    \
+                                                                \
+    vl_msg_api_send_shmem (q, (u8 *)&rmp);                      \
+} while(0);
+
+
+/* List of message types that this plugin understands */
+
+
+#define foreach_vxlan_gpe_ioam_export_plugin_api_msg                        \
+_(VXLAN_GPE_IOAM_EXPORT_ENABLE_DISABLE, vxlan_gpe_ioam_export_enable_disable)
+
+/*
+ * This routine exists to convince the vlib plugin framework that
+ * we haven't accidentally copied a random .dll into the plugin directory.
+ *
+ * Also collects global variable pointers passed from the vpp engine
+ */
+
+clib_error_t *
+vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h,
+                     int from_early_init)
+{
+  ioam_export_main_t *em = &vxlan_gpe_ioam_export_main;
+  clib_error_t *error = 0;
+
+  em->vlib_main = vm;
+  em->vnet_main = h->vnet_main;
+  em->ethernet_main = h->ethernet_main;
+
+  return error;
+}
+
+extern void vxlan_gpe_set_next_override (uword next);
+/* Action function shared between message handler and debug CLI */
+int
+vxlan_gpe_ioam_export_enable_disable (ioam_export_main_t * em,
+                                     u8 is_disable,
+                                     ip4_address_t * collector_address,
+                                     ip4_address_t * src_address)
+{
+  vlib_main_t *vm = em->vlib_main;
+
+  if (is_disable == 0)
+    {
+      if (1 == ioam_export_header_create (em, collector_address, src_address))
+       {
+         ioam_export_thread_buffer_init (em, vm);
+         vxlan_gpe_set_next_override (em->my_hbh_slot);
+         /* Turn on the export buffer check process */
+         vlib_process_signal_event (vm, em->export_process_node_index, 1, 0);
+
+       }
+      else
+       {
+         return (-2);
+       }
+    }
+  else
+    {
+      vxlan_gpe_set_next_override (VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP);
+      ioam_export_header_cleanup (em, collector_address, src_address);
+      ioam_export_thread_buffer_free (em);
+      /* Turn off the export buffer check process */
+      vlib_process_signal_event (vm, em->export_process_node_index, 2, 0);
+
+    }
+
+  return 0;
+}
+
+/* API message handler */
+static void vl_api_vxlan_gpe_ioam_export_enable_disable_t_handler
+  (vl_api_vxlan_gpe_ioam_export_enable_disable_t * mp)
+{
+  vl_api_vxlan_gpe_ioam_export_enable_disable_reply_t *rmp;
+  ioam_export_main_t *sm = &vxlan_gpe_ioam_export_main;
+  int rv;
+
+  rv = vxlan_gpe_ioam_export_enable_disable (sm, (int) (mp->is_disable),
+                                            (ip4_address_t *)
+                                            mp->collector_address,
+                                            (ip4_address_t *)
+                                            mp->src_address);
+
+  REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_EXPORT_ENABLE_DISABLE_REPLY);
+}                              /* API message handler */
+
+
+
+/* Set up the API message handling tables */
+static clib_error_t *
+vxlan_gpe_ioam_export_plugin_api_hookup (vlib_main_t * vm)
+{
+  ioam_export_main_t *sm = &vxlan_gpe_ioam_export_main;
+#define _(N,n)                                                  \
+    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),     \
+                           #n,                                 \
+                           vl_api_##n##_t_handler,              \
+                           vl_noop_handler,                     \
+                           vl_api_##n##_t_endian,               \
+                           vl_api_##n##_t_print,                \
+                           sizeof(vl_api_##n##_t), 1);
+  foreach_vxlan_gpe_ioam_export_plugin_api_msg;
+#undef _
+
+  return 0;
+}
+
+
+static clib_error_t *
+set_vxlan_gpe_ioam_export_ipfix_command_fn (vlib_main_t * vm,
+                                           unformat_input_t * input,
+                                           vlib_cli_command_t * cmd)
+{
+  ioam_export_main_t *em = &vxlan_gpe_ioam_export_main;
+  ip4_address_t collector, src;
+  u8 is_disable = 0;
+
+  collector.as_u32 = 0;
+  src.as_u32 = 0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "collector %U", unformat_ip4_address, &collector))
+       ;
+      else if (unformat (input, "src %U", unformat_ip4_address, &src))
+       ;
+      else if (unformat (input, "disable"))
+       is_disable = 1;
+      else
+       break;
+    }
+
+  if (collector.as_u32 == 0)
+    return clib_error_return (0, "collector address required");
+
+  if (src.as_u32 == 0)
+    return clib_error_return (0, "src address required");
+
+  em->ipfix_collector.as_u32 = collector.as_u32;
+  em->src_address.as_u32 = src.as_u32;
+
+  vlib_cli_output (vm, "Collector %U, src address %U",
+                  format_ip4_address, &em->ipfix_collector,
+                  format_ip4_address, &em->src_address);
+
+  /* Turn on the export timer process */
+  // vlib_process_signal_event (vm, flow_report_process_node.index,
+  //1, 0);
+  vxlan_gpe_ioam_export_enable_disable (em, is_disable, &collector, &src);
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_vxlan_gpe_ioam_ipfix_command, static) =
+{
+.path = "set vxlan-gpe-ioam export ipfix",
+.short_help = "set vxlan-gpe-ioam export ipfix collector <ip4-address> src <ip4-address>",
+.function = set_vxlan_gpe_ioam_export_ipfix_command_fn,
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+vxlan_gpe_ioam_export_init (vlib_main_t * vm)
+{
+  ioam_export_main_t *em = &vxlan_gpe_ioam_export_main;
+  clib_error_t *error = 0;
+  u8 *name;
+  u32 node_index = export_node.index;
+  vlib_node_t *vxlan_gpe_decap_ioam_node = NULL;
+
+  name = format (0, "vxlan_gpe_ioam_export_%08x%c", api_version, 0);
+
+  /* Ask for a correctly-sized block of API message decode slots */
+  em->msg_id_base = vl_msg_api_get_msg_ids
+    ((char *) name, VL_MSG_FIRST_AVAILABLE);
+  em->unix_time_0 = (u32) time (0);    /* Store starting time */
+  em->vlib_time_0 = vlib_time_now (vm);
+
+  error = vxlan_gpe_ioam_export_plugin_api_hookup (vm);
+
+  /* Hook this export node to vxlan-gpe-decap-ioam-v4 */
+  vxlan_gpe_decap_ioam_node =
+    vlib_get_node_by_name (vm, (u8 *) "vxlan-gpe-decap-ioam-v4");
+  em->my_hbh_slot =
+    vlib_node_add_next (vm, vxlan_gpe_decap_ioam_node->index, node_index);
+  vec_free (name);
+
+  return error;
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_ioam_export_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
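
Besides the debug CLI added above ("set vxlan-gpe-ioam export ipfix collector <ip4-address> src <ip4-address>") and the binary API message, the export can be toggled directly through the action function. A minimal sketch, assuming the plugin is loaded and initialized; the addresses and error text are illustrative:

    /* Sketch only: enable IPFIX export of VxLAN-GPE iOAM records from C,
     * using the action function defined above. */
    #include <ioam/export-common/ioam_export.h>

    extern int vxlan_gpe_ioam_export_enable_disable (ioam_export_main_t * em,
                                                     u8 is_disable,
                                                     ip4_address_t * collector_address,
                                                     ip4_address_t * src_address);

    static clib_error_t *
    example_enable_vxlan_gpe_ioam_export (void)
    {
      ioam_export_main_t *em = &vxlan_gpe_ioam_export_main;
      ip4_address_t collector, src;

      collector.as_u32 = clib_host_to_net_u32 (0xc0a80101);  /* 192.168.1.1 */
      src.as_u32 = clib_host_to_net_u32 (0xc0a80102);        /* 192.168.1.2 */

      if (vxlan_gpe_ioam_export_enable_disable (em, 0 /* enable */,
                                                &collector, &src) != 0)
        return clib_error_return (0, "vxlan-gpe ioam export enable failed");
      return 0;
    }
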
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h
new file mode 100644
index 0000000..6d93f09
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Include the generated file, see BUILT_SOURCES in Makefile.am */
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api.h>
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h
new file mode 100644
index 0000000..cc5698d
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vxlan_gpe_ioam_export_msg_enum_h
+#define included_vxlan_gpe_ioam_export_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum {
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+    /* We'll want to know how many messages IDs we need... */
+    VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+#endif /* included_vxlan_gpe_ioam_export_msg_enum_h */
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_test.c b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_test.c
new file mode 100644
index 0000000..494263d
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_ioam_export_test.c - test harness plugin
+ *------------------------------------------------------------------
+ */
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+#include <vppinfra/error.h>
+
+
+/* Declare message IDs */
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_typedefs
+
+/* declare message handlers for each api */
+
+#define vl_endianfun           /* define message structures */
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number. */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_all_api_h.h>
+#undef vl_api_version
+
+
+typedef struct
+{
+  /* API message ID base */
+  u16 msg_id_base;
+  vat_main_t *vat_main;
+} export_test_main_t;
+
+export_test_main_t export_test_main;
+
+#define foreach_standard_reply_retval_handler   \
+_(vxlan_gpe_ioam_export_enable_disable_reply)
+
+#define _(n)                                            \
+    static void vl_api_##n##_t_handler                  \
+    (vl_api_##n##_t * mp)                               \
+    {                                                   \
+        vat_main_t * vam = export_test_main.vat_main;   \
+        i32 retval = ntohl(mp->retval);                 \
+        if (vam->async_mode) {                          \
+            vam->async_errors += (retval < 0);          \
+        } else {                                        \
+            vam->retval = retval;                       \
+            vam->result_ready = 1;                      \
+        }                                               \
+    }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers
+ * we just generated
+ */
+#define foreach_vpe_api_reply_msg                                       \
+_(VXLAN_GPE_IOAM_EXPORT_ENABLE_DISABLE_REPLY, vxlan_gpe_ioam_export_enable_disable_reply)
+
+
+/* M: construct, but don't yet send a message */
+
+#define M(T,t)                                                  \
+do {                                                            \
+    vam->result_ready = 0;                                      \
+    mp = vl_msg_api_alloc(sizeof(*mp));                         \
+    memset (mp, 0, sizeof (*mp));                               \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base);      \
+    mp->client_index = vam->my_client_index;                    \
+} while(0);
+
+#define M2(T,t,n)                                               \
+do {                                                            \
+    vam->result_ready = 0;                                      \
+    mp = vl_msg_api_alloc(sizeof(*mp)+(n));                     \
+    memset (mp, 0, sizeof (*mp));                               \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base);      \
+    mp->client_index = vam->my_client_index;                    \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout */
+#define W                                       \
+do {                                            \
+    timeout = vat_time_now (vam) + 1.0;         \
+                                                \
+    while (vat_time_now (vam) < timeout) {      \
+        if (vam->result_ready == 1) {           \
+            return (vam->retval);               \
+        }                                       \
+    }                                           \
+    return -99;                                 \
+} while(0);
+
+static int
+api_vxlan_gpe_ioam_export_enable_disable (vat_main_t * vam)
+{
+  export_test_main_t *sm = &export_test_main;
+  unformat_input_t *i = vam->input;
+  f64 timeout;
+  int is_disable = 0;
+  vl_api_vxlan_gpe_ioam_export_enable_disable_t *mp;
+
+  /* Parse args required to build the message */
+  while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (i, "disable"))
+       is_disable = 1;
+      else
+       break;
+    }
+
+  /* Construct the API message */
+  M (VXLAN_GPE_IOAM_EXPORT_ENABLE_DISABLE,
+     vxlan_gpe_ioam_export_enable_disable);
+  mp->is_disable = is_disable;
+
+  /* send it... */
+  S;
+
+  /* Wait for a reply... */
+  W;
+}
+
+/*
+ * List of messages that the api test plugin sends,
+ * and that the data plane plugin processes
+ */
+#define foreach_vpe_api_msg \
+_(vxlan_gpe_ioam_export_enable_disable, "<intfc> [disable]")
+
+void
+vat_api_hookup (vat_main_t * vam)
+{
+  export_test_main_t *sm = &export_test_main;
+  /* Hook up handlers for replies from the data plane plug-in */
+#define _(N,n)                                                  \
+    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),     \
+                           #n,                                  \
+                           vl_api_##n##_t_handler,              \
+                           vl_noop_handler,                     \
+                           vl_api_##n##_t_endian,               \
+                           vl_api_##n##_t_print,                \
+                           sizeof(vl_api_##n##_t), 1);
+  foreach_vpe_api_reply_msg;
+#undef _
+
+  /* API messages we can send */
+#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n);
+  foreach_vpe_api_msg;
+#undef _
+
+  /* Help strings */
+#define _(n,h) hash_set_mem (vam->help_by_name, #n, h);
+  foreach_vpe_api_msg;
+#undef _
+}
+
+clib_error_t *
+vat_plugin_register (vat_main_t * vam)
+{
+  export_test_main_t *sm = &export_test_main;
+  u8 *name;
+
+  sm->vat_main = vam;
+
+  name = format (0, "vxlan_gpe_ioam_export_%08x%c", api_version, 0);
+  sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name);
+
+  if (sm->msg_id_base != (u16) ~ 0)
+    vat_api_hookup (vam);
+
+  vec_free (name);
+
+  return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
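For readers new to the VAT test-plugin boilerplate above: the reply-handler macro near the top of this file expands, for the single reply message registered in foreach_vpe_api_reply_msg, into roughly the following. This is a sketch; the function name and signature follow the usual vl_api_<msg>_t_handler convention and are inferred, not copied from the generated code.

static void vl_api_vxlan_gpe_ioam_export_enable_disable_reply_t_handler
  (vl_api_vxlan_gpe_ioam_export_enable_disable_reply_t * mp)
{
  vat_main_t *vam = export_test_main.vat_main;
  i32 retval = ntohl (mp->retval);

  if (vam->async_mode)
    vam->async_errors += (retval < 0);  /* async mode: just count failures */
  else
    {
      vam->retval = retval;     /* sync mode: unblock the W (wait) macro */
      vam->result_ready = 1;
    }
}

From the vpp_api_test prompt the new command is then invoked as "vxlan_gpe_ioam_export_enable_disable [disable]".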
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_thread.c b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export_thread.c
new file mode 100644 (file)
index 0000000..58508eb
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * ioam_export_thread.c
+ */
+#include <vnet/api_errno.h>
+#include <vppinfra/pool.h>
+#include <ioam/export-common/ioam_export.h>
+
+static vlib_node_registration_t vxlan_gpe_ioam_export_process_node;
+
+static uword
+vxlan_gpe_ioam_export_process (vlib_main_t * vm,
+                              vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+  return (ioam_export_process_common (&vxlan_gpe_ioam_export_main,
+                                     vm, rt, f,
+                                     vxlan_gpe_ioam_export_process_node.index));
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan_gpe_ioam_export_process_node, static) =
+{
+ .function = vxlan_gpe_ioam_export_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vxlan-gpe-ioam-export-process",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
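The process node above is only a thin wrapper; the buffer-flush loop itself now lives in ioam/export-common/ioam_export.h and is shared with the IPv6 export plugin. The control plane drives it with two events, as the enable/disable path in ioam_export.c (further down in this patch) shows; the vxlan-gpe control path presumably signals its own process node the same way:

  /* enable: kick off the periodic buffer-flush process (event 1) */
  vlib_process_signal_event (vm, em->export_process_node_index, 1, 0);

  /* disable: park the process again (event 2) */
  vlib_process_signal_event (vm, em->export_process_node_index, 2, 0);

Between events the process wakes on a timer and ships any per-thread export buffer that has accumulated records but sat idle past the export timeout.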
diff --git a/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_node.c b/plugins/ioam-plugin/ioam/export-vxlan-gpe/vxlan_gpe_node.c
new file mode 100644 (file)
index 0000000..722c2b0
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/ip.h>
+#include <ioam/export-common/ioam_export.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+
+typedef struct
+{
+  u32 next_index;
+  u32 flow_label;
+} export_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_export_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  export_trace_t *t = va_arg (*args, export_trace_t *);
+
+  s = format (s, "EXPORT: flow_label %d, next index %d",
+             t->flow_label, t->next_index);
+  return s;
+}
+
+vlib_node_registration_t export_node;
+
+#define foreach_export_error \
+_(RECORDED, "Packets recorded for export")
+
+typedef enum
+{
+#define _(sym,str) EXPORT_ERROR_##sym,
+  foreach_export_error
+#undef _
+    EXPORT_N_ERROR,
+} export_error_t;
+
+static char *export_error_strings[] = {
+#define _(sym,string) string,
+  foreach_export_error
+#undef _
+};
+
+typedef enum
+{
+  EXPORT_NEXT_VXLAN_GPE_INPUT,
+  EXPORT_N_NEXT,
+} export_next_t;
+
+always_inline void
+copy3cachelines (void *dst, const void *src, size_t n)
+{
+#if 0
+  if (PREDICT_FALSE (n < DEFAULT_EXPORT_SIZE))
+    {
+      /* Copy only the first 1/2 cache lines whatever is available */
+      if (n >= 64)
+       clib_mov64 ((u8 *) dst, (const u8 *) src);
+      if (n >= 128)
+       clib_mov64 ((u8 *) dst + 64, (const u8 *) src + 64);
+      return;
+    }
+  clib_mov64 ((u8 *) dst, (const u8 *) src);
+  clib_mov64 ((u8 *) dst + 64, (const u8 *) src + 64);
+  clib_mov64 ((u8 *) dst + 128, (const u8 *) src + 128);
+#endif
+#if 1
+
+  u64 *copy_dst, *copy_src;
+  int i;
+  copy_dst = (u64 *) dst;
+  copy_src = (u64 *) src;
+  if (PREDICT_FALSE (n < DEFAULT_EXPORT_SIZE))
+    {
+      for (i = 0; i < n / 64; i++)
+       {
+         copy_dst[0] = copy_src[0];
+         copy_dst[1] = copy_src[1];
+         copy_dst[2] = copy_src[2];
+         copy_dst[3] = copy_src[3];
+         copy_dst[4] = copy_src[4];
+         copy_dst[5] = copy_src[5];
+         copy_dst[6] = copy_src[6];
+         copy_dst[7] = copy_src[7];
+         copy_dst += 8;
+         copy_src += 8;
+       }
+      return;
+    }
+  for (i = 0; i < 3; i++)
+    {
+      copy_dst[0] = copy_src[0];
+      copy_dst[1] = copy_src[1];
+      copy_dst[2] = copy_src[2];
+      copy_dst[3] = copy_src[3];
+      copy_dst[4] = copy_src[4];
+      copy_dst[5] = copy_src[5];
+      copy_dst[6] = copy_src[6];
+      copy_dst[7] = copy_src[7];
+      copy_dst += 8;
+      copy_src += 8;
+    }
+#endif
+}
+
+
+static uword
+vxlan_gpe_export_node_fn (vlib_main_t * vm,
+                         vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  ioam_export_main_t *em = &vxlan_gpe_ioam_export_main;
+  ioam_export_node_common (em, vm, node, frame, ip4_header_t, length,
+                          ip_version_and_header_length,
+                          EXPORT_NEXT_VXLAN_GPE_INPUT);
+  return frame->n_vectors;
+}
+
+/*
+ * Node for VXLAN-GPE export
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (export_node) =
+{
+  .function = vxlan_gpe_export_node_fn,
+  .name = "vxlan-gpe-ioam-export",
+  .vector_size = sizeof (u32),
+  .format_trace = format_export_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (export_error_strings),
+  .error_strings = export_error_strings,
+  .n_next_nodes = EXPORT_N_NEXT,
+    /* edit / add dispositions here */
+    .next_nodes =
+  {[EXPORT_NEXT_VXLAN_GPE_INPUT] = "vxlan-gpe-pop-ioam-v4"},
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
index e51c43e..96de8bf 100644 (file)
 
 #include <vnet/vnet.h>
 #include <vnet/plugin/plugin.h>
-#include <ioam/export/ioam_export.h>
+#include <ioam/export-common/ioam_export.h>
 
 #include <vlibapi/api.h>
 #include <vlibmemory/api.h>
 #include <vlibsocket/api.h>
 #include <vnet/ip/ip6_hop_by_hop.h>
 
-#include "ioam_export.h"
 
 /* define message IDs */
 #include <ioam/export/ioam_export_msg_enum.h>
@@ -115,9 +114,9 @@ ioam_export_ip6_enable_disable (ioam_export_main_t * em,
 
   if (is_disable == 0)
     {
-      if (1 == ioam_export_header_create (collector_address, src_address))
+      if (1 == ioam_export_header_create (em, collector_address, src_address))
        {
-         ioam_export_thread_buffer_init (vm);
+         ioam_export_thread_buffer_init (em, vm);
          ip6_hbh_set_next_override (em->my_hbh_slot);
          /* Turn on the export buffer check process */
          vlib_process_signal_event (vm, em->export_process_node_index, 1, 0);
@@ -131,8 +130,8 @@ ioam_export_ip6_enable_disable (ioam_export_main_t * em,
   else
     {
       ip6_hbh_set_next_override (IP6_LOOKUP_NEXT_POP_HOP_BY_HOP);
-      ioam_export_header_cleanup (collector_address, src_address);
-      ioam_export_thread_buffer_free ();
+      ioam_export_header_cleanup (em, collector_address, src_address);
+      ioam_export_thread_buffer_free (em);
       /* Turn off the export buffer check process */
       vlib_process_signal_event (vm, em->export_process_node_index, 2, 0);
 
@@ -150,8 +149,8 @@ static void vl_api_ioam_export_ip6_enable_disable_t_handler
   int rv;
 
   rv = ioam_export_ip6_enable_disable (sm, (int) (mp->is_disable),
-                                      (ip4_address_t *) mp->
-                                      collector_address,
+                                      (ip4_address_t *)
+                                      mp->collector_address,
                                       (ip4_address_t *) mp->src_address);
 
   REPLY_MACRO (VL_API_IOAM_EXPORT_IP6_ENABLE_DISABLE_REPLY);
@@ -234,11 +233,13 @@ set_ioam_export_ipfix_command_fn (vlib_main_t * vm,
   return 0;
 }
 
+/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (set_ipfix_command, static) =
 {
 .path = "set ioam export ipfix",.short_help =
     "set ioam export ipfix collector <ip4-address> src <ip4-address>",.
     function = set_ioam_export_ipfix_command_fn,};
+/* *INDENT-ON* */
 
 
 static clib_error_t *
@@ -272,3 +273,11 @@ ioam_export_init (vlib_main_t * vm)
 }
 
 VLIB_INIT_FUNCTION (ioam_export_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/export/ioam_export.h b/plugins/ioam-plugin/ioam/export/ioam_export.h
deleted file mode 100644 (file)
index f4a461f..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __included_ioam_export_h__
-#define __included_ioam_export_h__
-
-#include <vnet/vnet.h>
-#include <vnet/ip/ip.h>
-#include <vnet/ip/ip_packet.h>
-#include <vnet/ip/ip4_packet.h>
-#include <vnet/ip/ip6_packet.h>
-#include <vnet/ip/udp.h>
-#include <vnet/flow/ipfix_packet.h>
-
-#include <vppinfra/pool.h>
-#include <vppinfra/hash.h>
-#include <vppinfra/error.h>
-#include <vppinfra/elog.h>
-
-#include <vlib/threads.h>
-
-typedef struct ioam_export_buffer {
-  /* Allocated buffer */
-  u32 buffer_index;
-  u64 touched_at;
-  u8 records_in_this_buffer;
-} ioam_export_buffer_t;
-
-
-typedef struct {
-  /* API message ID base */
-  u16 msg_id_base;
-
-  /* TODO: to support multiple collectors all this has to be grouped and create a vector here*/
-  u8 *record_header;
-  u32 sequence_number;
-  u32 domain_id;
-
-  /* ipfix collector, our ip address */
-  ip4_address_t ipfix_collector;
-  ip4_address_t src_address;
-
-  /* Pool of ioam_export_buffer_t */
-  ioam_export_buffer_t *buffer_pool;
-  /* Vector of per thread ioam_export_buffer_t to buffer pool index */
-  u32 *buffer_per_thread;
-  /* Lock per thread to swap buffers between worker and timer process*/
-  volatile u32 **lockp;
-
-  /* time scale transform*/
-  u32 unix_time_0;
-  f64 vlib_time_0;
-
-  /* convenience */
-  vlib_main_t * vlib_main;
-  vnet_main_t * vnet_main;
-  ethernet_main_t * ethernet_main;
-  u32 ip4_lookup_node_index;
-
-  uword my_hbh_slot;
-  u32 export_process_node_index;
-} ioam_export_main_t;
-
-ioam_export_main_t ioam_export_main;
-
-vlib_node_registration_t export_node;
-
-#define DEFAULT_EXPORT_SIZE (3 * CLIB_CACHE_LINE_BYTES)
-/*
- *  Number of records in a buffer
- * ~(MTU (1500) - [ip hdr(40) + UDP(8) + ipfix (24)]) / DEFAULT_EXPORT_SIZE
- */
-#define DEFAULT_EXPORT_RECORDS 7
-
-always_inline ioam_export_buffer_t *ioam_export_get_my_buffer(u32 thread_id)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-
-  if (vec_len(em->buffer_per_thread) > thread_id)
-    return(pool_elt_at_index(em->buffer_pool, em->buffer_per_thread[thread_id]));
-  return(0);
-}
-
-inline static int ioam_export_buffer_add_header (vlib_buffer_t *b0)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  clib_memcpy(b0->data, em->record_header, vec_len(em->record_header));
-  b0->current_data = 0;
-  b0->current_length = vec_len(em->record_header);
-  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
-  return(1);
-}
-
-inline static int ioam_export_init_buffer (vlib_main_t *vm,
-                                          ioam_export_buffer_t *eb)
-{
-  vlib_buffer_t *b = 0;
-
-  if (!eb)
-    return(-1);
-  /* TODO: Perhaps buffer init from template here */
-  if (vlib_buffer_alloc (vm, &(eb->buffer_index), 1) != 1)
-    return(-2);
-  eb->records_in_this_buffer = 0;
-  eb->touched_at = vlib_time_now(vm);
-  b = vlib_get_buffer(vm, eb->buffer_index);
-  (void) ioam_export_buffer_add_header(b);
-  vnet_buffer(b)->sw_if_index[VLIB_RX] = 0;
-  vnet_buffer(b)->sw_if_index[VLIB_TX] = ~0;
-  return(1);
-}
-
-inline static void ioam_export_thread_buffer_free (void)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  vlib_main_t *vm = em->vlib_main;
-  ioam_export_buffer_t *eb = 0;
-  int i;
-  for (i = 0; i < vec_len(em->buffer_per_thread); i++)
-    {
-      eb = pool_elt_at_index(em->buffer_pool, em->buffer_per_thread[i]);
-      if (eb)
-       vlib_buffer_free(vm, &(eb->buffer_index), 1);
-    }
-  for (i = 0; i < vec_len(em->lockp); i++)
-    clib_mem_free((void *) em->lockp[i]);
-  vec_free(em->buffer_per_thread);
-  pool_free(em->buffer_pool);
-  vec_free(em->lockp);
-  em->buffer_per_thread = 0;
-  em->buffer_pool = 0;
-  em->lockp = 0;
-}
-
-inline static int ioam_export_thread_buffer_init (vlib_main_t *vm)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  int no_of_threads = vec_len(vlib_worker_threads);
-  int i;
-  ioam_export_buffer_t *eb = 0;
-  vlib_node_t * ip4_lookup_node;
-
-  pool_alloc_aligned(em->buffer_pool,
-                     no_of_threads - 1,
-                     CLIB_CACHE_LINE_BYTES);
-  vec_validate_aligned(em->buffer_per_thread,
-                       no_of_threads-1,
-                       CLIB_CACHE_LINE_BYTES);
-  vec_validate_aligned(em->lockp, no_of_threads-1,
-                      CLIB_CACHE_LINE_BYTES);
-  ip4_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
-  em->ip4_lookup_node_index = ip4_lookup_node->index;
-  if (!em->buffer_per_thread || !em->buffer_pool || !em->lockp)
-    {
-      return(-1);
-    }
-  for (i=0; i < no_of_threads; i++)
-    {
-      eb = 0;
-      pool_get_aligned(em->buffer_pool, eb, CLIB_CACHE_LINE_BYTES);
-      memset(eb, 0, sizeof (*eb));
-      em->buffer_per_thread[i] = eb - em->buffer_pool;
-      if (ioam_export_init_buffer(vm, eb) != 1)
-       {
-         ioam_export_thread_buffer_free();
-         return(-2);
-       }
-      em->lockp[i] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                            CLIB_CACHE_LINE_BYTES);
-      memset ((void *) em->lockp[i], 0, CLIB_CACHE_LINE_BYTES);
-    }
-  return(1);
-}
-
-#define IPFIX_IOAM_EXPORT_ID 272
-
-/* Used to build the rewrite */
-/* data set packet */
-typedef struct {
-  ipfix_message_header_t h;
-  ipfix_set_header_t s;
-} ipfix_data_packet_t;
-
-typedef struct {
-  ip4_header_t ip4;
-  udp_header_t udp;
-  ipfix_data_packet_t ipfix;
-} ip4_ipfix_data_packet_t;
-
-
-inline static void ioam_export_header_cleanup (ip4_address_t * collector_address,
-                                              ip4_address_t * src_address)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  vec_free(em->record_header);
-  em->record_header = 0;
-}
-
-inline static int ioam_export_header_create (ip4_address_t * collector_address,
-                                            ip4_address_t * src_address)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  ip4_header_t * ip;
-  udp_header_t * udp;
-  ipfix_message_header_t * h;
-  ipfix_set_header_t * s;
-  u8 * rewrite = 0;
-  ip4_ipfix_data_packet_t * tp;
-
-
-  /* allocate rewrite space */
-  vec_validate_aligned (rewrite,
-                        sizeof (ip4_ipfix_data_packet_t) - 1,
-                        CLIB_CACHE_LINE_BYTES);
-
-  tp = (ip4_ipfix_data_packet_t *) rewrite;
-  ip = (ip4_header_t *) &tp->ip4;
-  udp = (udp_header_t *) (ip+1);
-  h = (ipfix_message_header_t *)(udp+1);
-  s = (ipfix_set_header_t *)(h+1);
-
-  ip->ip_version_and_header_length = 0x45;
-  ip->ttl = 254;
-  ip->protocol = IP_PROTOCOL_UDP;
-  ip->src_address.as_u32 = src_address->as_u32;
-  ip->dst_address.as_u32 = collector_address->as_u32;
-  udp->src_port = clib_host_to_net_u16 (4939 /* $$FIXME */);
-  udp->dst_port = clib_host_to_net_u16 (4939);
-  /* FIXUP: UDP length */
-  udp->length = clib_host_to_net_u16 (vec_len(rewrite) +
-    (DEFAULT_EXPORT_RECORDS * DEFAULT_EXPORT_SIZE) - sizeof (*ip));
-
-  /* FIXUP: message header export_time */
-  /* FIXUP: message header sequence_number */
-  h->domain_id = clib_host_to_net_u32 (em->domain_id);
-
-  /*FIXUP: Setid length in octets if records exported are not default*/
-  s->set_id_length = ipfix_set_id_length (IPFIX_IOAM_EXPORT_ID,
-    (sizeof(*s) + (DEFAULT_EXPORT_RECORDS * DEFAULT_EXPORT_SIZE)));
-
-  /* FIXUP: h version and length length in octets if records exported are not default */
-  h->version_length = version_length (sizeof(*h)+
-    (sizeof(*s) + (DEFAULT_EXPORT_RECORDS * DEFAULT_EXPORT_SIZE)));
-
-  /* FIXUP: ip length if records exported are not default */
-  /* FIXUP: ip checksum if records exported are not default */
-  ip->length = clib_host_to_net_u16 (vec_len(rewrite) +
-    (DEFAULT_EXPORT_RECORDS * DEFAULT_EXPORT_SIZE));
-  ip->checksum = ip4_header_checksum (ip);
-  _vec_len(rewrite) = sizeof(ip4_ipfix_data_packet_t);
-  em->record_header = rewrite;
-  return(1);
-}
-
-inline static int ioam_export_send_buffer (vlib_main_t *vm,
-    ioam_export_buffer_t *eb)
-{
-  ioam_export_main_t *em = &ioam_export_main;
-  ip4_header_t * ip;
-  udp_header_t * udp;
-  ipfix_message_header_t * h;
-  ipfix_set_header_t * s;
-  ip4_ipfix_data_packet_t * tp;
-  vlib_buffer_t *b0;
-  u16 new_l0, old_l0;
-  ip_csum_t sum0;
-  vlib_frame_t * nf = 0;
-  u32 * to_next;
-
-  b0 = vlib_get_buffer(vm, eb->buffer_index);
-  tp = vlib_buffer_get_current (b0);
-  ip = (ip4_header_t *) &tp->ip4;
-  udp = (udp_header_t *) (ip+1);
-  h = (ipfix_message_header_t *)(udp+1);
-  s = (ipfix_set_header_t *)(h+1);
-
-  /* FIXUP: message header export_time */
-  h->export_time = clib_host_to_net_u32((u32)
-    (((f64)em->unix_time_0) +
-    (vlib_time_now(em->vlib_main) - em->vlib_time_0)));
-
-  /* FIXUP: message header sequence_number */
-  h->sequence_number = clib_host_to_net_u32 (em->sequence_number++);
-
-  /* FIXUP: lengths if different from default */
-  if (PREDICT_FALSE(eb->records_in_this_buffer != DEFAULT_EXPORT_RECORDS)) {
-     s->set_id_length = ipfix_set_id_length (IPFIX_IOAM_EXPORT_ID /* set_id */,
-                                            b0->current_length -
-                                            (sizeof (*ip) + sizeof (*udp) +
-                                            sizeof (*h)));
-     h->version_length = version_length (b0->current_length -
-                                        (sizeof (*ip) + sizeof (*udp)));
-     sum0 = ip->checksum;
-     old_l0 = ip->length;
-     new_l0 = clib_host_to_net_u16 ((u16)b0->current_length);
-     sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
-                            length /* changed member */);
-     ip->checksum = ip_csum_fold (sum0);
-     ip->length = new_l0;
-     udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
-  }
-
-  /* Enqueue pkts to ip4-lookup */
-
-  nf = vlib_get_frame_to_node (vm, em->ip4_lookup_node_index);
-  nf->n_vectors = 0;
-  to_next = vlib_frame_vector_args (nf);
-  nf->n_vectors = 1;
-  to_next[0] = eb->buffer_index;
-  vlib_put_frame_to_node(vm, em->ip4_lookup_node_index, nf);
-  return(1);
-
-}
-
-#endif /* __included_ioam_export_h__ */
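A quick sanity check on the sizing constants this header carried; they are still used by the new vxlan-gpe-ioam-export node earlier in this patch, so they presumably live on unchanged in ioam/export-common/ioam_export.h. Assuming 64-byte cache lines:

  DEFAULT_EXPORT_SIZE    = 3 * CLIB_CACHE_LINE_BYTES = 3 * 64           = 192 bytes per record
  room in an MTU packet  = 1500 - (40 [ip hdr] + 8 [UDP] + 24 [ipfix])  = 1428 bytes
  DEFAULT_EXPORT_RECORDS = floor(1428 / 192)                            = 7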
index e64b0bf..d2eb200 100644 (file)
  */
 #include <vnet/api_errno.h>
 #include <vppinfra/pool.h>
-#include "ioam_export.h"
+#include <ioam/export-common/ioam_export.h>
 
 static vlib_node_registration_t ioam_export_process_node;
-#define EXPORT_TIMEOUT (20.0)
-#define THREAD_PERIOD (30.0)
 
 static uword
 ioam_export_process (vlib_main_t * vm,
                     vlib_node_runtime_t * rt, vlib_frame_t * f)
 {
-  ioam_export_main_t *em = &ioam_export_main;
-  f64 now;
-  f64 timeout = 30.0;
-  uword event_type;
-  uword *event_data = 0;
-  int i;
-  ioam_export_buffer_t *eb = 0, *new_eb = 0;
-  u32 *vec_buffer_indices = 0;
-  u32 *vec_buffer_to_be_sent = 0;
-  u32 *thread_index = 0;
-  u32 new_pool_index = 0;
-
-  em->export_process_node_index = ioam_export_process_node.index;
-  /* Wait for Godot... */
-  vlib_process_wait_for_event_or_clock (vm, 1e9);
-  event_type = vlib_process_get_events (vm, &event_data);
-  if (event_type != 1)
-    clib_warning ("bogus kickoff event received, %d", event_type);
-  vec_reset_length (event_data);
-
-  while (1)
-    {
-      vlib_process_wait_for_event_or_clock (vm, timeout);
-      event_type = vlib_process_get_events (vm, &event_data);
-      switch (event_type)
-       {
-       case 2:         /* Stop and Wait for kickoff again */
-         timeout = 1e9;
-         break;
-       case 1:         /* kickoff : Check for unsent buffers */
-         timeout = THREAD_PERIOD;
-         break;
-       case ~0:                /* timeout */
-         break;
-       }
-      vec_reset_length (event_data);
-      now = vlib_time_now (vm);
-      /*
-       * Create buffers for threads that are not active enough
-       * to send out the export records
-       */
-      for (i = 0; i < vec_len (em->buffer_per_thread); i++)
-       {
-         /* If the worker thread is processing export records ignore further checks */
-         if (*em->lockp[i] == 1)
-           continue;
-         eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
-         if (eb->records_in_this_buffer > 0 && now > (eb->touched_at + EXPORT_TIMEOUT))
-           {
-             pool_get_aligned (em->buffer_pool, new_eb,
-                               CLIB_CACHE_LINE_BYTES);
-             memset (new_eb, 0, sizeof (*new_eb));
-             if (ioam_export_init_buffer (vm, new_eb) == 1)
-               {
-                 new_pool_index = new_eb - em->buffer_pool;
-                 vec_add (vec_buffer_indices, &new_pool_index, 1);
-                 vec_add (vec_buffer_to_be_sent, &em->buffer_per_thread[i],
-                          1);
-                 vec_add (thread_index, &i, 1);
-               }
-             else
-               {
-                 pool_put (em->buffer_pool, new_eb);
-                 /*Give up */
-                 goto CLEANUP;
-               }
-           }
-       }
-      if (vec_len (thread_index) != 0)
-       {
-         /*
-          * Now swap the buffers out
-          */
-         for (i = 0; i < vec_len (thread_index); i++)
-           {
-             while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
-               ;
-             em->buffer_per_thread[thread_index[i]] =
-               vec_pop (vec_buffer_indices);
-             *em->lockp[thread_index[i]] = 0;
-           }
-
-         /* Send the buffers */
-         for (i = 0; i < vec_len (vec_buffer_to_be_sent); i++)
-           {
-             eb =
-               pool_elt_at_index (em->buffer_pool, vec_buffer_to_be_sent[i]);
-             ioam_export_send_buffer (vm, eb);
-             pool_put (em->buffer_pool, eb);
-           }
-       }
-
-    CLEANUP:
-      /* Free any leftover/unused buffers and everything that was allocated */
-      for (i = 0; i < vec_len (vec_buffer_indices); i++)
-       {
-         new_eb = pool_elt_at_index (em->buffer_pool, vec_buffer_indices[i]);
-         vlib_buffer_free (vm, &new_eb->buffer_index, 1);
-         pool_put (em->buffer_pool, new_eb);
-       }
-      vec_free (vec_buffer_indices);
-      vec_free (vec_buffer_to_be_sent);
-      vec_free (thread_index);
-    }
-  return 0;                    /* not so much */
+  return (ioam_export_process_common (&ioam_export_main,
+                                      vm, rt, f,
+                                      ioam_export_process_node.index));
 }
 
 VLIB_REGISTER_NODE (ioam_export_process_node, static) =
index 484bcb5..19f143d 100644 (file)
@@ -17,7 +17,7 @@
 #include <vnet/pg/pg.h>
 #include <vppinfra/error.h>
 #include <vnet/ip/ip.h>
-#include <ioam/export/ioam_export.h>
+#include <ioam/export-common/ioam_export.h>
 
 typedef struct
 {
@@ -124,210 +124,9 @@ ip6_export_node_fn (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * frame)
 {
   ioam_export_main_t *em = &ioam_export_main;
-  u32 n_left_from, *from, *to_next;
-  export_next_t next_index;
-  u32 pkts_recorded = 0;
-  ioam_export_buffer_t *my_buf = 0;
-  vlib_buffer_t *eb0 = 0;
-  u32 ebi0 = 0;
-  from = vlib_frame_vector_args (frame);
-  n_left_from = frame->n_vectors;
-  next_index = node->cached_next_index;
-
-  while (__sync_lock_test_and_set (em->lockp[vm->cpu_index], 1))
-    ;
-  my_buf = ioam_export_get_my_buffer (vm->cpu_index);
-  my_buf->touched_at = vlib_time_now (vm);
-  while (n_left_from > 0)
-    {
-      u32 n_left_to_next;
-
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      while (n_left_from >= 4 && n_left_to_next >= 2)
-       {
-         u32 next0 = EXPORT_NEXT_POP_HBYH;
-         u32 next1 = EXPORT_NEXT_POP_HBYH;
-         u32 bi0, bi1;
-         ip6_header_t *ip60, *ip61;
-         vlib_buffer_t *p0, *p1;
-         u32 ip_len0, ip_len1;
-
-         /* Prefetch next iteration. */
-         {
-           vlib_buffer_t *p2, *p3;
-
-           p2 = vlib_get_buffer (vm, from[2]);
-           p3 = vlib_get_buffer (vm, from[3]);
-
-           vlib_prefetch_buffer_header (p2, LOAD);
-           vlib_prefetch_buffer_header (p3, LOAD);
-
-           /* IPv6 + HbyH header + Trace option */
-           /* 40   +           2 + [4 hdr] + [16]* no_of_nodes */
-           /* 3 cache lines can get v6 hdr + trace option with upto 9 node trace */
-           CLIB_PREFETCH (p2->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);
-           CLIB_PREFETCH (p3->data, 3 * CLIB_CACHE_LINE_BYTES, LOAD);
-         }
-
-         /* speculatively enqueue p0 and p1 to the current next frame */
-         to_next[0] = bi0 = from[0];
-         to_next[1] = bi1 = from[1];
-         from += 2;
-         to_next += 2;
-         n_left_from -= 2;
-         n_left_to_next -= 2;
-
-         p0 = vlib_get_buffer (vm, bi0);
-         p1 = vlib_get_buffer (vm, bi1);
-
-         ip60 = vlib_buffer_get_current (p0);
-         ip61 = vlib_buffer_get_current (p1);
-
-         ip_len0 =
-           clib_net_to_host_u16 (ip60->payload_length) +
-           sizeof (ip6_header_t);
-         ip_len1 =
-           clib_net_to_host_u16 (ip61->payload_length) +
-           sizeof (ip6_header_t);
-
-         ebi0 = my_buf->buffer_index;
-         eb0 = vlib_get_buffer (vm, ebi0);
-         if (PREDICT_FALSE (eb0 == 0))
-           goto NO_BUFFER1;
-
-         ip_len0 =
-           ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;
-         ip_len1 =
-           ip_len1 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len1;
-
-         copy3cachelines (eb0->data + eb0->current_length, ip60, ip_len0);
-         eb0->current_length += DEFAULT_EXPORT_SIZE;
-         /* To maintain uniform size per export, each
-          * record is default size, ip6 hdr can be
-          * used to parse the record correctly
-          */
-         my_buf->records_in_this_buffer++;
-         /* if number of buf exceeds max that fits in a MTU sized buffer
-          * ship it to the queue and pick new one
-          */
-         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)
-           {
-             ioam_export_send_buffer (vm, my_buf);
-             ioam_export_init_buffer (vm, my_buf);
-           }
-
-         ebi0 = my_buf->buffer_index;
-         eb0 = vlib_get_buffer (vm, ebi0);
-         if (PREDICT_FALSE (eb0 == 0))
-           goto NO_BUFFER1;
-
-         copy3cachelines (eb0->data + eb0->current_length, ip61, ip_len1);
-         eb0->current_length += DEFAULT_EXPORT_SIZE;
-         my_buf->records_in_this_buffer++;
-         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)
-           {
-             ioam_export_send_buffer (vm, my_buf);
-             ioam_export_init_buffer (vm, my_buf);
-           }
-
-         pkts_recorded += 2;
-
-         if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
-           {
-             if (p0->flags & VLIB_BUFFER_IS_TRACED)
-               {
-                 export_trace_t *t =
-                   vlib_add_trace (vm, node, p0, sizeof (*t));
-                 t->flow_label =
-                   clib_net_to_host_u32 (ip60->
-                                         ip_version_traffic_class_and_flow_label);
-                 t->next_index = next0;
-               }
-             if (p1->flags & VLIB_BUFFER_IS_TRACED)
-               {
-                 export_trace_t *t =
-                   vlib_add_trace (vm, node, p1, sizeof (*t));
-                 t->flow_label =
-                   clib_net_to_host_u32 (ip61->
-                                         ip_version_traffic_class_and_flow_label);
-                 t->next_index = next1;
-               }
-           }
-       NO_BUFFER1:
-         /* verify speculative enqueues, maybe switch current next frame */
-         vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          bi0, bi1, next0, next1);
-       }
-
-      while (n_left_from > 0 && n_left_to_next > 0)
-       {
-         u32 bi0;
-         vlib_buffer_t *p0;
-         u32 next0 = EXPORT_NEXT_POP_HBYH;
-         ip6_header_t *ip60;
-         u32 ip_len0;
-
-         /* speculatively enqueue p0 to the current next frame */
-         bi0 = from[0];
-         to_next[0] = bi0;
-         from += 1;
-         to_next += 1;
-         n_left_from -= 1;
-         n_left_to_next -= 1;
-
-         p0 = vlib_get_buffer (vm, bi0);
-         ip60 = vlib_buffer_get_current (p0);
-         ip_len0 =
-           clib_net_to_host_u16 (ip60->payload_length) +
-           sizeof (ip6_header_t);
-
-         ebi0 = my_buf->buffer_index;
-         eb0 = vlib_get_buffer (vm, ebi0);
-         if (PREDICT_FALSE (eb0 == 0))
-           goto NO_BUFFER;
-
-         ip_len0 =
-           ip_len0 > DEFAULT_EXPORT_SIZE ? DEFAULT_EXPORT_SIZE : ip_len0;
-         copy3cachelines (eb0->data + eb0->current_length, ip60, ip_len0);
-         eb0->current_length += DEFAULT_EXPORT_SIZE;
-         /* To maintain uniform size per export, each
-          * record is default size, ip6 hdr can be
-          * used to parse the record correctly
-          */
-         my_buf->records_in_this_buffer++;
-         /* if number of buf exceeds max that fits in a MTU sized buffer
-          * ship it to the queue and pick new one
-          */
-         if (my_buf->records_in_this_buffer >= DEFAULT_EXPORT_RECORDS)
-           {
-             ioam_export_send_buffer (vm, my_buf);
-             ioam_export_init_buffer (vm, my_buf);
-           }
-         if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
-                            && (p0->flags & VLIB_BUFFER_IS_TRACED)))
-           {
-             export_trace_t *t = vlib_add_trace (vm, node, p0, sizeof (*t));
-             t->flow_label =
-               clib_net_to_host_u32 (ip60->
-                                     ip_version_traffic_class_and_flow_label);
-             t->next_index = next0;
-           }
-
-         pkts_recorded += 1;
-       NO_BUFFER:
-         /* verify speculative enqueue, maybe switch current next frame */
-         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          bi0, next0);
-       }
-
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-    }
-
-  vlib_node_increment_counter (vm, export_node.index,
-                              EXPORT_ERROR_RECORDED, pkts_recorded);
-  *em->lockp[vm->cpu_index] = 0;
+  ioam_export_node_common (em, vm, node, frame, ip6_header_t, payload_length,
+                          ip_version_traffic_class_and_flow_label,
+                          EXPORT_NEXT_POP_HBYH);
   return frame->n_vectors;
 }
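With this change both data-plane nodes share one recording loop; only the header type, its length field, the field captured in the packet trace, and the next-node index differ between the IPv6 call here and the VXLAN-GPE call earlier in the patch. A third transport would hook in the same way — a minimal sketch, assuming ioam_export_node_common keeps the argument shape of the two call sites above; the my_transport_* names are hypothetical:

static uword
my_transport_export_node_fn (vlib_main_t * vm,
                             vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  /* hypothetical per-transport export main, initialized like the real ones */
  ioam_export_main_t *em = &my_transport_ioam_export_main;

  /* header type, length field, field recorded in the trace, next index */
  ioam_export_node_common (em, vm, node, frame, ip4_header_t, length,
                           ip_version_and_header_length,
                           MY_TRANSPORT_EXPORT_NEXT_INPUT);
  return frame->n_vectors;
}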
 
index 32c8e23..e35bb7c 100644 (file)
@@ -105,7 +105,6 @@ static void vl_api_trace_profile_add_t_handler
   int rv = 0;
   vl_api_trace_profile_add_reply_t *rmp;
   trace_profile *profile = NULL;
-  u8 *name = 0;
 
   profile = trace_profile_find ();
   if (profile)
@@ -122,7 +121,6 @@ static void vl_api_trace_profile_add_t_handler
       rv = -3;
     }
 ERROROUT:
-  vec_free (name);
   TRACE_REPLY_MACRO (VL_API_TRACE_PROFILE_ADD_REPLY);
 }
 
index adc02b2..5c7f1ee 100644 (file)
 
 trace_main_t trace_main;
 
-extern ip6_hop_by_hop_ioam_main_t ip6_hop_by_hop_ioam_main;
-
 static int
 trace_profile_cleanup (trace_profile * profile)
 {
-  int rv;
-  ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
 
   memset (profile, 0, sizeof (trace_profile));
   profile->trace_tsp = TSP_MICROSECONDS;       /* Micro seconds */
-  hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] = 0;
-  if (0 !=
-      (rv =
-       ip6_ioam_set_rewrite (&hm->rewrite, hm->has_trace_option,
-                            hm->has_pot_option, hm->has_seqno_option)))
-    return (-1);
+  ip6_trace_profile_cleanup ();        /* lib-trace_TODO: Remove this once IOAM-IPv6 transport is a plugin */
   return 0;
 
 }
@@ -65,10 +56,11 @@ int
 trace_profile_create (trace_profile * profile, u8 trace_type, u8 num_elts,
                      u32 trace_tsp, u32 node_id, u32 app_data)
 {
-  u32 trace_size = 0;
-  int rv;
-  ip6_hop_by_hop_ioam_main_t *hm = &ip6_hop_by_hop_ioam_main;
 
+  if (!trace_type || !num_elts || !(node_id))
+    {
+      return (-1);
+    }
   if (profile && !profile->valid)
     {
       //rv = trace_profile_cleanup (profile);
@@ -79,21 +71,8 @@ trace_profile_create (trace_profile * profile, u8 trace_type, u8 num_elts,
       profile->app_data = app_data;
       profile->valid = 1;
 
-      if (ioam_trace_get_sizeof_handler (&trace_size) < 0)
-       return (-1);
-
-      hm->options_size[HBH_OPTION_TYPE_IOAM_TRACE_DATA_LIST] = trace_size;
-
-      if (hm->has_trace_option)
-       {
-         if (0 !=
-             (rv =
-              ip6_ioam_set_rewrite (&hm->rewrite, hm->has_trace_option,
-                                    hm->has_pot_option,
-                                    hm->has_seqno_option)))
-           return (-1);
-
-       }
+      /* lib-trace_TODO: Remove this once IOAM-IPv6 transport is a plugin */
+      ip6_trace_profile_setup ();
       return (0);
     }
 
index 4528b18..556f07e 100644 (file)
@@ -18,7 +18,6 @@
 #ifndef include_vnet_trace_util_h
 #define include_vnet_trace_util_h
 
-#include <vnet/ip/ip6_hop_by_hop.h>
 #define debug_ioam debug_ioam_fn
 
 
@@ -72,9 +71,8 @@ int trace_util_init (void);
 
 
 /*
- * Find a trace profile by ID
+ * Find a trace profile
  */
-always_inline trace_profile *trace_profile_find (void);
 
 always_inline trace_profile *
 trace_profile_find (void)
@@ -209,15 +207,6 @@ typedef struct
 } ioam_trace_ts_app_t;
 
 
-/* *INDENT-OFF* */
-typedef CLIB_PACKED(struct {
-  ip6_hop_by_hop_option_t hdr;
-  u8 ioam_trace_type;
-  u8 data_list_elts_left;
-  u32 elts[0]; /* Variable type. So keep it generic */
-}) ioam_trace_option_t;
-/* *INDENT-ON* */
-
 
 static inline u8
 fetch_trace_data_size (u8 trace_type)
@@ -239,6 +228,14 @@ fetch_trace_data_size (u8 trace_type)
 }
 
 int ioam_trace_get_sizeof_handler (u32 * result);
+int ip6_trace_profile_setup (void);
+int ip6_trace_profile_cleanup (void);
+
+#define TSP_SECONDS              0
+#define TSP_MILLISECONDS         1
+#define TSP_MICROSECONDS         2
+#define TSP_NANOSECONDS          3
+
 #endif
 
 /*
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_decap.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_decap.c
new file mode 100644 (file)
index 0000000..f938a33
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_decap_ioam_v4_error    \
+_(DECAPSULATED, "good packets decapsulated")
+
+static char *vxlan_gpe_decap_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_decap_ioam_v4_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_DECAP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_decap_ioam_v4_error
+#undef _
+    VXLAN_GPE_DECAP_IOAM_V4_N_ERROR,
+} vxlan_gpe_decap_ioam_v4_error_t;
+
+
+always_inline void
+vxlan_gpe_decap_ioam_v4_two_inline (vlib_main_t * vm,
+                                   vlib_node_runtime_t * node,
+                                   vxlan_gpe_main_t * ngm,
+                                   vlib_buffer_t * b0, vlib_buffer_t * b1,
+                                   u32 * next0, u32 * next1)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  next0[0] = next1[0] = hm->decap_v4_next_override;
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b0, &next0[0],
+                                           VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+                                           0 /* use_adj */ );
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b1, &next1[0],
+                                           VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+                                           0 /* use_adj */ );
+}
+
+
+
+static uword
+vxlan_gpe_decap_ioam (vlib_main_t * vm,
+                     vlib_node_runtime_t * node,
+                     vlib_frame_t * from_frame, u8 is_ipv6)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 bi0, bi1;
+         vlib_buffer_t *b0, *b1;
+         u32 next0, next1;
+
+         next0 = next1 = hm->decap_v4_next_override;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+         }
+
+         bi0 = from[0];
+         bi1 = from[1];
+         to_next[0] = bi0;
+         to_next[1] = bi1;
+         from += 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         n_left_from -= 2;
+
+         b0 = vlib_get_buffer (vm, bi0);
+         b1 = vlib_get_buffer (vm, bi1);
+
+
+         vlib_buffer_advance (b0,
+                              -(word) (sizeof (udp_header_t) +
+                                       sizeof (ip4_header_t) +
+                                       sizeof (vxlan_gpe_header_t)));
+         vlib_buffer_advance (b1,
+                              -(word) (sizeof (udp_header_t) +
+                                       sizeof (ip4_header_t) +
+                                       sizeof (vxlan_gpe_header_t)));
+
+         vxlan_gpe_decap_ioam_v4_two_inline (vm, node, ngm, b0, b1,
+                                             &next0, &next1);
+
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, bi1, next0,
+                                          next1);
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+                                                             sizeof (*tr));
+           }
+       }
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 bi0;
+         vlib_buffer_t *b0;
+         u32 next0 = hm->decap_v4_next_override;
+
+         bi0 = from[0];
+         to_next[0] = bi0;
+         from += 1;
+         to_next += 1;
+         n_left_from -= 1;
+         n_left_to_next -= 1;
+
+         b0 = vlib_get_buffer (vm, bi0);
+
+
+         vlib_buffer_advance (b0,
+                              -(word) (sizeof (udp_header_t) +
+                                       sizeof (ip4_header_t) +
+                                       sizeof (vxlan_gpe_header_t)));
+
+         next0 = hm->decap_v4_next_override;
+         vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b0,
+                                                   &next0,
+                                                   VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+                                                   0 /* use_adj */ );
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+                                                             sizeof (*tr));
+           }
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, next0);
+       }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+static uword
+vxlan_gpe_decap_ioam_v4 (vlib_main_t * vm,
+                        vlib_node_runtime_t * node,
+                        vlib_frame_t * from_frame)
+{
+  return vxlan_gpe_decap_ioam (vm, node, from_frame, 0);
+}
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan_gpe_decap_ioam_v4_node) = {
+  .function = vxlan_gpe_decap_ioam_v4,
+  .name = "vxlan-gpe-decap-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_decap_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_decap_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_DECAP_IOAM_V4_N_NEXT,
+
+  .next_nodes = {
+    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP] = "vxlan-gpe-pop-ioam-v4",
+    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_encap.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_encap.c
new file mode 100644 (file)
index 0000000..7add6d0
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_encap_ioam_v4_error    \
+_(ENCAPSULATED, "good packets encapsulated")
+
+static char *vxlan_gpe_encap_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_encap_ioam_v4_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_ENCAP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_encap_ioam_v4_error
+#undef _
+    VXLAN_GPE_ENCAP_IOAM_V4_N_ERROR,
+} vxlan_gpe_encap_ioam_v4_error_t;
+
+typedef enum
+{
+  VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP,
+  VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+  VXLAN_GPE_ENCAP_IOAM_V4_N_NEXT
+} vxlan_gpe_encap_ioam_v4_next_t;
+
+
+always_inline void
+vxlan_gpe_encap_ioam_v4_two_inline (vlib_main_t * vm,
+                                   vlib_node_runtime_t * node,
+                                   vxlan_gpe_main_t * ngm,
+                                   vlib_buffer_t * b0, vlib_buffer_t * b1,
+                                   u32 * next0, u32 * next1)
+{
+  next0[0] = next1[0] = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b0, next0,
+                                           VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+                                           0 /* use_adj */ );
+  vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b1, next1,
+                                           VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+                                           0 /* use_adj */ );
+}
+
+
+static uword
+vxlan_gpe_encap_ioam_v4 (vlib_main_t * vm,
+                        vlib_node_runtime_t * node,
+                        vlib_frame_t * from_frame)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 bi0, bi1;
+         vlib_buffer_t *b0, *b1;
+         u32 next0, next1;
+
+         next0 = next1 = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+         }
+
+         bi0 = from[0];
+         bi1 = from[1];
+         to_next[0] = bi0;
+         to_next[1] = bi1;
+         from += 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         n_left_from -= 2;
+
+         b0 = vlib_get_buffer (vm, bi0);
+         b1 = vlib_get_buffer (vm, bi1);
+
+         vxlan_gpe_encap_ioam_v4_two_inline (vm, node, ngm, b0, b1,
+                                             &next0, &next1);
+
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, bi1, next0,
+                                          next1);
+       }
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 bi0;
+         vlib_buffer_t *b0;
+         u32 next0 = VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP;
+
+         bi0 = from[0];
+         to_next[0] = bi0;
+         from += 1;
+         to_next += 1;
+         n_left_from -= 1;
+         n_left_to_next -= 1;
+
+         b0 = vlib_get_buffer (vm, bi0);
+
+         vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, ngm, b0,
+                                                   &next0,
+                                                   VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP,
+                                                   0 /* use_adj */ );
+
+         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+           {
+             vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
+                                                             sizeof (*tr));
+           }
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, next0);
+       }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan_gpe_encap_ioam_v4_node) = {
+  .function = vxlan_gpe_encap_ioam_v4,
+  .name = "vxlan-gpe-encap-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_encap_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_encap_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_ENCAP_IOAM_V4_N_NEXT,
+
+  .next_nodes = {
+    [VXLAN_GPE_ENCAP_IOAM_V4_NEXT_IP4_LOOKUP] = "ip4-lookup",
+    [VXLAN_GPE_ENCAP_IOAM_V4_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_pop.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/ioam_pop.c
new file mode 100644 (file)
index 0000000..55c33b1
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vppinfra/error.h>
+#include <vppinfra/hash.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* Statistics (not really errors) */
+#define foreach_vxlan_gpe_pop_ioam_v4_error    \
+_(POPPED, "good packets popped")
+
+static char *vxlan_gpe_pop_ioam_v4_error_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_pop_ioam_v4_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_POP_IOAM_V4_ERROR_##sym,
+  foreach_vxlan_gpe_pop_ioam_v4_error
+#undef _
+    VXLAN_GPE_POP_IOAM_V4_N_ERROR,
+} vxlan_gpe_pop_ioam_v4_error_t;
+
+typedef struct
+{
+  ioam_trace_t fmt_trace;
+} vxlan_gpe_pop_ioam_v4_trace_t;
+
+
+u8 *
+format_vxlan_gpe_pop_ioam_v4_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  vxlan_gpe_pop_ioam_v4_trace_t *t1
+    = va_arg (*args, vxlan_gpe_pop_ioam_v4_trace_t *);
+  ioam_trace_t *t = &(t1->fmt_trace);
+  vxlan_gpe_ioam_option_t *fmt_trace0;
+  vxlan_gpe_ioam_option_t *opt0, *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  u8 type0;
+
+  fmt_trace0 = (vxlan_gpe_ioam_option_t *) t->option_data;
+
+  s = format (s, "VXLAN_GPE_IOAM_POP: next_index %d len %d traced %d",
+             t->next_index, fmt_trace0->length, t->trace_len);
+
+  opt0 = (vxlan_gpe_ioam_option_t *) (fmt_trace0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) (((u8 *) fmt_trace0) + t->trace_len);
+
+  while (opt0 < limit0)
+    {
+      type0 = opt0->type;
+      switch (type0)
+       {
+       case 0:         /* Pad, just stop */
+         opt0 = (vxlan_gpe_ioam_option_t *) ((u8 *) opt0) + 1;
+         break;
+
+       default:
+         if (hm->trace[type0])
+           {
+             s = (*hm->trace[type0]) (s, opt0);
+           }
+         else
+           {
+             s =
+               format (s, "\n    unrecognized option %d length %d", type0,
+                       opt0->length);
+           }
+         opt0 =
+           (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+                                        sizeof (vxlan_gpe_ioam_option_t));
+         break;
+       }
+    }
+
+  return s;
+}
+
+always_inline void
+vxlan_gpe_ioam_pop_v4 (vlib_main_t * vm, vlib_node_runtime_t * node,
+                      vlib_buffer_t * b0)
+{
+  ip4_header_t *ip0;
+  udp_header_t *udp_hdr0;
+  vxlan_gpe_header_t *gpe_hdr0;
+  vxlan_gpe_ioam_hdr_t *gpe_ioam0;
+
+  ip0 = vlib_buffer_get_current (b0);
+
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
+
+  /* Pop the iOAM data */
+  vlib_buffer_advance (b0,
+                      (word) (sizeof (udp_header_t) +
+                              sizeof (ip4_header_t) +
+                              sizeof (vxlan_gpe_header_t) +
+                              gpe_ioam0->length));
+
+  return;
+}
+
+
+
+always_inline void
+vxlan_gpe_pop_ioam_v4_one_inline (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vxlan_gpe_main_t * ngm,
+                                 vlib_buffer_t * b0, u32 * next0)
+{
+  CLIB_UNUSED (ip4_header_t * ip0);
+  CLIB_UNUSED (udp_header_t * udp_hdr0);
+  CLIB_UNUSED (vxlan_gpe_header_t * gpe_hdr0);
+  CLIB_UNUSED (vxlan_gpe_ioam_hdr_t * gpe_ioam0);
+  CLIB_UNUSED (vxlan_gpe_ioam_option_t * opt0);
+  CLIB_UNUSED (vxlan_gpe_ioam_option_t * limit0);
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+
+  /* Pop the iOAM header */
+  ip0 = vlib_buffer_get_current (b0);
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
+  opt0 = (vxlan_gpe_ioam_option_t *) (gpe_ioam0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) gpe_ioam0 + gpe_ioam0->length);
+
+  /*
+   * Basic validity checks
+   */
+  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
+    {
+      next0[0] = VXLAN_GPE_INPUT_NEXT_DROP;
+      goto trace00;
+    }
+
+  /* Scan the set of h-b-h options, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      u8 type0;
+      type0 = opt0->type;
+      switch (type0)
+       {
+       case 0:         /* Pad1 */
+         opt0 = (vxlan_gpe_ioam_option_t *) ((u8 *) opt0) + 1;
+         continue;
+       case 1:         /* PadN */
+         break;
+       default:
+         if (hm->pop_options[type0])
+           {
+             if ((*hm->pop_options[type0]) (ip0, opt0) < 0)
+               {
+                 next0[0] = VXLAN_GPE_INPUT_NEXT_DROP;
+                 goto trace00;
+               }
+           }
+         break;
+       }
+      opt0 =
+       (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+                                    sizeof (vxlan_gpe_ioam_option_t));
+    }
+
+
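+  /* Hand the packet to the decap next node registered for the inner
+   * protocol; unknown protocols are dropped. */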
+  next0[0] = (gpe_ioam0->protocol < VXLAN_GPE_PROTOCOL_MAX) ?
+    ngm->decap_next_node_list[gpe_ioam0->protocol] :
+    VXLAN_GPE_INPUT_NEXT_DROP;
+
+trace00:
+  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      vxlan_gpe_pop_ioam_v4_trace_t *t =
+       vlib_add_trace (vm, node, b0, sizeof (*t));
+      u32 trace_len = gpe_ioam0->length;
+      t->fmt_trace.next_index = next0[0];
+      /* Capture the h-b-h option verbatim */
+      trace_len = trace_len < ARRAY_LEN (t->fmt_trace.option_data) ?
+       trace_len : ARRAY_LEN (t->fmt_trace.option_data);
+      t->fmt_trace.trace_len = trace_len;
+      clib_memcpy (&(t->fmt_trace.option_data), gpe_ioam0, trace_len);
+    }
+
+  /* Remove the iOAM header inside the VxLAN-GPE header */
+  vxlan_gpe_ioam_pop_v4 (vm, node, b0);
+  return;
+}
+
+always_inline void
+vxlan_gpe_pop_ioam_v4_two_inline (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vxlan_gpe_main_t * ngm,
+                                 vlib_buffer_t * b0, vlib_buffer_t * b1,
+                                 u32 * next0, u32 * next1)
+{
+
+  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, next0);
+  vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b1, next1);
+}
+
+
+
+static uword
+vxlan_gpe_pop_ioam (vlib_main_t * vm,
+                   vlib_node_runtime_t * node,
+                   vlib_frame_t * from_frame, u8 is_ipv6)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
+
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+       {
+         u32 bi0, bi1;
+         vlib_buffer_t *b0, *b1;
+         u32 next0, next1;
+
+         /* Prefetch next iteration. */
+         {
+           vlib_buffer_t *p2, *p3;
+
+           p2 = vlib_get_buffer (vm, from[2]);
+           p3 = vlib_get_buffer (vm, from[3]);
+
+           vlib_prefetch_buffer_header (p2, LOAD);
+           vlib_prefetch_buffer_header (p3, LOAD);
+
+           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+         }
+
+         bi0 = from[0];
+         bi1 = from[1];
+         to_next[0] = bi0;
+         to_next[1] = bi1;
+         from += 2;
+         to_next += 2;
+         n_left_to_next -= 2;
+         n_left_from -= 2;
+
+         b0 = vlib_get_buffer (vm, bi0);
+         b1 = vlib_get_buffer (vm, bi1);
+
+         vxlan_gpe_pop_ioam_v4_two_inline (vm, node, ngm, b0, b1, &next0,
+                                           &next1);
+
+
+         vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, bi1, next0,
+                                          next1);
+       }
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+       {
+         u32 bi0;
+         vlib_buffer_t *b0;
+         u32 next0;
+
+         bi0 = from[0];
+         to_next[0] = bi0;
+         from += 1;
+         to_next += 1;
+         n_left_from -= 1;
+         n_left_to_next -= 1;
+
+         b0 = vlib_get_buffer (vm, bi0);
+
+         vxlan_gpe_pop_ioam_v4_one_inline (vm, node, ngm, b0, &next0);
+
+
+         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                          n_left_to_next, bi0, next0);
+       }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return from_frame->n_vectors;
+}
+
+
+static uword
+vxlan_gpe_pop_ioam_v4 (vlib_main_t * vm,
+                      vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+{
+  return vxlan_gpe_pop_ioam (vm, node, from_frame, 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vxlan_gpe_pop_ioam_v4_node) = {
+  .function = vxlan_gpe_pop_ioam_v4,
+  .name = "vxlan-gpe-pop-ioam-v4",
+  .vector_size = sizeof (u32),
+  .format_trace = format_vxlan_gpe_pop_ioam_v4_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+
+  .n_errors = ARRAY_LEN(vxlan_gpe_pop_ioam_v4_error_strings),
+  .error_strings = vxlan_gpe_pop_ioam_v4_error_strings,
+
+  .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
+
+  .next_nodes = {
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+    foreach_vxlan_gpe_input_next
+#undef _
+  },
+};
+/* *INDENT-ON* */
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe.api b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe.api
new file mode 100644 (file)
index 0000000..2cf7a59
--- /dev/null
@@ -0,0 +1,126 @@
+/* Hey Emacs use -*- mode: C -*- */
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/** \brief iOAM Over VxLAN-GPE - Set iOAM transport for VxLAN-GPE
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param id - profile id
+    @param trace_ppc - Trace PPC setting (0 - none, 1 - encap, 2 - decap)
+    @param pow_enable - Proof of Transit (POT) enabled or not flag
+    @param trace_enable - iOAM Trace enabled or not flag
+
+*/
+define vxlan_gpe_ioam_enable {
+  u32 client_index;
+  u32 context;
+  u16 id;
+  u8 trace_ppc;
+  u8 pow_enable;
+  u8 trace_enable;
+};
+
+/** \brief iOAM Over VxLAN-GPE - Set iOAM transport for VXLAN-GPE reply
+    @param context - sender context, to match reply w/ request
+    @param retval - return value for request
+*/
+define vxlan_gpe_ioam_enable_reply {
+    u32 context;
+    i32 retval;
+};
+
+
+/** \brief iOAM for VxLAN-GPE disable
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param id - profile id
+*/
+define vxlan_gpe_ioam_disable
+{
+  u32 client_index;
+  u32 context;
+  u16 id;
+};
+
+/** \brief vxlan_gpe_ioam disable response
+    @param context - sender context, to match reply w/ request
+    @param retval - return value for request
+*/
+define vxlan_gpe_ioam_disable_reply
+{
+  u32 context;
+  i32 retval;
+};
+
+/** \brief Enable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param vni - VXLAN-GPE VNI
+    @param local - IPv4/6 Address of the local VTEP
+    @param remote - IPv4/6 Address of the remote VTEP
+
+*/
+define vxlan_gpe_ioam_vni_enable {
+  u32 client_index;
+  u32 context;
+  u32 vni;
+  u8  local[16];
+  u8  remote[16];
+  u8  is_ipv6;
+};
+
+/** \brief Reply to enable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_vni_enable_reply {
+  u32 client_index;
+  u32 context;
+  i32 retval;
+};
+
+/** \brief Disable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param vni - VXLAN-GPE VNI
+    @param local - IPv4/6 Address of the local VTEP
+    @param remote - IPv4/6 Address of the remote VTEP
+
+*/
+define vxlan_gpe_ioam_vni_disable {
+  u32 client_index;
+  u32 context;
+  u32 vni;
+  u8  local[16];
+  u8  remote[16];
+  u8  is_ipv6;
+};
+
+/** \brief Reply to disable iOAM for a VNI (VXLAN-GPE)
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param retval - return value for request
+
+*/
+define vxlan_gpe_ioam_vni_disable_reply {
+  u32 client_index;
+  u32 context;
+  i32 retval;
+};
+
+
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h
new file mode 100644 (file)
index 0000000..bbf2c10
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Include the generated file, see BUILT_SOURCES in Makefile.am */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe.api.h>
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_api.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_api.c
new file mode 100644 (file)
index 0000000..3e14385
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_api.c - iOAM VxLAN-GPE related APIs to create
+ *               and maintain profiles
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+/* define message IDs */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_typedefs
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_api_version
+
+/*
+ * A handy macro to set up a message reply.
+ * Assumes that the following variables are available:
+ * mp - pointer to request message
+ * rmp - pointer to reply message type
+ * rv - return value
+ */
+
+#define VXLAN_GPE_REPLY_MACRO(t)                                \
+do {                                                            \
+    unix_shared_memory_queue_t * q =                            \
+    vl_api_client_index_to_input_queue (mp->client_index);      \
+    if (!q)                                                     \
+        return;                                                 \
+                                                                \
+    rmp = vl_msg_api_alloc (sizeof (*rmp));                     \
+    rmp->_vl_msg_id = ntohs((t)+sm->msg_id_base);               \
+    rmp->context = mp->context;                                 \
+    rmp->retval = ntohl(rv);                                    \
+                                                                \
+    vl_msg_api_send_shmem (q, (u8 *)&rmp);                      \
+} while(0);
+
+/* *INDENT-OFF* */
+#define VXLAN_GPE_REPLY_MACRO2(t, body)                         \
+do {                                                            \
+    unix_shared_memory_queue_t * q;                             \
+    rv = vl_msg_api_pd_handler (mp, rv);                        \
+    q = vl_api_client_index_to_input_queue (mp->client_index);  \
+    if (!q)                                                     \
+        return;                                                 \
+                                                                \
+    rmp = vl_msg_api_alloc (sizeof (*rmp));                     \
+    rmp->_vl_msg_id = ntohs((t));                               \
+    rmp->context = mp->context;                                 \
+    rmp->retval = ntohl(rv);                                    \
+    do {body;} while (0);                                       \
+    vl_msg_api_send_shmem (q, (u8 *)&rmp);                      \
+} while(0);
+/* *INDENT-ON* */
+
+/* List of message types that this plugin understands */
+
+#define foreach_vxlan_gpe_plugin_api_msg                        \
+_(VXLAN_GPE_IOAM_ENABLE, vxlan_gpe_ioam_enable)                 \
+_(VXLAN_GPE_IOAM_DISABLE, vxlan_gpe_ioam_disable)               \
+_(VXLAN_GPE_IOAM_VNI_ENABLE, vxlan_gpe_ioam_vni_enable)         \
+_(VXLAN_GPE_IOAM_VNI_DISABLE, vxlan_gpe_ioam_vni_disable)       \
+
+
+static void vl_api_vxlan_gpe_ioam_enable_t_handler
+  (vl_api_vxlan_gpe_ioam_enable_t * mp)
+{
+  int rv = 0;
+  vl_api_vxlan_gpe_ioam_enable_reply_t *rmp;
+  clib_error_t *error;
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+
+  /* Ignore the profile id: only a single profile is currently
+   * supported */
+  error =
+    vxlan_gpe_ioam_enable (mp->trace_enable, mp->pow_enable, mp->trace_ppc);
+  if (error)
+    {
+      clib_error_report (error);
+      rv = clib_error_get_code (error);
+    }
+
+  VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_ENABLE_REPLY);
+}
+
+static void vl_api_vxlan_gpe_ioam_disable_t_handler
+  (vl_api_vxlan_gpe_ioam_disable_t * mp)
+{
+  int rv = 0;
+  vl_api_vxlan_gpe_ioam_disable_reply_t *rmp;
+  clib_error_t *error;
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+
+  /* Ignore the profile id: only a single profile is currently
+   * supported */
+  error = vxlan_gpe_ioam_disable (0, 0, 0);
+  if (error)
+    {
+      clib_error_report (error);
+      rv = clib_error_get_code (error);
+    }
+
+  VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_DISABLE_REPLY);
+}
+
+static void vl_api_vxlan_gpe_ioam_vni_enable_t_handler
+  (vl_api_vxlan_gpe_ioam_vni_enable_t * mp)
+{
+  int rv = 0;
+  vl_api_vxlan_gpe_ioam_vni_enable_reply_t *rmp;
+  clib_error_t *error;
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+  vxlan4_gpe_tunnel_key_t key4;
+  uword *p = NULL;
+  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t = 0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  u32 vni;
+
+
+  if (!mp->is_ipv6)
+    {
+      clib_memcpy (&key4.local, &mp->local, sizeof (key4.local));
+      clib_memcpy (&key4.remote, &mp->remote, sizeof (key4.remote));
+      vni = clib_net_to_host_u32 (mp->vni);
+      key4.vni = clib_host_to_net_u32 (vni << 8);
+      key4.pad = 0;
+
+      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+    }
+  else
+    {
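+      /* IPv6 tunnel keys are not looked up yet; note that returning here
+       * sends no reply back to the API client. */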
+      return;
+    }
+
+  if (!p)
+    return;
+
+  t = pool_elt_at_index (gm->tunnels, p[0]);
+
+  error = vxlan_gpe_ioam_set (t, hm->has_trace_option,
+                             hm->has_pot_option,
+                             hm->has_ppc_option, mp->is_ipv6);
+
+
+  if (error)
+    {
+      clib_error_report (error);
+      rv = clib_error_get_code (error);
+    }
+
+  VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_VNI_ENABLE_REPLY);
+}
+
+
+static void vl_api_vxlan_gpe_ioam_vni_disable_t_handler
+  (vl_api_vxlan_gpe_ioam_vni_disable_t * mp)
+{
+  int rv = 0;
+  vl_api_vxlan_gpe_ioam_vni_disable_reply_t *rmp;
+  clib_error_t *error;
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+  vxlan4_gpe_tunnel_key_t key4;
+  uword *p = NULL;
+  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t = 0;
+  u32 vni;
+
+
+  if (!mp->is_ipv6)
+    {
+      clib_memcpy (&key4.local, &mp->local, sizeof (key4.local));
+      clib_memcpy (&key4.remote, &mp->remote, sizeof (key4.remote));
+      vni = clib_net_to_host_u32 (mp->vni);
+      key4.vni = clib_host_to_net_u32 (vni << 8);
+      key4.pad = 0;
+
+      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+    }
+  else
+    {
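+      /* IPv6 tunnel keys are not looked up yet; note that returning here
+       * sends no reply back to the API client. */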
+      return;
+    }
+
+  if (!p)
+    return;
+
+  t = pool_elt_at_index (gm->tunnels, p[0]);
+
+  error = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
+
+
+  if (error)
+    {
+      clib_error_report (error);
+      rv = clib_error_get_code (error);
+    }
+
+
+  VXLAN_GPE_REPLY_MACRO (VL_API_VXLAN_GPE_IOAM_VNI_DISABLE_REPLY);
+}
+
+/*
+ * This routine exists to convince the vlib plugin framework that
+ * we haven't accidentally copied a random .dll into the plugin directory.
+ *
+ * Also collects global variable pointers passed from the vpp engine
+ */
+
+clib_error_t *
+vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h,
+                     int from_early_init)
+{
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+  clib_error_t *error = 0;
+
+  sm->vlib_main = vm;
+  sm->vnet_main = h->vnet_main;
+  return error;
+}
+
+/* Set up the API message handling tables */
+static clib_error_t *
+vxlan_gpe_plugin_api_hookup (vlib_main_t * vm)
+{
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+#define _(N,n)                                                  \
+    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),     \
+                           #n,                                 \
+                           vl_api_##n##_t_handler,              \
+                           vl_noop_handler,                     \
+                           vl_api_##n##_t_endian,               \
+                           vl_api_##n##_t_print,                \
+                           sizeof(vl_api_##n##_t), 1);
+  foreach_vxlan_gpe_plugin_api_msg;
+#undef _
+
+  return 0;
+}
+
+static clib_error_t *
+vxlan_gpe_init (vlib_main_t * vm)
+{
+  vxlan_gpe_ioam_main_t *sm = &vxlan_gpe_ioam_main;
+  clib_error_t *error = 0;
+  u8 *name;
+  u32 encap_node_index = vxlan_gpe_encap_ioam_v4_node.index;
+  u32 decap_node_index = vxlan_gpe_decap_ioam_v4_node.index;
+  vlib_node_t *vxlan_gpe_encap_node = NULL;
+  vlib_node_t *vxlan_gpe_decap_node = NULL;
+  uword next_node = 0;
+
+  name = format (0, "ioam_vxlan_gpe_%08x%c", api_version, 0);
+
+  /* Ask for a correctly-sized block of API message decode slots */
+  sm->msg_id_base = vl_msg_api_get_msg_ids
+    ((char *) name, VL_MSG_FIRST_AVAILABLE);
+
+  error = vxlan_gpe_plugin_api_hookup (vm);
+
+  /* Hook the ioam-encap node to vxlan-gpe-encap */
+  vxlan_gpe_encap_node = vlib_get_node_by_name (vm, (u8 *) "vxlan-gpe-encap");
+  sm->encap_v4_next_node =
+    vlib_node_add_next (vm, vxlan_gpe_encap_node->index, encap_node_index);
+
+  vxlan_gpe_decap_node =
+    vlib_get_node_by_name (vm, (u8 *) "vxlan4-gpe-input");
+  next_node =
+    vlib_node_add_next (vm, vxlan_gpe_decap_node->index, decap_node_index);
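+  /* Register the iOAM protocol so vxlan4-gpe-input steers iOAM-bearing
+   * packets to the decap node hooked in above. */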
+  vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM, next_node);
+
+  vec_free (name);
+
+  return error;
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c
new file mode 100644 (file)
index 0000000..066f582
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/format.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
+
+int
+vxlan_gpe_ioam_set_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
+                           int has_pot_option, int has_ppc_option,
+                           u8 ipv6_set)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  u32 size;
+  vxlan_gpe_ioam_hdr_t *vxlan_gpe_ioam_hdr;
+  u8 *current;
+  u8 trace_data_size = 0;
+  u8 pot_data_size = 0;
+
+  if (has_trace_option == 0 && has_pot_option == 0)
+    return -1;
+
+  /* Work out how much space we need */
+  size = sizeof (vxlan_gpe_ioam_hdr_t);
+
+  if (has_trace_option
+      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
+    {
+      size += sizeof (vxlan_gpe_ioam_option_t);
+      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE];
+    }
+  if (has_pot_option
+      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+    {
+      size += sizeof (vxlan_gpe_ioam_option_t);
+      size += hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+    }
+
+  t->rewrite_size = size;
+
+  if (!ipv6_set)
+    {
+      vxlan4_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
+                         hm->encap_v4_next_node);
+      vxlan_gpe_ioam_hdr =
+       (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
+                                 sizeof (ip4_vxlan_gpe_header_t));
+    }
+  else
+    {
+      vxlan6_gpe_rewrite (t, size, VXLAN_GPE_PROTOCOL_IOAM,
+                         VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+      vxlan_gpe_ioam_hdr =
+       (vxlan_gpe_ioam_hdr_t *) (t->rewrite +
+                                 sizeof (ip6_vxlan_gpe_header_t));
+    }
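+  /* The iOAM header sits immediately after the outer IP + UDP + VXLAN-GPE
+   * encapsulation that vxlan4_gpe_rewrite/vxlan6_gpe_rewrite just built. */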
+
+
+  vxlan_gpe_ioam_hdr->type = VXLAN_GPE_PROTOCOL_IOAM;
+  /* Total length of the iOAM header, including all options, in octets */
+  vxlan_gpe_ioam_hdr->length = size;
+  vxlan_gpe_ioam_hdr->protocol = t->protocol;
+  current = (u8 *) vxlan_gpe_ioam_hdr + sizeof (vxlan_gpe_ioam_hdr_t);
+
+  if (has_trace_option
+      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] != 0)
+    {
+      if (0 != hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] (current,
+                                                                 &trace_data_size))
+       return -1;
+      current += trace_data_size;
+    }
+  if (has_pot_option
+      && hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT] != 0)
+    {
+      pot_data_size =
+       hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT];
+      if (0 ==
+         hm->add_options[VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT]
+         (current, &pot_data_size))
+       current += pot_data_size;
+    }
+
+  return 0;
+}
+
+int
+vxlan_gpe_ioam_clear_rewrite (vxlan_gpe_tunnel_t * t, int has_trace_option,
+                             int has_pot_option, int has_ppc_option,
+                             u8 ipv6_set)
+{
+
+  t->rewrite_size = 0;
+
+  if (!ipv6_set)
+    {
+      vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
+    }
+  else
+    {
+      vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
+    }
+
+
+  return 0;
+}
+
+clib_error_t *
+vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
+                     int has_trace_option, int has_pot_option,
+                     int has_ppc_option, u8 ipv6_set)
+{
+  int rv;
+  rv = vxlan_gpe_ioam_clear_rewrite (t, 0, 0, 0, 0);
+
+  if (rv == 0)
+    {
+      return (0);
+    }
+  else
+    {
+      return clib_error_return_code (0, rv, 0,
+                                    "vxlan_gpe_ioam_clear_rewrite returned %d",
+                                    rv);
+    }
+
+}
+
+
+clib_error_t *
+vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
+                   int has_trace_option, int has_pot_option,
+                   int has_ppc_option, u8 ipv6_set)
+{
+  int rv;
+  rv = vxlan_gpe_ioam_set_rewrite (t, has_trace_option,
+                                  has_pot_option, has_ppc_option, ipv6_set);
+
+  if (rv == 0)
+    {
+      return (0);
+    }
+  else
+    {
+      return clib_error_return_code (0, rv, 0,
+                                    "vxlan_gpe_ioam_set_rewrite returned %d",
+                                    rv);
+    }
+
+}
+
+
+static clib_error_t *
+vxlan_gpe_set_ioam_rewrite_command_fn (vlib_main_t * vm,
+                                      unformat_input_t * input,
+                                      vlib_cli_command_t * cmd)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  ip46_address_t local, remote;
+  u8 local_set = 0;
+  u8 remote_set = 0;
+  u8 ipv4_set = 0;
+  u8 ipv6_set = 0;
+  u32 vni;
+  u8 vni_set = 0;
+  u8 disable = 0;
+  clib_error_t *rv = 0;
+  vxlan4_gpe_tunnel_key_t key4;
+  vxlan6_gpe_tunnel_key_t key6;
+  uword *p;
+  vxlan_gpe_main_t *gm = &vxlan_gpe_main;
+  vxlan_gpe_tunnel_t *t = 0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "local %U", unformat_ip4_address, &local.ip4))
+       {
+         local_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (input, "remote %U",
+                        unformat_ip4_address, &remote.ip4))
+       {
+         remote_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (input, "local %U", unformat_ip6_address, &local.ip6))
+       {
+         local_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (input, "remote %U",
+                        unformat_ip6_address, &remote.ip6))
+       {
+         remote_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (input, "vni %d", &vni))
+       vni_set = 1;
+      else if (unformat (input, "disable"))
+       disable = 1;
+      else
+       break;
+    }
+
+  if (local_set == 0)
+    return clib_error_return (0, "tunnel local address not specified");
+
+  if (remote_set == 0)
+    return clib_error_return (0, "tunnel remote address not specified");
+
+  if (ipv4_set && ipv6_set)
+    return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
+  if ((ipv4_set && memcmp (&local.ip4, &remote.ip4, sizeof (local.ip4)) == 0)
+      || (ipv6_set
+         && memcmp (&local.ip6, &remote.ip6, sizeof (local.ip6)) == 0))
+    return clib_error_return (0, "src and dst addresses are identical");
+
+  if (vni_set == 0)
+    return clib_error_return (0, "vni not specified");
+
+
+  if (!ipv6_set)
+    {
+      key4.local = local.ip4.as_u32;
+      key4.remote = remote.ip4.as_u32;
+      key4.vni = clib_host_to_net_u32 (vni << 8);
+      key4.pad = 0;
+
+      p = hash_get_mem (gm->vxlan4_gpe_tunnel_by_key, &key4);
+    }
+  else
+    {
+      key6.local.as_u64[0] = local.ip6.as_u64[0];
+      key6.local.as_u64[1] = local.ip6.as_u64[1];
+      key6.remote.as_u64[0] = remote.ip6.as_u64[0];
+      key6.remote.as_u64[1] = remote.ip6.as_u64[1];
+      key6.vni = clib_host_to_net_u32 (vni << 8);
+
+      p = hash_get_mem (gm->vxlan6_gpe_tunnel_by_key, &key6);
+    }
+
+  if (!p)
+    return clib_error_return (0, "VxLAN Tunnel not found");
+
+  t = pool_elt_at_index (gm->tunnels, p[0]);
+
+  if (!disable)
+    {
+      rv = vxlan_gpe_ioam_set (t, hm->has_trace_option,
+                              hm->has_pot_option,
+                              hm->has_ppc_option, ipv6_set);
+    }
+  else
+    {
+      rv = vxlan_gpe_ioam_clear (t, 0, 0, 0, 0);
+    }
+  return rv;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_rewrite_cmd, static) = {
+  .path = "set vxlan-gpe-ioam",
+  .short_help = "set vxlan-gpe-ioam vxlan <src-ip> <dst_ip> <vnid> [disable]",
+  .function = vxlan_gpe_set_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+
+
+clib_error_t *
+vxlan_gpe_ioam_enable (int has_trace_option, int has_pot_option,
+                      int has_ppc_option)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  hm->has_trace_option = has_trace_option;
+  hm->has_pot_option = has_pot_option;
+  hm->has_ppc_option = has_ppc_option;
+  if (hm->has_trace_option)
+    {
+      vxlan_gpe_trace_profile_setup ();
+    }
+
+  return 0;
+}
+
+clib_error_t *
+vxlan_gpe_ioam_disable (int has_trace_option, int has_pot_option,
+                       int has_ppc_option)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  hm->has_trace_option = has_trace_option;
+  hm->has_pot_option = has_pot_option;
+  hm->has_ppc_option = has_ppc_option;
+  if (!hm->has_trace_option)
+    {
+      vxlan_gpe_trace_profile_cleanup ();
+    }
+
+  return 0;
+}
+
+void
+vxlan_gpe_set_next_override (uword next)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+  hm->decap_v4_next_override = next;
+  return;
+}
+
+static clib_error_t *
+vxlan_gpe_set_ioam_flags_command_fn (vlib_main_t * vm,
+                                    unformat_input_t * input,
+                                    vlib_cli_command_t * cmd)
+{
+  int has_trace_option = 0;
+  int has_pot_option = 0;
+  int has_ppc_option = 0;
+  clib_error_t *rv = 0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "trace"))
+       has_trace_option = 1;
+      else if (unformat (input, "pot"))
+       has_pot_option = 1;
+      else if (unformat (input, "ppc encap"))
+       has_ppc_option = PPC_ENCAP;
+      else if (unformat (input, "ppc decap"))
+       has_ppc_option = PPC_DECAP;
+      else if (unformat (input, "ppc none"))
+       has_ppc_option = PPC_NONE;
+      else
+       break;
+    }
+
+
+  rv =
+    vxlan_gpe_ioam_enable (has_trace_option, has_pot_option, has_ppc_option);
+
+  return rv;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_set_ioam_flags_cmd, static) = {
+  .path = "set vxlan-gpe-ioam rewrite",
+  .short_help = "set vxlan-gpe-ioam rewrite [trace] [pot] [ppc <encap|decap|none>]",
+  .function = vxlan_gpe_set_ioam_flags_command_fn,
+};
+/* *INDENT-ON* */
+
+
+
+
+clib_error_t *
+clear_vxlan_gpe_ioam_rewrite_command_fn (vlib_main_t * vm,
+                                        unformat_input_t * input,
+                                        vlib_cli_command_t * cmd)
+{
+  return (vxlan_gpe_ioam_disable (0, 0, 0));
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_clear_ioam_flags_cmd, static) = {
+  .path = "clear vxlan-gpe-ioam rewrite",
+  .short_help = "clear vxlan-gpe-ioam rewrite",
+  .function = clear_vxlan_gpe_ioam_rewrite_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h
new file mode 100644 (file)
index 0000000..ac82480
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_h__
+#define __included_vxlan_gpe_ioam_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <vnet/ip/ip.h>
+
+
+typedef struct vxlan_gpe_ioam_main_
+{
+  /* time scale transform. Joy. */
+  u32 unix_time_0;
+  f64 vlib_time_0;
+
+
+  /* Trace option */
+  u8 has_trace_option;
+
+  /* Pot option */
+  u8 has_pot_option;
+
+#define PPC_NONE  0
+#define PPC_ENCAP 1
+#define PPC_DECAP 2
+  u8 has_ppc_option;
+
+#define TSP_SECONDS              0
+#define TSP_MILLISECONDS         1
+#define TSP_MICROSECONDS         2
+#define TSP_NANOSECONDS          3
+
+  /* Array of function pointers to ADD and POP VxLAN-GPE iOAM option handling routines */
+  u8 options_size[256];
+  int (*add_options[256]) (u8 * rewrite_string, u8 * rewrite_size);
+  int (*pop_options[256]) (ip4_header_t * ip, vxlan_gpe_ioam_option_t * opt);
+
+  /* Array of function pointers to iOAM option handling routines */
+  int (*options[256]) (vlib_buffer_t * b, vxlan_gpe_ioam_option_t * opt,
+                      u8 is_ipv4, u8 use_adj);
+  u8 *(*trace[256]) (u8 * s, vxlan_gpe_ioam_option_t * opt);
+
+  /* API message ID base */
+  u16 msg_id_base;
+
+  /* Next-node overrides applied at decap, e.g. to redirect packets to
+   * the iOAM export node */
+  uword decap_v4_next_override;
+  uword decap_v6_next_override;
+
+  /* Next-node indices hooked into the vxlan-gpe-encap node graph */
+  uword encap_v4_next_node;
+  uword encap_v6_next_node;
+
+  /** State convenience vlib_main_t */
+  vlib_main_t *vlib_main;
+  /** State convenience vnet_main_t */
+  vnet_main_t *vnet_main;
+
+
+} vxlan_gpe_ioam_main_t;
+extern vxlan_gpe_ioam_main_t vxlan_gpe_ioam_main;
+
+/*
+ * Primary h-b-h handler trace support
+ */
+typedef struct
+{
+  u32 next_index;
+  u32 trace_len;
+  u8 option_data[256];
+} ioam_trace_t;
+
+
+vlib_node_registration_t vxlan_gpe_encap_ioam_v4_node;
+vlib_node_registration_t vxlan_gpe_decap_ioam_v4_node;
+
+clib_error_t *vxlan_gpe_ioam_enable (int has_trace_option, int has_pot_option,
+                                    int has_ppc_option);
+
+clib_error_t *vxlan_gpe_ioam_disable (int has_trace_option,
+                                     int has_pot_option, int has_ppc_option);
+
+clib_error_t *vxlan_gpe_ioam_set (vxlan_gpe_tunnel_t * t,
+                                 int has_trace_option,
+                                 int has_pot_option,
+                                 int has_ppc_option, u8 ipv6_set);
+clib_error_t *vxlan_gpe_ioam_clear (vxlan_gpe_tunnel_t * t,
+                                   int has_trace_option, int has_pot_option,
+                                   int has_ppc_option, u8 ipv6_set);
+
+int vxlan_gpe_ioam_add_register_option (u8 option,
+                                       u8 size,
+                                       int rewrite_options (u8 *
+                                                            rewrite_string,
+                                                            u8 *
+                                                            rewrite_size));
+
+int vxlan_gpe_add_unregister_option (u8 option);
+
+int vxlan_gpe_ioam_register_option (u8 option,
+                                   int options (vlib_buffer_t * b,
+                                                vxlan_gpe_ioam_option_t *
+                                                opt, u8 is_ipv4, u8 use_adj),
+                                   u8 * trace (u8 * s,
+                                               vxlan_gpe_ioam_option_t *
+                                               opt));
+int vxlan_gpe_ioam_unregister_option (u8 option);
+
+int vxlan_gpe_trace_profile_setup (void);
+
+int vxlan_gpe_trace_profile_cleanup (void);
+
+typedef enum
+{
+  VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP,
+  VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
+  VXLAN_GPE_DECAP_IOAM_V4_N_NEXT
+} vxlan_gpe_decap_ioam_v4_next_t;
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h
new file mode 100644 (file)
index 0000000..a7ef859
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_packet_h__
+#define __included_vxlan_gpe_ioam_packet_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip.h>
+
+
+
+#define VXLAN_GPE_OPTION_TYPE_IOAM_TRACE   59
+#define VXLAN_GPE_OPTION_TYPE_IOAM_PROOF_OF_TRANSIT 60
+
+/**
+ * @brief VXLAN GPE Extension (iOAM) Header definition
+ */
+typedef struct
+{
+  u8 type;
+  u8 length;
+  /** Reserved */
+  u8 reserved;
+  /** see vxlan_gpe_protocol_t */
+  u8 protocol;
+} vxlan_gpe_ioam_hdr_t;
+
+/**
+ * @brief VxLAN GPE iOAM Option definition
+ */
+typedef struct
+{
+  /** Option Type */
+  u8 type;
+  /** Length in octets of the option data field */
+  u8 length;
+} vxlan_gpe_ioam_option_t;
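+
+/*
+ * On the wire the vxlan_gpe_ioam_hdr_t is immediately followed by one or
+ * more options; each option starts with a vxlan_gpe_ioam_option_t and
+ * carries 'length' octets of option data after it.
+ */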
+
+
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
new file mode 100644 (file)
index 0000000..a10e85a
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+#include <ioam/lib-trace/trace_util.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+/* Timestamp precision multipliers for seconds, milliseconds, microseconds
+ * and nanoseconds respectively.
+ */
+static f64 trace_tsp_mul[4] = { 1, 1e3, 1e6, 1e9 };
+
+typedef union
+{
+  u64 as_u64;
+  u32 as_u32[2];
+} time_u64_t;
+
+
+/* *INDENT-OFF* */
+typedef CLIB_PACKED(struct {
+  vxlan_gpe_ioam_option_t hdr;
+  u8 ioam_trace_type;
+  u8 data_list_elts_left;
+  u32 elts[0]; /* Variable type. So keep it generic */
+}) vxlan_gpe_ioam_trace_option_t;
+/* *INDENT-ON* */
+
+
+#define foreach_vxlan_gpe_ioam_trace_stats                             \
+  _(SUCCESS, "Pkts updated with TRACE records")                                        \
+  _(FAILED, "Errors in TRACE due to lack of TRACE records")
+
+static char *vxlan_gpe_ioam_trace_stats_strings[] = {
+#define _(sym,string) string,
+  foreach_vxlan_gpe_ioam_trace_stats
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) VXLAN_GPE_IOAM_TRACE_##sym,
+  foreach_vxlan_gpe_ioam_trace_stats
+#undef _
+    VXLAN_GPE_IOAM_TRACE_N_STATS,
+} vxlan_gpe_ioam_trace_stats_t;
+
+
+typedef struct
+{
+  /* stats */
+  u64 counters[ARRAY_LEN (vxlan_gpe_ioam_trace_stats_strings)];
+
+  /* convenience */
+  vlib_main_t *vlib_main;
+  vnet_main_t *vnet_main;
+} vxlan_gpe_ioam_trace_main_t;
+
+vxlan_gpe_ioam_trace_main_t vxlan_gpe_ioam_trace_main;
+
+int
+vxlan_gpe_ioam_add_register_option (u8 option,
+                                   u8 size,
+                                   int rewrite_options (u8 * rewrite_string,
+                                                        u8 * rewrite_size))
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (hm->add_options));
+
+  /* Already registered */
+  if (hm->add_options[option])
+    return (-1);
+
+  hm->add_options[option] = rewrite_options;
+  hm->options_size[option] = size;
+
+  return (0);
+}
+
+int
+vxlan_gpe_add_unregister_option (u8 option)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (hm->add_options));
+
+  /* Not registered */
+  if (!hm->add_options[option])
+    return (-1);
+
+  hm->add_options[option] = NULL;
+  hm->options_size[option] = 0;
+  return (0);
+}
+
+
+int
+vxlan_gpe_ioam_register_option (u8 option,
+                               int options (vlib_buffer_t * b,
+                                            vxlan_gpe_ioam_option_t * opt,
+                                            u8 is_ipv4, u8 use_adj),
+                               u8 * trace (u8 * s,
+                                           vxlan_gpe_ioam_option_t * opt))
+{
+  vxlan_gpe_ioam_main_t *im = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (im->options));
+
+  /* Already registered */
+  if (im->options[option])
+    return (-1);
+
+  im->options[option] = options;
+  im->trace[option] = trace;
+
+  return (0);
+}
+
+int
+vxlan_gpe_ioam_unregister_option (u8 option)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  ASSERT (option < ARRAY_LEN (hm->options));
+
+  /* Not registered */
+  if (!hm->options[option])
+    return (-1);
+
+  hm->options[option] = NULL;
+  hm->trace[option] = NULL;
+
+  return (0);
+}
+
+
+always_inline void
+vxlan_gpe_ioam_trace_stats_increment_counter (u32 counter_index,
+                                             u64 increment)
+{
+  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
+
+  hm->counters[counter_index] += increment;
+}
+
+
+static u8 *
+format_ioam_data_list_element (u8 * s, va_list * args)
+{
+  u32 *elt = va_arg (*args, u32 *);
+  u8 *trace_type_p = va_arg (*args, u8 *);
+  u8 trace_type = *trace_type_p;
+
+
+  if (trace_type & BIT_TTL_NODEID)
+    {
+      u32 ttl_node_id_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ttl 0x%x node id 0x%x ",
+                 ttl_node_id_host_byte_order >> 24,
+                 ttl_node_id_host_byte_order & 0x00FFFFFF);
+
+      elt++;
+    }
+
+  if (trace_type & BIT_ING_INTERFACE && trace_type & BIT_ING_INTERFACE)
+    {
+      u32 ingress_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ingress 0x%x egress 0x%x ",
+                 ingress_host_byte_order >> 16,
+                 ingress_host_byte_order & 0xFFFF);
+      elt++;
+    }
+
+  if (trace_type & BIT_TIMESTAMP)
+    {
+      u32 ts_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "ts 0x%x \n", ts_in_host_byte_order);
+      elt++;
+    }
+
+  if (trace_type & BIT_APPDATA)
+    {
+      u32 appdata_in_host_byte_order = clib_net_to_host_u32 (*elt);
+      s = format (s, "app 0x%x ", appdata_in_host_byte_order);
+      elt++;
+    }
+
+  return s;
+}
+
+
+
+int
+vxlan_gpe_ioam_trace_rewrite_handler (u8 * rewrite_string, u8 * rewrite_size)
+{
+  vxlan_gpe_ioam_trace_option_t *trace_option = NULL;
+  u8 trace_data_size = 0;
+  u8 trace_option_elts = 0;
+  trace_profile *profile = NULL;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  if (PREDICT_FALSE (!rewrite_string))
+    return -1;
+
+  trace_option_elts = profile->num_elts;
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+  trace_option = (vxlan_gpe_ioam_trace_option_t *) rewrite_string;
+  trace_option->hdr.type = VXLAN_GPE_OPTION_TYPE_IOAM_TRACE;
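+  /* hdr.length counts everything after the two option type/length octets:
+   * the trace sub-header (trace type + elts left) plus the data list. */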
+  trace_option->hdr.length = 2 /*ioam_trace_type,data_list_elts_left */  +
+    trace_option_elts * trace_data_size;
+  trace_option->ioam_trace_type = profile->trace_type & TRACE_TYPE_MASK;
+  trace_option->data_list_elts_left = trace_option_elts;
+  *rewrite_size =
+    sizeof (vxlan_gpe_ioam_trace_option_t) +
+    (trace_option_elts * trace_data_size);
+
+  return 0;
+}
+
+
+int
+vxlan_gpe_ioam_trace_data_list_handler (vlib_buffer_t * b,
+                                       vxlan_gpe_ioam_option_t * opt,
+                                       u8 is_ipv4, u8 use_adj)
+{
+  u8 elt_index = 0;
+  vxlan_gpe_ioam_trace_option_t *trace =
+    (vxlan_gpe_ioam_trace_option_t *) opt;
+  time_u64_t time_u64;
+  u32 *elt;
+  int rv = 0;
+  trace_profile *profile = NULL;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+
+  time_u64.as_u64 = 0;
+
+  if (PREDICT_TRUE (trace->data_list_elts_left))
+    {
+      trace->data_list_elts_left--;
+      /* fetch_trace_data_size returns a size in bytes; divide by 4 to get
+       * the per-hop size in 32-bit words and index to this node's slot in
+       * the data list.
+       */
+      elt_index =
+       trace->data_list_elts_left *
+       fetch_trace_data_size (trace->ioam_trace_type) / 4;
+      elt = &trace->elts[elt_index];
+      if (is_ipv4)
+       {
+         if (trace->ioam_trace_type & BIT_TTL_NODEID)
+           {
+             ip4_header_t *ip0 = vlib_buffer_get_current (b);
+             *elt = clib_host_to_net_u32 (((ip0->ttl - 1) << 24) |
+                                          profile->node_id);
+             elt++;
+           }
+
+         if (trace->ioam_trace_type & BIT_ING_INTERFACE)
+           {
+             u16 tx_if = 0;
+             u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];
+             ip4_main_t *im4 = &ip4_main;
+             ip_lookup_main_t *lm = &im4->lookup_main;
+             if (use_adj)
+               {
+                 ip_adjacency_t *adj = ip_get_adjacency (lm, adj_index);
+                 tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
+               }
+
+             *elt =
+               (vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
+               tx_if;
+             *elt = clib_host_to_net_u32 (*elt);
+             elt++;
+           }
+       }
+      else
+       {
+         if (trace->ioam_trace_type & BIT_TTL_NODEID)
+           {
+             ip6_header_t *ip0 = vlib_buffer_get_current (b);
+             *elt = clib_host_to_net_u32 ((ip0->hop_limit << 24) |
+                                          profile->node_id);
+             elt++;
+           }
+         if (trace->ioam_trace_type & BIT_ING_INTERFACE)
+           {
+             u16 tx_if = 0;
+             u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];
+             ip6_main_t *im6 = &ip6_main;
+             ip_lookup_main_t *lm = &im6->lookup_main;
+             if (use_adj)
+               {
+                 ip_adjacency_t *adj = ip_get_adjacency (lm, adj_index);
+                 tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
+               }
+
+             *elt =
+               (vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
+               tx_if;
+             *elt = clib_host_to_net_u32 (*elt);
+             elt++;
+           }
+       }
+
+      if (trace->ioam_trace_type & BIT_TIMESTAMP)
+       {
+         /* Send least significant 32 bits */
+         f64 time_f64 =
+           (f64) (((f64) hm->unix_time_0) +
+                  (vlib_time_now (hm->vlib_main) - hm->vlib_time_0));
+
+         time_u64.as_u64 = time_f64 * trace_tsp_mul[profile->trace_tsp];
+         *elt = clib_host_to_net_u32 (time_u64.as_u32[0]);
+         elt++;
+       }
+
+      if (trace->ioam_trace_type & BIT_APPDATA)
+       {
+         /* $$$ set elt0->app_data */
+         *elt = clib_host_to_net_u32 (profile->app_data);
+         elt++;
+       }
+      vxlan_gpe_ioam_trace_stats_increment_counter
+       (VXLAN_GPE_IOAM_TRACE_SUCCESS, 1);
+    }
+  else
+    {
+      vxlan_gpe_ioam_trace_stats_increment_counter
+       (VXLAN_GPE_IOAM_TRACE_FAILED, 1);
+    }
+  return (rv);
+}
+
+u8 *
+vxlan_gpe_ioam_trace_data_list_trace_handler (u8 * s,
+                                             vxlan_gpe_ioam_option_t * opt)
+{
+  vxlan_gpe_ioam_trace_option_t *trace;
+  u8 trace_data_size_in_words = 0;
+  u32 *elt;
+  int elt_index = 0;
+
+  trace = (vxlan_gpe_ioam_trace_option_t *) opt;
+  s =
+    format (s, "  Trace Type 0x%x , %d elts left\n", trace->ioam_trace_type,
+           trace->data_list_elts_left);
+  trace_data_size_in_words =
+    fetch_trace_data_size (trace->ioam_trace_type) / 4;
+  elt = &trace->elts[0];
+  while ((u8 *) elt < ((u8 *) (&trace->elts[0]) + trace->hdr.length - 2
+                      /* -2 accounts for ioam_trace_type,elts_left */ ))
+    {
+      s = format (s, "    [%d] %U\n", elt_index,
+                 format_ioam_data_list_element,
+                 elt, &trace->ioam_trace_type);
+      elt_index++;
+      elt += trace_data_size_in_words;
+    }
+  return (s);
+}
+
+
+static clib_error_t *
+vxlan_gpe_show_ioam_trace_cmd_fn (vlib_main_t * vm,
+                                 unformat_input_t * input,
+                                 vlib_cli_command_t * cmd)
+{
+  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
+  u8 *s = 0;
+  int i = 0;
+
+  for (i = 0; i < VXLAN_GPE_IOAM_TRACE_N_STATS; i++)
+    {
+      s = format (s, " %s - %lu\n", vxlan_gpe_ioam_trace_stats_strings[i],
+                 hm->counters[i]);
+    }
+
+  vlib_cli_output (vm, "%v", s);
+  vec_free (s);
+  return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vxlan_gpe_show_ioam_trace_cmd, static) = {
+  .path = "show ioam vxlan-gpe trace",
+  .short_help = "iOAM trace statistics",
+  .function = vxlan_gpe_show_ioam_trace_cmd_fn,
+};
+/* *INDENT-ON* */
+
+
+static clib_error_t *
+vxlan_gpe_ioam_trace_init (vlib_main_t * vm)
+{
+  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
+  clib_error_t *error;
+
+  if ((error = vlib_call_init_function (vm, ip_main_init)))
+    return (error);
+
+  if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
+    return error;
+
+  if ((error = vlib_call_init_function (vm, vxlan_gpe_init)))
+    return (error);
+
+  hm->vlib_main = vm;
+  hm->vnet_main = vnet_get_main ();
+  memset (hm->counters, 0, sizeof (hm->counters));
+
+  if (vxlan_gpe_ioam_register_option
+      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
+       vxlan_gpe_ioam_trace_data_list_handler,
+       vxlan_gpe_ioam_trace_data_list_trace_handler) < 0)
+    return (clib_error_create
+           ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE failed"));
+
+
+  if (vxlan_gpe_ioam_add_register_option
+      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
+       sizeof (vxlan_gpe_ioam_trace_option_t),
+       vxlan_gpe_ioam_trace_rewrite_handler) < 0)
+    return (clib_error_create
+           ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE for rewrite failed"));
+
+
+  return (0);
+}
+
+VLIB_INIT_FUNCTION (vxlan_gpe_ioam_trace_init);
+
+int
+vxlan_gpe_trace_profile_cleanup (void)
+{
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = 0;
+
+  return 0;
+
+}
+
+static int
+vxlan_gpe_ioam_trace_get_sizeof_handler (u32 * result)
+{
+  u16 size = 0;
+  u8 trace_data_size = 0;
+  trace_profile *profile = NULL;
+
+  *result = 0;
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+  trace_data_size = fetch_trace_data_size (profile->trace_type);
+  if (PREDICT_FALSE (trace_data_size == 0))
+    return VNET_API_ERROR_INVALID_VALUE;
+
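+  /* The option length is carried in a single octet, which bounds the
+   * amount of trace data that can be encoded. */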
+  if (PREDICT_FALSE (profile->num_elts * trace_data_size > 254))
+    return VNET_API_ERROR_INVALID_VALUE;
+
+  size +=
+    sizeof (vxlan_gpe_ioam_trace_option_t) +
+    profile->num_elts * trace_data_size;
+  *result = size;
+
+  return 0;
+}
+
+
+int
+vxlan_gpe_trace_profile_setup (void)
+{
+  u32 trace_size = 0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  trace_profile *profile = NULL;
+
+
+  profile = trace_profile_find ();
+
+  if (PREDICT_FALSE (!profile))
+    {
+      return (-1);
+    }
+
+
+  if (vxlan_gpe_ioam_trace_get_sizeof_handler (&trace_size) < 0)
+    return (-1);
+
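+  /* Record the per-packet trace option size; vxlan_gpe_ioam_set_rewrite
+   * uses this when reserving space in the tunnel rewrite. */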
+  hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = trace_size;
+
+  return (0);
+}
+
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h
new file mode 100644 (file)
index 0000000..138eba1
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_vxlan_gpe_ioam_util_h__
+#define __included_vxlan_gpe_ioam_util_h__
+
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
+#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>
+#include <vnet/ip/ip.h>
+
+
+typedef struct
+{
+  u32 tunnel_index;
+  ioam_trace_t fmt_trace;
+} vxlan_gpe_ioam_v4_trace_t;
+
+
+static u8 *
+format_vxlan_gpe_ioam_v4_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  vxlan_gpe_ioam_v4_trace_t *t1 = va_arg (*args, vxlan_gpe_ioam_v4_trace_t *);
+  ioam_trace_t *t = &(t1->fmt_trace);
+  vxlan_gpe_ioam_option_t *fmt_trace0;
+  vxlan_gpe_ioam_option_t *opt0, *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  u8 type0;
+
+  fmt_trace0 = (vxlan_gpe_ioam_option_t *) t->option_data;
+
+  s = format (s, "VXLAN-GPE-IOAM: next_index %d len %d traced %d",
+             t->next_index, fmt_trace0->length, t->trace_len);
+
+  opt0 = (vxlan_gpe_ioam_option_t *) (fmt_trace0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) (((u8 *) fmt_trace0) + t->trace_len);
+
+  while (opt0 < limit0)
+    {
+      type0 = opt0->type;
+      switch (type0)
+       {
+       case 0:         /* Pad1, advance by a single octet */
+         opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+         break;
+
+       default:
+         if (hm->trace[type0])
+           {
+             s = (*hm->trace[type0]) (s, opt0);
+           }
+         else
+           {
+             s =
+               format (s, "\n    unrecognized option %d length %d", type0,
+                       opt0->length);
+           }
+         opt0 =
+           (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+                                        sizeof (vxlan_gpe_ioam_option_t));
+         break;
+       }
+    }
+
+  s = format (s, "VXLAN-GPE-IOAM: tunnel %d", t1->tunnel_index);
+  return s;
+}
+
+
+always_inline void
+vxlan_gpe_encap_decap_ioam_v4_one_inline (vlib_main_t * vm,
+                                         vlib_node_runtime_t * node,
+                                         vxlan_gpe_main_t * ngm,
+                                         vlib_buffer_t * b0,
+                                         u32 * next0, u32 drop_node_val,
+                                         u8 use_adj)
+{
+  ip4_header_t *ip0;
+  udp_header_t *udp_hdr0;
+  vxlan_gpe_header_t *gpe_hdr0;
+  vxlan_gpe_ioam_hdr_t *gpe_ioam0;
+  vxlan_gpe_ioam_option_t *opt0;
+  vxlan_gpe_ioam_option_t *limit0;
+  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
+
+  /* Populate the iOAM header */
+  ip0 = vlib_buffer_get_current (b0);
+  udp_hdr0 = (udp_header_t *) (ip0 + 1);
+  gpe_hdr0 = (vxlan_gpe_header_t *) (udp_hdr0 + 1);
+  gpe_ioam0 = (vxlan_gpe_ioam_hdr_t *) (gpe_hdr0 + 1);
+  opt0 = (vxlan_gpe_ioam_option_t *) (gpe_ioam0 + 1);
+  limit0 = (vxlan_gpe_ioam_option_t *) ((u8 *) gpe_ioam0 + gpe_ioam0->length);
+
+  /*
+   * Basic validity checks
+   */
+  if (gpe_ioam0->length > clib_net_to_host_u16 (ip0->length))
+    {
+      *next0 = drop_node_val;
+      return;
+    }
+
+  /* Scan the set of h-b-h options, process ones that we understand */
+  while (opt0 < limit0)
+    {
+      u8 type0;
+      type0 = opt0->type;
+      switch (type0)
+       {
+       case 0:         /* Pad1 */
+         opt0 = (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + 1);
+         continue;
+       case 1:         /* PadN */
+         break;
+       default:
+         if (hm->options[type0])
+           {
+             if ((*hm->options[type0]) (b0, opt0, 1 /* is_ipv4 */ ,
+                                        use_adj) < 0)
+               {
+                 *next0 = drop_node_val;
+                 return;
+               }
+           }
+         break;
+       }
+      opt0 =
+       (vxlan_gpe_ioam_option_t *) (((u8 *) opt0) + opt0->length +
+                                    sizeof (vxlan_gpe_ioam_hdr_t));
+    }
+
+
+  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+    {
+      vxlan_gpe_ioam_v4_trace_t *t =
+       vlib_add_trace (vm, node, b0, sizeof (*t));
+      u32 trace_len = gpe_ioam0->length;
+      t->fmt_trace.next_index = *next0;
+      /* Capture the ioam option verbatim */
+      trace_len = (trace_len < ARRAY_LEN (t->fmt_trace.option_data)) ?
+       trace_len : ARRAY_LEN (t->fmt_trace.option_data);
+      t->fmt_trace.trace_len = trace_len;
+      clib_memcpy (&(t->fmt_trace.option_data), gpe_ioam0, trace_len);
+    }
+  return;
+}
+
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
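The helper above is the common per-packet walk shared by the VxLAN-GPE iOAM encap, decap and pop nodes: it locates the GPE iOAM header behind the outer IPv4/UDP/GPE headers, runs the registered handler for each option, and rewrites *next0 to the caller-supplied drop next on any failure. A minimal call-site sketch follows; MY_NODE_NEXT_IP4_LOOKUP and MY_NODE_NEXT_DROP are hypothetical next-node indices of the calling node, and the usual vlib node boilerplate (vm, node, b0) is assumed:

    /* Sketch only: MY_NODE_NEXT_* are hypothetical nexts of the calling
     * graph node; vxlan_gpe_main is the global declared in vxlan_gpe.h. */
    u32 next0 = MY_NODE_NEXT_IP4_LOOKUP;

    vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, &vxlan_gpe_main, b0,
                                              &next0, MY_NODE_NEXT_DROP,
                                              0 /* use_adj */);
    /* next0 now holds either the normal next or the drop next. */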
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h
new file mode 100644 (file)
index 0000000..cc0a10a
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef included_vxlan_gpe_msg_enum_h
+#define included_vxlan_gpe_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum {
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+    /* We'll want to know how many messages IDs we need... */
+    VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+#endif /* included_vxlan_gpe_msg_enum_h */
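Including the generated vxlan_gpe_all_api_h.h with vl_msg_id defined expands every message definition into one entry of this enum, and VL_MSG_FIRST_AVAILABLE gives the message count. The enum values are plugin-relative; the ID actually used on the wire is offset by the per-plugin base the client fetches at connect time. A minimal sketch, where msg_id_base stands for the value returned by vl_client_get_first_plugin_msg_id:

    /* Sketch: compose the on-the-wire message ID for this plugin. */
    u16 wire_id = (u16) (VL_API_VXLAN_GPE_IOAM_ENABLE + msg_id_base);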
diff --git a/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_test.c b/plugins/ioam-plugin/ioam/lib-vxlan-gpe/vxlan_gpe_test.c
new file mode 100644 (file)
index 0000000..500e056
--- /dev/null
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * vxlan_gpe_test.c - test harness for vxlan_gpe plugin
+ *------------------------------------------------------------------
+ */
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+#include <vppinfra/error.h>
+
+/* Declare message IDs */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_msg_enum.h>
+
+/* define message structures */
+#define vl_typedefs
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_typedefs
+
+/* declare message handlers for each api */
+
+#define vl_endianfun           /* define message structures */
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number. */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_all_api_h.h>
+#undef vl_api_version
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
+#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
+
+typedef struct
+{
+  /* API message ID base */
+  u16 msg_id_base;
+  vat_main_t *vat_main;
+} vxlan_gpe_test_main_t;
+
+vxlan_gpe_test_main_t vxlan_gpe_test_main;
+
+#define foreach_standard_reply_retval_handler     \
+_(vxlan_gpe_ioam_enable_reply)                    \
+_(vxlan_gpe_ioam_disable_reply)                   \
+_(vxlan_gpe_ioam_vni_enable_reply)                \
+_(vxlan_gpe_ioam_vni_disable_reply)
+
+#define _(n)                                            \
+    static void vl_api_##n##_t_handler                  \
+    (vl_api_##n##_t * mp)                               \
+    {                                                   \
+        vat_main_t * vam = vxlan_gpe_test_main.vat_main;   \
+        i32 retval = ntohl(mp->retval);                 \
+        if (vam->async_mode) {                          \
+            vam->async_errors += (retval < 0);          \
+        } else {                                        \
+            vam->retval = retval;                       \
+            vam->result_ready = 1;                      \
+        }                                               \
+    }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers
+ * we just generated
+ */
+#define foreach_vpe_api_reply_msg                                       \
+_(VXLAN_GPE_IOAM_ENABLE_REPLY, vxlan_gpe_ioam_enable_reply)             \
+_(VXLAN_GPE_IOAM_DISABLE_REPLY, vxlan_gpe_ioam_disable_reply)           \
+_(VXLAN_GPE_IOAM_VNI_ENABLE_REPLY, vxlan_gpe_ioam_vni_enable_reply)     \
+_(VXLAN_GPE_IOAM_VNI_DISABLE_REPLY, vxlan_gpe_ioam_vni_disable_reply)   \
+
+
+/* M: construct, but don't yet send a message */
+
+#define M(T,t)                                                  \
+do {                                                            \
+    vam->result_ready = 0;                                      \
+    mp = vl_msg_api_alloc(sizeof(*mp));                         \
+    memset (mp, 0, sizeof (*mp));                               \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base);      \
+    mp->client_index = vam->my_client_index;                    \
+} while(0);
+
+#define M2(T,t,n)                                               \
+do {                                                            \
+    vam->result_ready = 0;                                      \
+    mp = vl_msg_api_alloc(sizeof(*mp)+(n));                     \
+    memset (mp, 0, sizeof (*mp));                               \
+    mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base);      \
+    mp->client_index = vam->my_client_index;                    \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout */
+#define W                                       \
+do {                                            \
+    timeout = vat_time_now (vam) + 1.0;         \
+                                                \
+    while (vat_time_now (vam) < timeout) {      \
+        if (vam->result_ready == 1) {           \
+            return (vam->retval);               \
+        }                                       \
+    }                                           \
+    return -99;                                 \
+} while(0);
+
+
+static int
+api_vxlan_gpe_ioam_enable (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+  unformat_input_t *input = vam->input;
+  vl_api_vxlan_gpe_ioam_enable_t *mp;
+  f64 timeout;
+  u32 id = 0;
+  int has_trace_option = 0;
+  int has_pow_option = 0;
+  int has_ppc_option = 0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "trace"))
+       has_trace_option = 1;
+      else if (unformat (input, "pow"))
+       has_pow_option = 1;
+      else if (unformat (input, "ppc encap"))
+       has_ppc_option = PPC_ENCAP;
+      else if (unformat (input, "ppc decap"))
+       has_ppc_option = PPC_DECAP;
+      else if (unformat (input, "ppc none"))
+       has_ppc_option = PPC_NONE;
+      else
+       break;
+    }
+  M (VXLAN_GPE_IOAM_ENABLE, vxlan_gpe_ioam_enable);
+  mp->id = htons (id);
+  mp->trace_ppc = has_ppc_option;
+  mp->pow_enable = has_pow_option;
+  mp->trace_enable = has_trace_option;
+
+
+  S;
+  W;
+
+  return (0);
+}
+
+
+static int
+api_vxlan_gpe_ioam_disable (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+  vl_api_vxlan_gpe_ioam_disable_t *mp;
+  f64 timeout;
+
+  M (VXLAN_GPE_IOAM_DISABLE, vxlan_gpe_ioam_disable);
+  S;
+  W;
+  return 0;
+}
+
+static int
+api_vxlan_gpe_ioam_vni_enable (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+  unformat_input_t *line_input = vam->input;
+  vl_api_vxlan_gpe_ioam_vni_enable_t *mp;
+  ip4_address_t local4, remote4;
+  ip6_address_t local6, remote6;
+  u8 ipv4_set = 0, ipv6_set = 0;
+  u8 local_set = 0;
+  u8 remote_set = 0;
+  u32 vni;
+  u8 vni_set = 0;
+  f64 timeout;
+
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "local %U", unformat_ip4_address, &local4))
+       {
+         local_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip4_address, &remote4))
+       {
+         remote_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "local %U",
+                        unformat_ip6_address, &local6))
+       {
+         local_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip6_address, &remote6))
+       {
+         remote_set = 1;
+         ipv6_set = 1;
+       }
+
+      else if (unformat (line_input, "vni %d", &vni))
+       vni_set = 1;
+      else
+       {
+         errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+         return -99;
+       }
+    }
+
+  if (local_set == 0)
+    {
+      errmsg ("tunnel local address not specified\n");
+      return -99;
+    }
+  if (remote_set == 0)
+    {
+      errmsg ("tunnel remote address not specified\n");
+      return -99;
+    }
+  if (ipv4_set && ipv6_set)
+    {
+      errmsg ("both IPv4 and IPv6 addresses specified");
+      return -99;
+    }
+
+  if (vni_set == 0)
+    {
+      errmsg ("vni not specified\n");
+      return -99;
+    }
+
+  M (VXLAN_GPE_IOAM_VNI_ENABLE, vxlan_gpe_ioam_vni_enable);
+
+
+  if (ipv6_set)
+    {
+      clib_memcpy (&mp->local, &local6, sizeof (local6));
+      clib_memcpy (&mp->remote, &remote6, sizeof (remote6));
+    }
+  else
+    {
+      clib_memcpy (&mp->local, &local4, sizeof (local4));
+      clib_memcpy (&mp->remote, &remote4, sizeof (remote4));
+    }
+
+  mp->vni = ntohl (vni);
+  mp->is_ipv6 = ipv6_set;
+
+  S;
+  W;
+
+  return (0);
+}
+
+static int
+api_vxlan_gpe_ioam_vni_disable (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+
+  unformat_input_t *line_input = vam->input;
+  vl_api_vxlan_gpe_ioam_vni_disable_t *mp;
+  ip4_address_t local4, remote4;
+  ip6_address_t local6, remote6;
+  u8 ipv4_set = 0, ipv6_set = 0;
+  u8 local_set = 0;
+  u8 remote_set = 0;
+  u32 vni;
+  u8 vni_set = 0;
+  f64 timeout;
+
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "local %U", unformat_ip4_address, &local4))
+       {
+         local_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip4_address, &remote4))
+       {
+         remote_set = 1;
+         ipv4_set = 1;
+       }
+      else if (unformat (line_input, "local %U",
+                        unformat_ip6_address, &local6))
+       {
+         local_set = 1;
+         ipv6_set = 1;
+       }
+      else if (unformat (line_input, "remote %U",
+                        unformat_ip6_address, &remote6))
+       {
+         remote_set = 1;
+         ipv6_set = 1;
+       }
+
+      else if (unformat (line_input, "vni %d", &vni))
+       vni_set = 1;
+      else
+       {
+         errmsg ("parse error '%U'\n", format_unformat_error, line_input);
+         return -99;
+       }
+    }
+
+  if (local_set == 0)
+    {
+      errmsg ("tunnel local address not specified\n");
+      return -99;
+    }
+  if (remote_set == 0)
+    {
+      errmsg ("tunnel remote address not specified\n");
+      return -99;
+    }
+  if (ipv4_set && ipv6_set)
+    {
+      errmsg ("both IPv4 and IPv6 addresses specified");
+      return -99;
+    }
+
+  if (vni_set == 0)
+    {
+      errmsg ("vni not specified\n");
+      return -99;
+    }
+
+  M (VXLAN_GPE_IOAM_VNI_DISABLE, vxlan_gpe_ioam_vni_disable);
+
+
+  if (ipv6_set)
+    {
+      clib_memcpy (&mp->local, &local6, sizeof (local6));
+      clib_memcpy (&mp->remote, &remote6, sizeof (remote6));
+    }
+  else
+    {
+      clib_memcpy (&mp->local, &local4, sizeof (local4));
+      clib_memcpy (&mp->remote, &remote4, sizeof (remote4));
+    }
+
+  mp->vni = ntohl (vni);
+  mp->is_ipv6 = ipv6_set;
+
+  S;
+  W;
+
+  return 0;
+}
+
+
+
+/*
+ * List of messages that the api test plugin sends,
+ * and that the data plane plugin processes
+ */
+#define foreach_vpe_api_msg \
+_(vxlan_gpe_ioam_enable, ""\
+  "[trace] [pow] [ppc <encap|decap|none>]") \
+_(vxlan_gpe_ioam_disable, "")                    \
+_(vxlan_gpe_ioam_vni_enable, ""\
+  "local <local_vtep_ip> remote <remote_vtep_ip> vni <vnid>") \
+_(vxlan_gpe_ioam_vni_disable, ""\
+  "local <local_vtep_ip> remote <remote_vtep_ip> vni <vnid>") \
+
+
+void
+vat_api_hookup (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+  /* Hook up handlers for replies from the data plane plug-in */
+#define _(N,n)                                                  \
+    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),     \
+                           #n,                                  \
+                           vl_api_##n##_t_handler,              \
+                           vl_noop_handler,                     \
+                           vl_api_##n##_t_endian,               \
+                           vl_api_##n##_t_print,                \
+                           sizeof(vl_api_##n##_t), 1);
+  foreach_vpe_api_reply_msg;
+#undef _
+
+  /* API messages we can send */
+#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n);
+  foreach_vpe_api_msg;
+#undef _
+
+  /* Help strings */
+#define _(n,h) hash_set_mem (vam->help_by_name, #n, h);
+  foreach_vpe_api_msg;
+#undef _
+}
+
+clib_error_t *
+vat_plugin_register (vat_main_t * vam)
+{
+  vxlan_gpe_test_main_t *sm = &vxlan_gpe_test_main;
+  u8 *name;
+
+  sm->vat_main = vam;
+
+  name = format (0, "ioam_vxlan_gpe_%08x%c", api_version, 0);
+  sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name);
+
+  if (sm->msg_id_base != (u16) ~ 0)
+    vat_api_hookup (vam);
+
+  vec_free (name);
+
+  return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
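Once this test plugin is loaded, the entries of foreach_vpe_api_msg become commands at the vpp_api_test prompt. A hypothetical session (addresses and VNI are examples only):

    vat# vxlan_gpe_ioam_vni_enable local 192.168.1.1 remote 192.168.1.2 vni 9
    vat# vxlan_gpe_ioam_enable trace
    vat# vxlan_gpe_ioam_disable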
index 7d157cf..e86a6d0 100644 (file)
@@ -69,11 +69,6 @@ typedef struct {
   /* Enabling analysis of iOAM data on decap node */
   u8 has_analyse_option;
 
-#define TSP_SECONDS              0
-#define TSP_MILLISECONDS         1
-#define TSP_MICROSECONDS         2
-#define TSP_NANOSECONDS          3
-  
   /* Array of function pointers to ADD and POP HBH option handling routines */
   u8 options_size[MAX_IP6_HBH_OPTION];
   int (*add_options[MAX_IP6_HBH_OPTION])(u8 *rewrite_string, u8 *rewrite_size);
@@ -189,4 +184,6 @@ static inline u8 is_seqno_enabled (void)
   return (ip6_hop_by_hop_ioam_main.has_seqno_option);
 }
 
+int ip6_trace_profile_setup (void);
 #endif /* __included_ip6_hop_by_hop_ioam_h__ */
index f6d1402..22ab4b6 100644 (file)
@@ -211,10 +211,14 @@ vxlan_gpe_input (vlib_main_t * vm,
 
       if (is_ip4)
       {
-        next0 = (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
-                iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
-        next1 = (iuvn4_1->vxlan.protocol < node->n_next_nodes) ?
-                iuvn4_1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+        next0 =
+            (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn4_0->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
+        next1 =
+            (iuvn4_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn4_1->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
 
         key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
         key4_1.local = iuvn4_1->ip4.dst_address.as_u32;
@@ -273,6 +277,28 @@ vxlan_gpe_input (vlib_main_t * vm,
       }
       else /* is_ip6 */
       {
+        next0 =
+            (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn6_0->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
+        next1 =
+            (iuvn6_1->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn6_1->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
+
+        key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
+        key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
+        key6_1.local.as_u64[0] = iuvn6_1->ip6.dst_address.as_u64[0];
+        key6_1.local.as_u64[1] = iuvn6_1->ip6.dst_address.as_u64[1];
+
+        key6_0.remote.as_u64[0] = iuvn6_0->ip6.src_address.as_u64[0];
+        key6_0.remote.as_u64[1] = iuvn6_0->ip6.src_address.as_u64[1];
+        key6_1.remote.as_u64[0] = iuvn6_1->ip6.src_address.as_u64[0];
+        key6_1.remote.as_u64[1] = iuvn6_1->ip6.src_address.as_u64[1];
+
+        key6_0.vni = iuvn6_0->vxlan.vni_res;
+        key6_1.vni = iuvn6_1->vxlan.vni_res;
+
         /* Processing for key6_0 */
         if (PREDICT_FALSE(memcmp (&key6_0, &last_key6, sizeof(last_key6)) != 0))
         {
@@ -293,7 +319,6 @@ vxlan_gpe_input (vlib_main_t * vm,
 
       t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
 
-      next0 = t0->protocol;
 
       sw_if_index0 = t0->sw_if_index;
       len0 = vlib_buffer_length_in_chain (vm, b0);
@@ -378,7 +403,6 @@ vxlan_gpe_input (vlib_main_t * vm,
 
       t1 = pool_elt_at_index(ngm->tunnels, tunnel_index1);
 
-      next1 = t1->protocol;
       sw_if_index1 = t1->sw_if_index;
       len1 = vlib_buffer_length_in_chain (vm, b1);
 
@@ -477,8 +501,9 @@ vxlan_gpe_input (vlib_main_t * vm,
       if (is_ip4)
       {
         next0 =
-            (iuvn4_0->vxlan.protocol < node->n_next_nodes) ?
-                iuvn4_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+            (iuvn4_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn4_0->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
 
         key4_0.local = iuvn4_0->ip4.dst_address.as_u32;
         key4_0.remote = iuvn4_0->ip4.src_address.as_u32;
@@ -507,8 +532,10 @@ vxlan_gpe_input (vlib_main_t * vm,
       }
       else /* is_ip6 */
       {
-        next0 = (iuvn6_0->vxlan.protocol < node->n_next_nodes) ?
-                iuvn6_0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+        next0 =
+            (iuvn6_0->vxlan.protocol < VXLAN_GPE_PROTOCOL_MAX)?
+            ngm->decap_next_node_list[iuvn6_0->vxlan.protocol]: \
+            VXLAN_GPE_INPUT_NEXT_DROP;
 
         key6_0.local.as_u64[0] = iuvn6_0->ip6.dst_address.as_u64[0];
         key6_0.local.as_u64[1] = iuvn6_0->ip6.dst_address.as_u64[1];
@@ -536,7 +563,6 @@ vxlan_gpe_input (vlib_main_t * vm,
 
       t0 = pool_elt_at_index(ngm->tunnels, tunnel_index0);
 
-      next0 = t0->protocol;
 
       sw_if_index0 = t0->sw_if_index;
       len0 = vlib_buffer_length_in_chain (vm, b0);
@@ -614,6 +640,24 @@ vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */1);
 }
 
+
+void
+vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+  vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+  hm->decap_next_node_list[protocol_id] = next_node_index;
+  return;
+}
+
+void
+vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
+{
+  vxlan_gpe_main_t *hm = &vxlan_gpe_main;
+  hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
+  return;
+}
+
+
 /**
  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
  *
index 9cd2c72..3a486e5 100644 (file)
@@ -47,16 +47,6 @@ typedef enum {
     VXLAN_GPE_ENCAP_N_ERROR,
 } vxlan_gpe_encap_error_t;
 
-/**
- * @brief Struct for defining VXLAN GPE next nodes
- */
-typedef enum {
-  VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
-  VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
-  VXLAN_GPE_ENCAP_NEXT_DROP,
-  VXLAN_GPE_ENCAP_N_NEXT
-} vxlan_gpe_encap_next_t;
-
 /**
  * @brief Struct for tracing VXLAN GPE encapsulated packets
  */
@@ -96,22 +86,14 @@ u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
  */
 always_inline void
 vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
-                            vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
+                            vxlan_gpe_tunnel_t * t0, u32 * next0,
+                            u8 is_v4)
 {
   ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
   ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
 
-  if (is_v4)
-    {
-      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
-      next0[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
-
-    }
-  else
-    {
-      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
-      next0[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
-    }
+  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
+  next0[0] = t0->encap_next_node;
 }
 
 /**
@@ -128,25 +110,17 @@ vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
  *
  */
 always_inline void
-vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, vlib_buffer_t * b1,
-                            vxlan_gpe_tunnel_t * t0, vxlan_gpe_tunnel_t * t1, u32 * next0,
+vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
+                            vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
+                            vxlan_gpe_tunnel_t * t1, u32 * next0,
                             u32 * next1, u8 is_v4)
 {
   ASSERT(sizeof(ip4_vxlan_gpe_header_t) == 36);
   ASSERT(sizeof(ip6_vxlan_gpe_header_t) == 56);
 
-  if (is_v4)
-    {
-      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
-      ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 36, 1);
-      next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
-    }
-  else
-    {
-      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
-      ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 56, 0);
-      next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
-    }
+  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
+  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
+  next0[0] = next1[0] = t0->encap_next_node;
 }
 
 /**
index 979864e..b97510c 100644 (file)
@@ -121,6 +121,7 @@ VNET_DEVICE_CLASS (vxlan_gpe_device_class,static) = {
   .admin_up_down_function = vxlan_gpe_interface_admin_up_down,
 };
 
+
 /**
  * @brief Formatting function for tracing VXLAN GPE with length
  *
@@ -172,15 +173,17 @@ _(decap_fib_index)
  * @return rc
  *
  */
-static int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t)
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size, 
+                        u8 protocol_override, uword encap_next_node)
 {
   u8 *rw = 0;
   ip4_header_t * ip0;
   ip4_vxlan_gpe_header_t * h0;
   int len;
 
-  len = sizeof (*h0);
+  len = sizeof (*h0) + extension_size;
 
+  vec_free(t->rewrite);
   vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
 
   h0 = (ip4_vxlan_gpe_header_t *) rw;
@@ -203,10 +206,19 @@ static int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t)
   /* VXLAN header. Are we having fun yet? */
   h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
   h0->vxlan.ver_res = VXLAN_GPE_VERSION;
-  h0->vxlan.protocol = t->protocol;
+  if (protocol_override)
+  {
+      h0->vxlan.protocol = protocol_override;
+  }
+  else
+  {
+      h0->vxlan.protocol = t->protocol;
+  }
+  t->rewrite_size = sizeof(ip4_vxlan_gpe_header_t) + extension_size;
   h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
 
   t->rewrite = rw;
+  t->encap_next_node = encap_next_node;
   return (0);
 }
 
@@ -218,15 +230,17 @@ static int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t)
  * @return rc
  *
  */
-static int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t)
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size, 
+                        u8 protocol_override, uword encap_next_node)
 {
   u8 *rw = 0;
   ip6_header_t * ip0;
   ip6_vxlan_gpe_header_t * h0;
   int len;
 
-  len = sizeof (*h0);
+  len = sizeof (*h0) + extension_size;
 
+  vec_free(t->rewrite);
   vec_validate_aligned (rw, len-1, CLIB_CACHE_LINE_BYTES);
 
   h0 = (ip6_vxlan_gpe_header_t *) rw;
@@ -249,10 +263,19 @@ static int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t)
   /* VXLAN header. Are we having fun yet? */
   h0->vxlan.flags = VXLAN_GPE_FLAGS_I | VXLAN_GPE_FLAGS_P;
   h0->vxlan.ver_res = VXLAN_GPE_VERSION;
-  h0->vxlan.protocol = t->protocol;
+  if (protocol_override)
+  {
+      h0->vxlan.protocol = protocol_override;
+  }
+  else
+  {
+      h0->vxlan.protocol = t->protocol;
+  }
+  t->rewrite_size = sizeof(ip6_vxlan_gpe_header_t) + extension_size;
   h0->vxlan.vni_res = clib_host_to_net_u32 (t->vni<<8);
 
   t->rewrite = rw;
+  t->encap_next_node = encap_next_node;
   return (0);
 }
 
@@ -319,9 +342,9 @@ int vnet_vxlan_gpe_add_del_tunnel
       if (!a->is_ip6) t->flags |= VXLAN_GPE_TUNNEL_IS_IPV4;
 
       if (!a->is_ip6) {
-        rv = vxlan4_gpe_rewrite (t);
+        rv = vxlan4_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP);
       } else {
-        rv = vxlan6_gpe_rewrite (t);
+        rv = vxlan6_gpe_rewrite (t, 0, 0, VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP);
       }
 
       if (rv)
@@ -621,6 +644,14 @@ clib_error_t *vxlan_gpe_init (vlib_main_t *vm)
                          vxlan4_gpe_input_node.index, 1 /* is_ip4 */);
   udp_register_dst_port (vm, UDP_DST_PORT_vxlan6_gpe,
                          vxlan6_gpe_input_node.index, 0 /* is_ip4 */);
+
+  /* Register the list of standard decap protocols supported */
+  vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP4,
+                                     VXLAN_GPE_INPUT_NEXT_IP4_INPUT);
+  vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IP6,
+                                     VXLAN_GPE_INPUT_NEXT_IP6_INPUT);
+  vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_ETHERNET,
+                                     VXLAN_GPE_INPUT_NEXT_ETHERNET_INPUT);
   return 0;
 }
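Payload-specific plugins can extend this protocol-indexed dispatch table from their own init or enable paths. A minimal sketch, where my_ioam_decap_node_index is a hypothetical node index supplied by the iOAM plugin:

    /* Steer GPE packets whose next-protocol is IOAM to a plugin node... */
    vxlan_gpe_register_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM,
                                       my_ioam_decap_node_index);

    /* ...and fall back to drop when the feature is disabled. */
    vxlan_gpe_unregister_decap_protocol (VXLAN_GPE_PROTOCOL_IOAM,
                                         my_ioam_decap_node_index);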
 
index e33725f..1b4bc44 100644 (file)
@@ -114,6 +114,12 @@ typedef struct {
 
   /** flags */
   u32 flags;
+
+  /** rewrite size for dynamic plugins like iOAM */
+  u8  rewrite_size;
+
+  /** Next node after VxLAN-GPE encap */
+  uword encap_next_node;
 } vxlan_gpe_tunnel_t;
 
 /** Flags for vxlan_gpe_tunnel_t */
@@ -162,6 +168,9 @@ typedef struct {
   vlib_main_t * vlib_main;
   /** State convenience vnet_main_t */
   vnet_main_t * vnet_main;
+
+  /** List of next nodes for the decap indexed on protocol */
+  uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
 } vxlan_gpe_main_t;
 
 vxlan_gpe_main_t vxlan_gpe_main;
@@ -188,7 +197,25 @@ int vnet_vxlan_gpe_add_del_tunnel
 (vnet_vxlan_gpe_add_del_tunnel_args_t *a, u32 * sw_if_indexp);
 
 
+int vxlan4_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                        u8 protocol_override, uword encap_next_node);
+int vxlan6_gpe_rewrite (vxlan_gpe_tunnel_t * t, u32 extension_size,
+                       u8 protocol_override, uword encap_next_node);
+
+/**
+ * @brief Struct for defining VXLAN GPE next nodes
+ */
+typedef enum {
+  VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
+  VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
+  VXLAN_GPE_ENCAP_NEXT_DROP,
+  VXLAN_GPE_ENCAP_N_NEXT
+} vxlan_gpe_encap_next_t;
+
+
+void vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index);
 
+void vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index);
 
 
 #endif /* included_vnet_vxlan_gpe_h */
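Exporting the rewrite helpers lets a transport plugin rebuild a tunnel's encap string with room for extra headers, override the GPE next-protocol field, and choose the node packets visit after encap. A minimal sketch for an IPv4 tunnel, where ioam_hdr_len and my_ioam_encap_next_index are hypothetical values owned by the iOAM plugin:

    /* Grow the rewrite by ioam_hdr_len bytes, advertise IOAM as the GPE
     * payload, and send post-encap packets to a plugin-provided node. */
    if (vxlan4_gpe_rewrite (t, ioam_hdr_len, VXLAN_GPE_PROTOCOL_IOAM,
                            my_ioam_encap_next_index) != 0)
      return -1;   /* sketch-level error handling */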
index e5501b9..ec3c2e5 100644 (file)
@@ -68,7 +68,8 @@
 _ (0x01, IP4)                         \
 _ (0x02, IP6)                         \
 _ (0x03, ETHERNET)                  \
-_ (0x04, NSH)
+_ (0x04, NSH)               \
+_ (0x05, IOAM)
 
 
 /**
@@ -77,11 +78,13 @@ _ (0x04, NSH)
  * 2 - IP6
  * 3 - ETHERNET
  * 4 - NSH
+ * 5 - IOAM
  */
 typedef enum {
 #define _(n,f) VXLAN_GPE_PROTOCOL_##f = n,
   foreach_vxlan_gpe_protocol
 #undef _
+  VXLAN_GPE_PROTOCOL_MAX,
 } vxlan_gpe_protocol_t;
 
 /**
index e9cef11..85821ee 100644 (file)
@@ -11140,7 +11140,7 @@ api_vxlan_gpe_add_del_tunnel (vat_main_t * vam)
 
   mp->encap_vrf_id = ntohl (encap_vrf_id);
   mp->decap_vrf_id = ntohl (decap_vrf_id);
-  mp->protocol = ntohl (protocol);
+  mp->protocol = protocol;
   mp->vni = ntohl (vni);
   mp->is_add = is_add;
   mp->is_ipv6 = ipv6_set;