/*
- * nsh.c: nsh packet processing
+ * decap.c: vxlan-gpe tunnel decap packet processing
*
* Copyright (c) 2013 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
-#include <vnet/nsh-gre/nsh_gre.h>
-#include <vnet/nsh/nsh_packet.h>
-
-vlib_node_registration_t nsh_input_node;
+#include <vnet/vxlan-gpe/vxlan_gpe.h>
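+
+/* Per-packet trace record: the next node chosen, the tunnel the packet
+   matched (~0 if none) and the decap error, if any. */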
typedef struct {
u32 next_index;
u32 tunnel_index;
u32 error;
- nsh_header_t h;
-} nsh_rx_trace_t;
-
+} vxlan_gpe_rx_trace_t;
-u8 * format_nsh_header_with_length (u8 * s, va_list * args)
-{
- nsh_header_t * h = va_arg (*args, nsh_header_t *);
- u32 max_header_bytes = va_arg (*args, u32);
- u32 tmp, header_bytes;
-
- header_bytes = sizeof (h[0]);
- if (max_header_bytes != 0 && header_bytes > max_header_bytes)
- return format (s, "gre-nsh header truncated");
-
- s = format (s, "ver %d ", h->ver_o_c>>6);
-
- if (h->ver_o_c & NSH_O_BIT)
- s = format (s, "O-set ");
-
- if (h->ver_o_c & NSH_C_BIT)
- s = format (s, "C-set ");
-
- s = format (s, "len %d (%d bytes) md_type %d next_protocol %d\n",
- h->length, h->length * 4, h->md_type, h->next_protocol);
-
- tmp = clib_net_to_host_u32 (h->spi_si);
-
- s = format (s, " spi %d si %d ",
- (tmp>>NSH_SPI_SHIFT) & NSH_SPI_MASK,
- tmp & NSH_SINDEX_MASK);
-
- s = format (s, "c1 %u c2 %u c3 %u c4 %u",
- clib_net_to_host_u32 (h->c1),
- clib_net_to_host_u32 (h->c2),
- clib_net_to_host_u32 (h->c3),
- clib_net_to_host_u32 (h->c4));
-
- return s;
-}
-
-
-u8 * format_nsh_rx_trace (u8 * s, va_list * args)
+static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- nsh_rx_trace_t * t = va_arg (*args, nsh_rx_trace_t *);
+ vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
if (t->tunnel_index != ~0)
{
- s = format (s, "NSH: tunnel %d next %d error %d", t->tunnel_index,
+ s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
t->next_index, t->error);
}
else
{
- s = format (s, "NSH: no tunnel next %d error %d\n", t->next_index,
+ s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
t->error);
}
- s = format (s, "\n %U", format_nsh_header_with_length, &t->h,
- (u32) sizeof (t->h) /* max size */);
+ return s;
+}
+
+
+static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
return s;
}
static uword
-nsh_gre_input (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+vxlan_gpe_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
- nsh_gre_main_t * ngm = &nsh_gre_main;
+ vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
vnet_main_t * vnm = ngm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 last_tunnel_index = ~0;
- u64 last_key = ~0ULL;
+ vxlan_gpe_tunnel_key_t last_key;
u32 pkts_decapsulated = 0;
u32 cpu_index = os_get_cpu_number();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
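+
+ /* Invalidate the one-entry tunnel cache: an all-ones key cannot match
+    any real (local, remote, vni) tuple. */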
+ memset (&last_key, 0xff, sizeof (last_key));
+
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
u32 bi0, bi1;
vlib_buffer_t * b0, * b1;
u32 next0, next1;
- nsh_header_t * h0, * h1;
+ ip4_vxlan_gpe_header_t * iuvn0, * iuvn1;
uword * p0, * p1;
u32 tunnel_index0, tunnel_index1;
- nsh_gre_tunnel_t * t0, * t1;
- u64 key0, key1;
+ vxlan_gpe_tunnel_t * t0, * t1;
+ vxlan_gpe_tunnel_key_t key0, key1;
u32 error0, error1;
u32 sw_if_index0, sw_if_index1, len0, len1;
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- h0 = vlib_buffer_get_current (b0);
- h1 = vlib_buffer_get_current (b1);
+ /* udp leaves current_data pointing at the vxlan-gpe header;
+    back up to the outer ip4 header so the tunnel key can be built from it */
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
+ vlib_buffer_advance
+ (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- /* gre stashed the src ip4 address for us... */
- key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si;
- key1 = (((u64)(vnet_buffer(b1)->gre.src))<<32) | h1->spi_si;
+ iuvn0 = vlib_buffer_get_current (b0);
+ iuvn1 = vlib_buffer_get_current (b1);
- /* "pop" nsh header */
- vlib_buffer_advance (b0, sizeof (*h0));
- vlib_buffer_advance (b1, sizeof (*h1));
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn0));
+ vlib_buffer_advance (b1, sizeof (*iuvn1));
tunnel_index0 = ~0;
tunnel_index1 = ~0;
error0 = 0;
error1 = 0;
- next0 = NSH_GRE_INPUT_NEXT_DROP;
- next1 = NSH_GRE_INPUT_NEXT_DROP;
- if (PREDICT_FALSE(key0 != last_key))
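+ /* Tentatively pick the next node from the GPE next-protocol field;
+    out-of-range values fall back to drop. */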
+ next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ?
+   iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+ next1 = (iuvn1->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ?
+   iuvn1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
+
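+ /* Build the tunnel lookup key from the outer IP addresses and the VNI. */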
+ key0.local = iuvn0->ip4.dst_address.as_u32;
+ key1.local = iuvn1->ip4.dst_address.as_u32;
+
+ key0.remote = iuvn0->ip4.src_address.as_u32;
+ key1.remote = iuvn1->ip4.src_address.as_u32;
+
+ key0.vni = iuvn0->vxlan.vni_res;
+ key1.vni = iuvn1->vxlan.vni_res;
+
+ key0.pad = 0;
+ key1.pad = 0;
+
+ /* Processing for key0 */
+ if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
+ || (key0.as_u64[1] != last_key.as_u64[1])))
{
- p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0);
+ p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
if (p0 == 0)
{
- error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace0;
}
- last_key = key0;
+ last_key.as_u64[0] = key0.as_u64[0];
+ last_key.as_u64[1] = key0.as_u64[1];
tunnel_index0 = last_tunnel_index = p0[0];
}
else
  tunnel_index0 = last_tunnel_index;

t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
- next0 = t0->decap_next_index;
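+ /* next-protocol values double as next-node indices here, so the
+    tunnel's protocol selects the decap next node directly */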
+ next0 = t0->protocol;
+
sw_if_index0 = t0->sw_if_index;
len0 = vlib_buffer_length_in_chain(vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b0);
-
- next0 = t0->decap_next_index;
-
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
+
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
pkts_decapsulated++;
stats_n_packets += 1;
trace0:
b0->error = error0 ? node->errors[error0] : 0;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->next_index = next0;
tr->error = error0;
tr->tunnel_index = tunnel_index0;
- tr->h = h0[0];
}
- if (PREDICT_FALSE(key1 != last_key))
+
+ /* Processing for key1 */
+ if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0])
+ || (key1.as_u64[1] != last_key.as_u64[1])))
{
- p1 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key1);
+ p1 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key1);
if (p1 == 0)
{
- error1 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace1;
}
- last_key = key1;
+ last_key.as_u64[0] = key1.as_u64[0];
+ last_key.as_u64[1] = key1.as_u64[1];
tunnel_index1 = last_tunnel_index = p1[0];
}
else
  tunnel_index1 = last_tunnel_index;

t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
- next1 = t1->decap_next_index;
+ next1 = t1->protocol;
sw_if_index1 = t1->sw_if_index;
len1 = vlib_buffer_length_in_chain(vm, b1);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b1);
- next1 = t1->decap_next_index;
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
pkts_decapsulated++;
stats_n_packets += 1;
stats_n_bytes += len1;
- /* Batch stats increment on the same nsh-gre tunnel so counter
+
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
is not incremented per packet */
if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
{
trace1:
b1->error = error1 ? node->errors[error1] : 0;
- if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b1, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b1, sizeof (*tr));
tr->next_index = next1;
tr->error = error1;
tr->tunnel_index = tunnel_index1;
- tr->h = h1[0];
}
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
u32 bi0;
vlib_buffer_t * b0;
u32 next0;
- nsh_header_t * h0;
+ ip4_vxlan_gpe_header_t * iuvn0;
uword * p0;
u32 tunnel_index0;
- nsh_gre_tunnel_t * t0;
- u64 key0;
+ vxlan_gpe_tunnel_t * t0;
+ vxlan_gpe_tunnel_key_t key0;
u32 error0;
u32 sw_if_index0, len0;
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
- h0 = vlib_buffer_get_current (b0);
- /* gre stashed the src ip4 address for us... */
- key0 = (((u64)(vnet_buffer(b0)->gre.src))<<32) | h0->spi_si;
+ /* udp leaves current_data pointing at the vxlan-gpe header;
+    back up to the outer ip4 header so the tunnel key can be built from it */
+ vlib_buffer_advance
+ (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
- /* "pop" nsh header */
- vlib_buffer_advance (b0, sizeof (*h0));
+ iuvn0 = vlib_buffer_get_current (b0);
+
+ /* pop (ip, udp, vxlan) */
+ vlib_buffer_advance (b0, sizeof (*iuvn0));
tunnel_index0 = ~0;
error0 = 0;
- next0 = NSH_GRE_INPUT_NEXT_DROP;
+ next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ?
+   iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
- if (PREDICT_FALSE(key0 != last_key))
- {
- p0 = hash_get (ngm->nsh_gre_tunnel_by_src_address, key0);
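+ /* Build the lookup key for this packet from the outer IP addresses and VNI. */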
+ key0.local = iuvn0->ip4.dst_address.as_u32;
+ key0.remote = iuvn0->ip4.src_address.as_u32;
+ key0.vni = iuvn0->vxlan.vni_res;
+ key0.pad = 0;
+ if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
+ || (key0.as_u64[1] != last_key.as_u64[1])))
+ {
+ p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
+
if (p0 == 0)
{
- error0 = NSH_GRE_ERROR_NO_SUCH_TUNNEL;
+ error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
goto trace00;
}
- last_key = key0;
+ last_key.as_u64[0] = key0.as_u64[0];
+ last_key.as_u64[1] = key0.as_u64[1];
tunnel_index0 = last_tunnel_index = p0[0];
}
else
  tunnel_index0 = last_tunnel_index;

t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
- next0 = t0->decap_next_index;
- sw_if_index0 = t0->sw_if_index;
+ next0 = t0->protocol;
+
+ sw_if_index0 = t0->sw_if_index;
len0 = vlib_buffer_length_in_chain(vm, b0);
/* Required to make the l2 tag push / pop code work on l2 subifs */
vnet_update_l2_len (b0);
- next0 = t0->decap_next_index;
+ /*
+ * ip[46] lookup in the configured FIB
+ */
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
- /* ip[46] lookup in the configured FIB, otherwise an opaque */
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
pkts_decapsulated ++;
-
stats_n_packets += 1;
stats_n_bytes += len0;
- /* Batch stats increment on the same nsh-gre tunnel so counter
+ /* Batch stats increment on the same vxlan-gpe tunnel so counter
is not incremented per packet */
if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
{
trace00:
b0->error = error0 ? node->errors[error0] : 0;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- nsh_rx_trace_t *tr = vlib_add_trace (vm, node,
- b0, sizeof (*tr));
+ vxlan_gpe_rx_trace_t *tr
+ = vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->next_index = next0;
tr->error = error0;
tr->tunnel_index = tunnel_index0;
- tr->h = h0[0];
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, nsh_gre_input_node.index,
- NSH_GRE_ERROR_DECAPSULATED,
+ vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
+ VXLAN_GPE_ERROR_DECAPSULATED,
pkts_decapsulated);
/* Increment any remaining batch stats */
if (stats_n_packets)
return from_frame->n_vectors;
}
-static char * nsh_error_strings[] = {
-#define nsh_gre_error(n,s) s,
-#include <vnet/nsh/nsh_error.def>
-#undef nsh_gre_error
+static char * vxlan_gpe_error_strings[] = {
+#define vxlan_gpe_error(n,s) s,
+#include <vnet/vxlan-gpe/vxlan_gpe_error.def>
+#undef vxlan_gpe_error
#undef _
};
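+
+/* Graph node registration: error strings, next nodes and the trace /
+   buffer formatters for vxlan-gpe-input. */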
-VLIB_REGISTER_NODE (nsh_gre_input_node) = {
- .function = nsh_gre_input,
- .name = "nsh-gre-input",
+VLIB_REGISTER_NODE (vxlan_gpe_input_node) = {
+ .function = vxlan_gpe_input,
+ .name = "vxlan-gpe-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
+ .error_strings = vxlan_gpe_error_strings,
- .n_errors = NSH_GRE_N_ERROR,
- .error_strings = nsh_error_strings,
-
- .n_next_nodes = NSH_GRE_INPUT_N_NEXT,
+ .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
.next_nodes = {
-#define _(s,n) [NSH_GRE_INPUT_NEXT_##s] = n,
- foreach_nsh_gre_input_next
+#define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
+ foreach_vxlan_gpe_input_next
#undef _
},
- .format_buffer = format_nsh_header_with_length,
- .format_trace = format_nsh_rx_trace,
- // $$$$ .unformat_buffer = unformat_nsh_gre_header,
+ .format_buffer = format_vxlan_gpe_with_length,
+ .format_trace = format_vxlan_gpe_rx_trace,
+ // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,
};