#include <vnet/mfib/mfib_table.h>
#include <vnet/adj/adj_mcast.h>
#include <vnet/adj/rewrite.h>
+#include <vnet/dpo/drop_dpo.h>
#include <vnet/interface.h>
#include <vnet/flow/flow.h>
+#include <vnet/udp/udp_local.h>
#include <vlib/vlib.h>
/**
vxlan_main_t vxlan_main;
+/* Ethernet flag-change callback for L2-mode vxlan interfaces (wired up
+ * through vnet_eth_interface_registration_t.cb.flag_change below).
+ * Stub: accepts any requested flag change and reports no hardware
+ * flags back to vnet. */
+static u32
+vxlan_eth_flag_change (vnet_main_t *vnm, vnet_hw_interface_t *hi, u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+/* Max-frame-size (MTU) callback for L2-mode vxlan interfaces (wired up
+ * through vnet_eth_interface_registration_t.cb.set_max_frame_size below).
+ * Stub: accepts any frame size and returns 0 (NULL clib_error_t, i.e.
+ * success) without programming anything. */
+static clib_error_t *
+vxlan_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hw,
+			      u32 frame_size)
+{
+ /* nothing for now */
+ return 0;
+}
+
static u8 *
format_decap_next (u8 * s, va_list * args)
{
vxlan_tunnel_t *t = va_arg (*args, vxlan_tunnel_t *);
s = format (s,
- "[%d] instance %d src %U dst %U vni %d fib-idx %d sw-if-idx %d ",
- t->dev_instance, t->user_instance,
- format_ip46_address, &t->src, IP46_TYPE_ANY,
- format_ip46_address, &t->dst, IP46_TYPE_ANY,
- t->vni, t->encap_fib_index, t->sw_if_index);
+ "[%d] instance %d src %U dst %U src_port %d dst_port %d vni %d "
+ "fib-idx %d sw-if-idx %d ",
+ t->dev_instance, t->user_instance, format_ip46_address, &t->src,
+ IP46_TYPE_ANY, format_ip46_address, &t->dst, IP46_TYPE_ANY,
+ t->src_port, t->dst_port, t->vni, t->encap_fib_index,
+ t->sw_if_index);
s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
* skip single bucket load balance dpo's */
while (DPO_LOAD_BALANCE == dpo.dpoi_type)
{
- load_balance_t *lb = load_balance_get (dpo.dpoi_index);
+ const load_balance_t *lb;
+ const dpo_id_t *choice;
+
+ lb = load_balance_get (dpo.dpoi_index);
if (lb->lb_n_buckets > 1)
break;
- dpo_copy (&dpo, load_balance_get_bucket_i (lb, 0));
+ choice = load_balance_get_bucket_i (lb, 0);
+
+ if (DPO_RECEIVE == choice->dpoi_type)
+ dpo_copy (&dpo, drop_dpo_get (choice->dpoi_proto));
+ else
+ dpo_copy (&dpo, choice);
}
u32 encap_index = is_ip4 ?
.fnv_back_walk = vxlan_tunnel_back_walk,
};
-
-#define foreach_copy_field \
-_(vni) \
-_(mcast_sw_if_index) \
-_(encap_fib_index) \
-_(decap_next_index) \
-_(src) \
-_(dst)
+#define foreach_copy_field \
+ _ (vni) \
+ _ (mcast_sw_if_index) \
+ _ (encap_fib_index) \
+ _ (decap_next_index) \
+ _ (src) \
+ _ (dst) \
+ _ (src_port) \
+ _ (dst_port)
static void
vxlan_rewrite (vxlan_tunnel_t * t, bool is_ip6)
}
/* UDP header, randomize src port on something, maybe? */
- udp->src_port = clib_host_to_net_u16 (4789);
- udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan);
+ udp->src_port = clib_host_to_net_u16 (t->src_port);
+ udp->dst_port = clib_host_to_net_u16 (t->dst_port);
/* VXLAN header */
vnet_set_vni_and_flags (vxlan, t->vni);
return decap_next_index < r->n_next_nodes;
}
-static uword
-vtep_addr_ref (ip46_address_t * ip)
-{
- uword *vtep = ip46_address_is_ip4 (ip) ?
- hash_get (vxlan_main.vtep4, ip->ip4.as_u32) :
- hash_get_mem (vxlan_main.vtep6, &ip->ip6);
- if (vtep)
- return ++(*vtep);
- ip46_address_is_ip4 (ip) ?
- hash_set (vxlan_main.vtep4, ip->ip4.as_u32, 1) :
- hash_set_mem_alloc (&vxlan_main.vtep6, &ip->ip6, 1);
- return 1;
-}
-
-static uword
-vtep_addr_unref (ip46_address_t * ip)
-{
- uword *vtep = ip46_address_is_ip4 (ip) ?
- hash_get (vxlan_main.vtep4, ip->ip4.as_u32) :
- hash_get_mem (vxlan_main.vtep6, &ip->ip6);
- ASSERT (vtep);
- if (--(*vtep) != 0)
- return *vtep;
- ip46_address_is_ip4 (ip) ?
- hash_unset (vxlan_main.vtep4, ip->ip4.as_u32) :
- hash_unset_mem_free (&vxlan_main.vtep6, &ip->ip6);
- return 0;
-}
-
/* *INDENT-OFF* */
typedef CLIB_PACKED(union
{
{
ASSERT (ip46_address_is_multicast (ip));
uword *p = hash_get_mem (vxlan_main.mcast_shared, ip);
- ASSERT (p);
+ ALWAYS_ASSERT (p);
mcast_shared_t ret = {.as_u64 = *p };
return ret;
}
vxlan4_tunnel_key_t key4;
vxlan6_tunnel_key_t key6;
u32 is_ip6 = a->is_ip6;
+ vlib_main_t *vm = vlib_get_main ();
+ u8 hw_addr[6];
+
+ /* Set udp-ports */
+ if (a->src_port == 0)
+ a->src_port = is_ip6 ? UDP_DST_PORT_vxlan6 : UDP_DST_PORT_vxlan;
+
+ if (a->dst_port == 0)
+ a->dst_port = is_ip6 ? UDP_DST_PORT_vxlan6 : UDP_DST_PORT_vxlan;
int not_found;
if (!is_ip6)
{
/* ip4 mcast is indexed by mcast addr only */
key4.key[0] = ip46_address_is_multicast (&a->dst) ?
- a->dst.ip4.as_u32 :
- a->dst.ip4.as_u32 | (((u64) a->src.ip4.as_u32) << 32);
- key4.key[1] = (((u64) a->encap_fib_index) << 32)
- | clib_host_to_net_u32 (a->vni << 8);
+ a->dst.ip4.as_u32 :
+ a->dst.ip4.as_u32 | (((u64) a->src.ip4.as_u32) << 32);
+ key4.key[1] = ((u64) clib_host_to_net_u16 (a->src_port) << 48) |
+ (((u64) a->encap_fib_index) << 32) |
+ clib_host_to_net_u32 (a->vni << 8);
not_found =
clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
p = (void *) &key4.value;
{
key6.key[0] = a->dst.ip6.as_u64[0];
key6.key[1] = a->dst.ip6.as_u64[1];
- key6.key[2] = (((u64) a->encap_fib_index) << 32)
- | clib_host_to_net_u32 (a->vni << 8);
+ key6.key[2] = (((u64) clib_host_to_net_u16 (a->src_port) << 48) |
+ ((u64) a->encap_fib_index) << 32) |
+ clib_host_to_net_u32 (a->vni << 8);
not_found =
clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
p = (void *) &key6.value;
pool_put (vxm->tunnels, t);
return VNET_API_ERROR_INSTANCE_IN_USE;
}
+
hash_set (vxm->instance_used, user_instance, 1);
t->dev_instance = dev_instance; /* actual */
- t->user_instance = user_instance; /* name */
+ t->user_instance = user_instance; /* name */
t->flow_index = ~0;
- t->hw_if_index = vnet_register_interface
- (vnm, vxlan_device_class.index, dev_instance,
- vxlan_hw_class.index, dev_instance);
+ if (a->is_l3)
+ t->hw_if_index =
+ vnet_register_interface (vnm, vxlan_device_class.index, dev_instance,
+ vxlan_hw_class.index, dev_instance);
+ else
+ {
+ vnet_eth_interface_registration_t eir = {};
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+ memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+
+ eir.dev_class_index = vxlan_device_class.index;
+ eir.dev_instance = dev_instance;
+ eir.address = hw_addr;
+ eir.cb.flag_change = vxlan_eth_flag_change;
+ eir.cb.set_max_frame_size = vxlan_eth_set_max_frame_size;
+ t->hw_if_index = vnet_eth_register_interface (vnm, &eir);
+ }
+
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index);
/* Set vxlan tunnel output node */
if (add_failed)
{
- vnet_delete_hw_interface (vnm, t->hw_if_index);
+ if (a->is_l3)
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ else
+ ethernet_delete_interface (vnm, t->hw_if_index);
hash_unset (vxm->instance_used, t->user_instance);
pool_put (vxm->tunnels, t);
return VNET_API_ERROR_INVALID_REGISTRATION;
fib_prefix_t tun_dst_pfx;
vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
- fib_prefix_from_ip46_addr (&t->dst, &tun_dst_pfx);
+ fib_protocol_t fp = fib_ip_proto (is_ip6);
+ fib_prefix_from_ip46_addr (fp, &t->dst, &tun_dst_pfx);
if (!ip46_address_is_multicast (&t->dst))
{
/* Unicast tunnel -
* when the forwarding for the entry updates, and the tunnel can
* re-stack accordingly
*/
- vtep_addr_ref (&t->src);
+ vtep_addr_ref (&vxm->vtep_table, t->encap_fib_index, &t->src);
t->fib_entry_index = fib_entry_track (t->encap_fib_index,
&tun_dst_pfx,
FIB_NODE_TYPE_VXLAN_TUNNEL,
* with different VNIs, create the output fib adjacency only if
* it does not already exist
*/
- fib_protocol_t fp = fib_ip_proto (is_ip6);
-
- if (vtep_addr_ref (&t->dst) == 1)
+ if (vtep_addr_ref (&vxm->vtep_table,
+ t->encap_fib_index, &t->dst) == 1)
{
fib_node_index_t mfei;
adj_index_t ai;
* - the forwarding interface is for-us
* - the accepting interface is that from the API
*/
- mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx, MFIB_SOURCE_VXLAN, &path);
+ mfib_table_entry_path_update (t->encap_fib_index, &mpfx,
+ MFIB_SOURCE_VXLAN,
+ MFIB_ENTRY_FLAG_NONE, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
- mfei = mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx,
- MFIB_SOURCE_VXLAN, &path);
+ mfei = mfib_table_entry_path_update (
+ t->encap_fib_index, &mpfx, MFIB_SOURCE_VXLAN,
+ MFIB_ENTRY_FLAG_NONE, &path);
/*
* Create the mcast adjacency to send traffic to the group
if (t->flow_index != ~0)
vnet_flow_del (vnm, t->flow_index);
- vtep_addr_unref (&t->src);
+ vtep_addr_unref (&vxm->vtep_table, t->encap_fib_index, &t->src);
fib_entry_untrack (t->fib_entry_index, t->sibling_index);
}
- else if (vtep_addr_unref (&t->dst) == 0)
+ else if (vtep_addr_unref (&vxm->vtep_table,
+ t->encap_fib_index, &t->dst) == 0)
{
mcast_shared_remove (&t->dst);
}
- vnet_delete_hw_interface (vnm, t->hw_if_index);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, t->hw_if_index);
+ if (hw->dev_class_index == vxlan_device_class.index)
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ else
+ ethernet_delete_interface (vnm, t->hw_if_index);
hash_unset (vxm->instance_used, t->user_instance);
fib_node_deinit (&t->node);
if (a->is_add)
{
/* register udp ports */
- if (!is_ip6 && !udp_is_valid_dst_port (UDP_DST_PORT_vxlan, 1))
- udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan,
+ if (!is_ip6 && !udp_is_valid_dst_port (a->src_port, 1))
+ udp_register_dst_port (vxm->vlib_main, a->src_port,
vxlan4_input_node.index, 1);
- if (is_ip6 && !udp_is_valid_dst_port (UDP_DST_PORT_vxlan6, 0))
- udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan6,
+ if (is_ip6 && !udp_is_valid_dst_port (a->src_port, 0))
+ udp_register_dst_port (vxm->vlib_main, a->src_port,
vxlan6_input_node.index, 0);
}
u8 grp_set = 0;
u8 ipv4_set = 0;
u8 ipv6_set = 0;
+ u8 is_l3 = 0;
u32 instance = ~0;
u32 encap_fib_index = 0;
u32 mcast_sw_if_index = ~0;
u32 decap_next_index = VXLAN_INPUT_NEXT_L2_INPUT;
u32 vni = 0;
+ u32 src_port = 0;
+ u32 dst_port = 0;
u32 table_id;
clib_error_t *parse_error = NULL;
encap_fib_index =
fib_table_find (fib_ip_proto (ipv6_set), table_id);
}
+ else if (unformat (line_input, "l3"))
+ is_l3 = 1;
else if (unformat (line_input, "decap-next %U", unformat_decap_next,
&decap_next_index, ipv4_set))
;
else if (unformat (line_input, "vni %d", &vni))
;
+ else if (unformat (line_input, "src_port %d", &src_port))
+ ;
+ else if (unformat (line_input, "dst_port %d", &dst_port))
+ ;
else
{
parse_error = clib_error_return (0, "parse error: '%U'",
if (parse_error)
return parse_error;
+ if (is_l3 && decap_next_index == VXLAN_INPUT_NEXT_L2_INPUT)
+ {
+ vlib_node_t *node = vlib_get_node_by_name (
+ vm, (u8 *) (ipv4_set ? "ip4-input" : "ip6-input"));
+ decap_next_index = get_decap_next_for_node (node->index, ipv4_set);
+ }
+
if (encap_fib_index == ~0)
return clib_error_return (0, "nonexistent encap-vrf-id %d", table_id);
if (vni >> 24)
return clib_error_return (0, "vni %d out of range", vni);
- vnet_vxlan_add_del_tunnel_args_t a = {
- .is_add = is_add,
- .is_ip6 = ipv6_set,
- .instance = instance,
+ vnet_vxlan_add_del_tunnel_args_t a = { .is_add = is_add,
+ .is_ip6 = ipv6_set,
+ .is_l3 = is_l3,
+ .instance = instance,
#define _(x) .x = x,
- foreach_copy_field
+ foreach_copy_field
#undef _
};
*
* @cliexpar
* Example of how to create a VXLAN Tunnel:
- * @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 encap-vrf-id 7}
+ * @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 encap-vrf-id
+ 7}
* Example of how to create a VXLAN Tunnel with a known name, vxlan_tunnel42:
* @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 instance 42}
- * Example of how to create a multicast VXLAN Tunnel with a known name, vxlan_tunnel23:
- * @cliexcmd{create vxlan tunnel src 10.0.3.1 group 239.1.1.1 GigabitEthernet0/8/0 instance 23}
+ * Example of how to create a multicast VXLAN Tunnel with a known name,
+ vxlan_tunnel23:
+ * @cliexcmd{create vxlan tunnel src 10.0.3.1 group 239.1.1.1
+ GigabitEthernet0/8/0 instance 23}
+ * Example of how to create a VXLAN Tunnel with custom udp-ports:
+ * @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 src_port
+ 59000 dst_port 59001}
* Example of how to delete a VXLAN Tunnel:
* @cliexcmd{create vxlan tunnel src 10.0.3.1 dst 10.0.3.3 vni 13 del}
?*/
VLIB_CLI_COMMAND (create_vxlan_tunnel_command, static) = {
.path = "create vxlan tunnel",
.short_help =
- "create vxlan tunnel src <local-vtep-addr>"
- " {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>"
- " [instance <id>]"
- " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del]",
+ "create vxlan tunnel src <local-vtep-addr>"
+ " {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>"
+ " [instance <id>]"
+ " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del] [l3]"
+ " [src_port <local-vtep-udp-port>] [dst_port <remote-vtep-udp-port>]",
.function = vxlan_add_del_tunnel_command_fn,
};
/* *INDENT-ON* */
vlib_cli_output (vm, "No vxlan tunnels configured...");
/* *INDENT-OFF* */
- pool_foreach (t, vxm->tunnels,
- ({
+ pool_foreach (t, vxm->tunnels)
+ {
vlib_cli_output (vm, "%U", format_vxlan_tunnel, t);
- }));
+ }
/* *INDENT-ON* */
if (raw)
* @cliexpar
* Example of how to display the VXLAN Tunnel entries:
* @cliexstart{show vxlan tunnel}
- * [0] src 10.0.3.1 dst 10.0.3.3 vni 13 encap_fib_index 0 sw_if_index 5 decap_next l2
+ * [0] src 10.0.3.1 dst 10.0.3.3 src_port 4789 dst_port 4789 vni 13
+ encap_fib_index 0 sw_if_index 5 decap_next l2
* @cliexend
?*/
/* *INDENT-OFF* */
/*?
* This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
* By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
- * for and validate input vxlan packet and bypass ip4-lookup, ip4-local,
+ * for and validate input vxlan packet and bypass ip4-lookup, ip4-local,
* ip4-udp-lookup nodes to speedup vxlan packet forwarding. This node will
* cause extra overhead to for non-vxlan packets which is kept at a minimum.
*
/*?
* This command adds the 'ip6-vxlan-bypass' graph node for a given interface.
* By adding the IPv6 vxlan-bypass graph node to an interface, the node checks
- * for and validate input vxlan packet and bypass ip6-lookup, ip6-local,
+ * for and validate input vxlan packet and bypass ip6-lookup, ip6-local,
* ip6-udp-lookup nodes to speedup vxlan packet forwarding. This node will
* cause extra overhead to for non-vxlan packets which is kept at a minimum.
*
VLIB_CLI_COMMAND (set_interface_ip6_vxlan_bypass_command, static) = {
.path = "set interface ip6 vxlan-bypass",
.function = set_ip6_vxlan_bypass,
- .short_help = "set interface ip vxlan-bypass <interface> [del]",
+ .short_help = "set interface ip6 vxlan-bypass <interface> [del]",
};
/* *INDENT-ON* */
vxlan_main_t *vxm = &vxlan_main;
vnet_flow_t flow = {
.actions =
- VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_MARK,
+ VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_MARK |
+ VNET_FLOW_ACTION_BUFFER_ADVANCE,
.mark_flow_id = t->dev_instance + vxm->flow_id_start,
.redirect_node_index = vxlan4_flow_input_node.index,
+ .buffer_advance = sizeof (ethernet_header_t),
.type = VNET_FLOW_TYPE_IP4_VXLAN,
.ip4_vxlan = {
- .src_addr = t->dst.ip4,
- .dst_addr = t->src.ip4,
- .dst_port = UDP_DST_PORT_vxlan,
+ .protocol.prot = IP_PROTOCOL_UDP,
+ .src_addr.addr = t->dst.ip4,
+ .dst_addr.addr = t->src.ip4,
+ .src_addr.mask.as_u32 = ~0,
+ .dst_addr.mask.as_u32 = ~0,
+ .dst_port.port = t->src_port,
+ .dst_port.mask = 0xFF,
.vni = t->vni,
}
,
VXLAN_HASH_NUM_BUCKETS, VXLAN_HASH_MEMORY_SIZE);
clib_bihash_init_24_8 (&vxm->vxlan6_tunnel_by_key, "vxlan6",
VXLAN_HASH_NUM_BUCKETS, VXLAN_HASH_MEMORY_SIZE);
- vxm->vtep6 = hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword));
+ vxm->vtep_table = vtep_table_create ();
vxm->mcast_shared = hash_create_mem (0,
sizeof (ip46_address_t),
sizeof (mcast_shared_t));