return 0;
}
+/*
+ * set_max_frame_size callback wired into the vnet_eth_interface_registration_t
+ * for VXLAN interfaces (see eir.cb.set_max_frame_size below).
+ */
+static clib_error_t *
+vxlan_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hw,
+ u32 frame_size)
+{
+ /* Intentionally a no-op: there is no per-device state to reprogram when
+  * the max frame size changes. Returning 0 (NULL clib_error_t *) reports
+  * success to the caller. */
+ return 0;
+}
+
static u8 *
format_decap_next (u8 * s, va_list * args)
{
return VNET_API_ERROR_INSTANCE_IN_USE;
}
- f64 now = vlib_time_now (vm);
- u32 rnd;
- rnd = (u32) (now * 1e6);
- rnd = random_u32 (&rnd);
-
- memcpy (hw_addr + 2, &rnd, sizeof (rnd));
- hw_addr[0] = 2;
- hw_addr[1] = 0xfe;
-
hash_set (vxm->instance_used, user_instance, 1);
t->dev_instance = dev_instance; /* actual */
t->user_instance = user_instance; /* name */
t->flow_index = ~0;
- if (ethernet_register_interface (vnm, vxlan_device_class.index,
- dev_instance, hw_addr, &t->hw_if_index,
- vxlan_eth_flag_change))
+ if (a->is_l3)
+ t->hw_if_index =
+ vnet_register_interface (vnm, vxlan_device_class.index, dev_instance,
+ vxlan_hw_class.index, dev_instance);
+ else
{
- hash_unset (vxm->instance_used, t->user_instance);
-
- pool_put (vxm->tunnels, t);
- return VNET_API_ERROR_SYSCALL_ERROR_2;
+ vnet_eth_interface_registration_t eir = {};
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+ memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+
+ eir.dev_class_index = vxlan_device_class.index;
+ eir.dev_instance = dev_instance;
+ eir.address = hw_addr;
+ eir.cb.flag_change = vxlan_eth_flag_change;
+ eir.cb.set_max_frame_size = vxlan_eth_set_max_frame_size;
+ t->hw_if_index = vnet_eth_register_interface (vnm, &eir);
}
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index);
if (add_failed)
{
- ethernet_delete_interface (vnm, t->hw_if_index);
+ if (a->is_l3)
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ else
+ ethernet_delete_interface (vnm, t->hw_if_index);
hash_unset (vxm->instance_used, t->user_instance);
pool_put (vxm->tunnels, t);
return VNET_API_ERROR_INVALID_REGISTRATION;
fib_prefix_t tun_dst_pfx;
vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
- fib_prefix_from_ip46_addr (&t->dst, &tun_dst_pfx);
+ fib_protocol_t fp = fib_ip_proto (is_ip6);
+ fib_prefix_from_ip46_addr (fp, &t->dst, &tun_dst_pfx);
if (!ip46_address_is_multicast (&t->dst))
{
/* Unicast tunnel -
* with different VNIs, create the output fib adjacency only if
* it does not already exist
*/
- fib_protocol_t fp = fib_ip_proto (is_ip6);
-
if (vtep_addr_ref (&vxm->vtep_table,
t->encap_fib_index, &t->dst) == 1)
{
* - the forwarding interface is for-us
* - the accepting interface is that from the API
*/
- mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx, MFIB_SOURCE_VXLAN, &path);
+ mfib_table_entry_path_update (t->encap_fib_index, &mpfx,
+ MFIB_SOURCE_VXLAN,
+ MFIB_ENTRY_FLAG_NONE, &path);
path.frp_sw_if_index = a->mcast_sw_if_index;
path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
- mfei = mfib_table_entry_path_update (t->encap_fib_index,
- &mpfx,
- MFIB_SOURCE_VXLAN, &path);
+ mfei = mfib_table_entry_path_update (
+ t->encap_fib_index, &mpfx, MFIB_SOURCE_VXLAN,
+ MFIB_ENTRY_FLAG_NONE, &path);
/*
* Create the mcast adjacency to send traffic to the group
mcast_shared_remove (&t->dst);
}
- ethernet_delete_interface (vnm, t->hw_if_index);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, t->hw_if_index);
+ if (hw->dev_class_index == vxlan_device_class.index)
+ vnet_delete_hw_interface (vnm, t->hw_if_index);
+ else
+ ethernet_delete_interface (vnm, t->hw_if_index);
hash_unset (vxm->instance_used, t->user_instance);
fib_node_deinit (&t->node);
u8 grp_set = 0;
u8 ipv4_set = 0;
u8 ipv6_set = 0;
+ u8 is_l3 = 0;
u32 instance = ~0;
u32 encap_fib_index = 0;
u32 mcast_sw_if_index = ~0;
encap_fib_index =
fib_table_find (fib_ip_proto (ipv6_set), table_id);
}
+ else if (unformat (line_input, "l3"))
+ is_l3 = 1;
else if (unformat (line_input, "decap-next %U", unformat_decap_next,
&decap_next_index, ipv4_set))
;
if (parse_error)
return parse_error;
+ if (is_l3 && decap_next_index == VXLAN_INPUT_NEXT_L2_INPUT)
+ {
+ vlib_node_t *node = vlib_get_node_by_name (
+ vm, (u8 *) (ipv4_set ? "ip4-input" : "ip6-input"));
+ decap_next_index = get_decap_next_for_node (node->index, ipv4_set);
+ }
+
if (encap_fib_index == ~0)
return clib_error_return (0, "nonexistent encap-vrf-id %d", table_id);
if (vni >> 24)
return clib_error_return (0, "vni %d out of range", vni);
- vnet_vxlan_add_del_tunnel_args_t a = {
- .is_add = is_add,
- .is_ip6 = ipv6_set,
- .instance = instance,
+ vnet_vxlan_add_del_tunnel_args_t a = { .is_add = is_add,
+ .is_ip6 = ipv6_set,
+ .is_l3 = is_l3,
+ .instance = instance,
#define _(x) .x = x,
- foreach_copy_field
+ foreach_copy_field
#undef _
};
"create vxlan tunnel src <local-vtep-addr>"
" {dst <remote-vtep-addr>|group <mcast-vtep-addr> <intf-name>} vni <nn>"
" [instance <id>]"
- " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del]"
+ " [encap-vrf-id <nn>] [decap-next [l2|node <name>]] [del] [l3]"
" [src_port <local-vtep-udp-port>] [dst_port <remote-vtep-udp-port>]",
.function = vxlan_add_del_tunnel_command_fn,
};
/*?
* This command adds the 'ip4-vxlan-bypass' graph node for a given interface.
* By adding the IPv4 vxlan-bypass graph node to an interface, the node checks
- * for and validate input vxlan packet and bypass ip4-lookup, ip4-local,
+ * for and validate input vxlan packet and bypass ip4-lookup, ip4-local,
* ip4-udp-lookup nodes to speedup vxlan packet forwarding. This node will
* cause extra overhead to for non-vxlan packets which is kept at a minimum.
*
/*?
* This command adds the 'ip6-vxlan-bypass' graph node for a given interface.
* By adding the IPv6 vxlan-bypass graph node to an interface, the node checks
- * for and validate input vxlan packet and bypass ip6-lookup, ip6-local,
+ * for and validate input vxlan packet and bypass ip6-lookup, ip6-local,
* ip6-udp-lookup nodes to speedup vxlan packet forwarding. This node will
* cause extra overhead to for non-vxlan packets which is kept at a minimum.
*