X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fgtpu%2Fgtpu.c;h=905ca5e1ca5e6c85f1a3f759f337af12a8020a15;hb=00fdf53c7076d1bd0045439e73f0144d613eb09c;hp=48ac8a7a2f170ef5053adb4b70219b4e817b6580;hpb=4dc5a43f4871c3f0a88ad0bb9041332bf3b03f1b;p=vpp.git

diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c
index 48ac8a7a2f1..905ca5e1ca5 100644
--- a/src/plugins/gtpu/gtpu.c
+++ b/src/plugins/gtpu/gtpu.c
@@ -31,7 +31,7 @@
 #include 
 #include 
 #include 
-
+#include <vnet/flow/flow.h>
 
 gtpu_main_t gtpu_main;
 
@@ -419,6 +419,9 @@ int vnet_gtpu_add_del_tunnel
 
       ip_udp_gtpu_rewrite (t, is_ip6);
 
+      /* clear the flow index */
+      t->flow_index = ~0;
+
       /* copy the key */
       if (is_ip6)
        hash_set_mem_alloc (&gtm->gtpu6_tunnel_by_key, &key6,
@@ -602,6 +605,9 @@ int vnet_gtpu_add_del_tunnel
 
       if (!ip46_address_is_multicast (&t->dst))
        {
+         if (t->flow_index != ~0)
+           vnet_flow_del (vnm, t->flow_index);
+
          vtep_addr_unref (&t->src);
          fib_entry_untrack (t->fib_entry_index, t->sibling_index);
        }
@@ -1093,6 +1099,135 @@ VLIB_CLI_COMMAND (set_interface_ip6_gtpu_bypass_command, static) = {
 };
 /* *INDENT-ON* */
 
+int
+vnet_gtpu_add_del_rx_flow (u32 hw_if_index, u32 t_index, int is_add)
+{
+  gtpu_main_t *gtm = &gtpu_main;
+  gtpu_tunnel_t *t = pool_elt_at_index (gtm->tunnels, t_index);
+  vnet_main_t *vnm = vnet_get_main ();
+  if (is_add)
+    {
+      if (t->flow_index == ~0)
+        {
+          vnet_flow_t flow = {
+            .actions =
+              VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_MARK |
+              VNET_FLOW_ACTION_BUFFER_ADVANCE,
+            .mark_flow_id = t_index + gtm->flow_id_start,
+            .redirect_node_index = gtpu4_flow_input_node.index,
+            .buffer_advance = sizeof (ethernet_header_t) +
+              sizeof (ip4_header_t) + sizeof (udp_header_t),
+            .type = VNET_FLOW_TYPE_IP4_GTPU_IP4,
+            .ip4_gtpu = {
+              .protocol = IP_PROTOCOL_UDP,
+              .src_addr.addr = t->dst.ip4,
+              .src_addr.mask.as_u32 = ~0,
+              .dst_addr.addr = t->src.ip4,
+              .dst_addr.mask.as_u32 = ~0,
+              .teid = t->teid,
+            },
+          };
+          vnet_flow_add (vnm, &flow, &t->flow_index);
+        }
+
+      return vnet_flow_enable (vnm, t->flow_index, hw_if_index);
+    }
+
+  /* flow index is removed when the tunnel is deleted */
+  return vnet_flow_disable (vnm, t->flow_index, hw_if_index);
+}
+
+u32
+vnet_gtpu_get_tunnel_index (u32 sw_if_index)
+{
+  gtpu_main_t *gtm = &gtpu_main;
+
+  if (sw_if_index >= vec_len (gtm->tunnel_index_by_sw_if_index))
+    return ~0;
+  return gtm->tunnel_index_by_sw_if_index[sw_if_index];
+}
+
+static clib_error_t *
+gtpu_offload_command_fn (vlib_main_t * vm,
+                         unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
+
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 rx_sw_if_index = ~0;
+  u32 hw_if_index = ~0;
+  int is_add = 1;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "hw %U", unformat_vnet_hw_interface, vnm,
+                    &hw_if_index))
+        continue;
+      if (unformat (line_input, "rx %U", unformat_vnet_sw_interface, vnm,
+                    &rx_sw_if_index))
+        continue;
+      if (unformat (line_input, "del"))
+        {
+          is_add = 0;
+          continue;
+        }
+      return clib_error_return (0, "unknown input `%U'",
+                                format_unformat_error, line_input);
+    }
+
+  if (rx_sw_if_index == ~0)
+    return clib_error_return (0, "missing rx interface");
+  if (hw_if_index == ~0)
+    return clib_error_return (0, "missing hw interface");
+
+  u32 t_index = vnet_gtpu_get_tunnel_index (rx_sw_if_index);
+  if (t_index == ~0)
+    return clib_error_return (0, "%U is not a gtpu tunnel",
+                              format_vnet_sw_if_index_name, vnm,
+                              rx_sw_if_index);
+
+  gtpu_main_t *gtm = &gtpu_main;
+  gtpu_tunnel_t *t = pool_elt_at_index (gtm->tunnels, t_index);
+
+  /* first support ipv4 hw offload */
+  if (!ip46_address_is_ip4 (&t->dst))
+    return clib_error_return (0, "currently only IPV4 tunnels are supported");
+
+  /* inner protocol should be IPv4 */
+  if (t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT)
+    return clib_error_return (0,
+                              "currently only inner IPV4 protocol is supported");
+
+  vnet_hw_interface_t *hw_if = vnet_get_hw_interface (vnm, hw_if_index);
+  ip4_main_t *im = &ip4_main;
+  u32 rx_fib_index =
+    vec_elt (im->fib_index_by_sw_if_index, hw_if->sw_if_index);
+
+  if (t->encap_fib_index != rx_fib_index)
+    return clib_error_return (0, "interface/tunnel fib mismatch");
+
+  if (vnet_gtpu_add_del_rx_flow (hw_if_index, t_index, is_add))
+    return clib_error_return (0, "error %s flow",
+                              is_add ? "enabling" : "disabling");
+
+  return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gtpu_offload_command, static) = {
+    .path = "set flow-offload gtpu",
+    .short_help =
+    "set flow-offload gtpu hw <interface-name> rx <tunnel-name> [del]",
+    .function = gtpu_offload_command_fn,
+};
+/* *INDENT-ON* */
+
 clib_error_t *
 gtpu_init (vlib_main_t * vm)
 {
@@ -1101,6 +1236,9 @@ gtpu_init (vlib_main_t * vm)
   gtm->vnet_main = vnet_get_main ();
   gtm->vlib_main = vm;
 
+  vnet_flow_get_range (gtm->vnet_main, "gtpu", 1024 * 1024,
+                       &gtm->flow_id_start);
+
   /* initialize the ip6 hash */
   gtm->gtpu6_tunnel_by_key = hash_create_mem (0, sizeof (gtpu6_tunnel_key_t),
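
Note: the patch above exposes the RX flow-offload logic both through the new
"set flow-offload gtpu hw <interface-name> rx <tunnel-name> [del]" CLI and through
two plain C entry points, vnet_gtpu_get_tunnel_index() and vnet_gtpu_add_del_rx_flow().
The fragment below is a minimal sketch, not part of the diff, of how other code could
call those entry points directly; the wrapper name enable_gtpu_rx_offload is invented
for illustration, and it assumes the two functions are declared in the plugin header
gtpu/gtpu.h.

/* Illustrative sketch only -- not part of the patch above.  Assumes the two
 * new functions are declared in <gtpu/gtpu.h>. */
#include <vnet/vnet.h>
#include <gtpu/gtpu.h>

static clib_error_t *
enable_gtpu_rx_offload (u32 hw_if_index, u32 tunnel_sw_if_index, int is_add)
{
  /* Translate the tunnel interface's sw_if_index into its index in the
     gtpu tunnel pool; ~0 means the interface is not a gtpu tunnel. */
  u32 t_index = vnet_gtpu_get_tunnel_index (tunnel_sw_if_index);
  if (t_index == ~0)
    return clib_error_return (0, "not a gtpu tunnel");

  /* Install (is_add = 1) or remove (is_add = 0) the NIC flow rule that
     matches this tunnel's outer IPv4/UDP/TEID, marks the packet, advances
     past the outer headers and redirects it to gtpu4-flow-input. */
  if (vnet_gtpu_add_del_rx_flow (hw_if_index, t_index, is_add))
    return clib_error_return (0, "error %s flow",
                              is_add ? "enabling" : "disabling");

  return 0;
}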