X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fgtpu%2Fgtpu.c;h=905ca5e1ca5e6c85f1a3f759f337af12a8020a15;hb=00fdf53c7;hp=80069a77b09c9043cede10d0c9c5d33f349bce6e;hpb=e6bfeab1c352ae73a19361c038e2a06a58c035db;p=vpp.git

diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c
old mode 100755
new mode 100644
index 80069a77b09..905ca5e1ca5
--- a/src/plugins/gtpu/gtpu.c
+++ b/src/plugins/gtpu/gtpu.c
@@ -24,13 +24,14 @@
 #include <vnet/ethernet/ethernet.h>
 #include <vnet/fib/fib_entry.h>
 #include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry_track.h>
 #include <vnet/mfib/mfib_table.h>
 #include <vnet/adj/adj_mcast.h>
 #include <vnet/dpo/dpo.h>
 #include <vnet/plugin/plugin.h>
 #include <vpp/app/version.h>
 #include <gtpu/gtpu.h>
-
+#include <vnet/flow/flow.h>
 
 gtpu_main_t gtpu_main;
 
@@ -48,6 +49,18 @@ VNET_FEATURE_INIT (ip6_gtpu_bypass, static) = {
 };
 /* *INDENT-on* */
 
+u8 * format_gtpu_encap_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  gtpu_encap_trace_t * t
+      = va_arg (*args, gtpu_encap_trace_t *);
+
+  s = format (s, "GTPU encap to gtpu_tunnel%d teid %d",
+              t->tunnel_index, t->teid);
+  return s;
+}
+
 static u8 *
 format_decap_next (u8 * s, va_list * args)
 {
@@ -75,18 +88,18 @@ format_gtpu_tunnel (u8 * s, va_list * args)
   gtpu_tunnel_t *t = va_arg (*args, gtpu_tunnel_t *);
   gtpu_main_t *ngm = &gtpu_main;
 
-  s = format (s, "[%d] src %U dst %U teid %d sw_if_index %d ",
+  s = format (s, "[%d] src %U dst %U teid %d fib-idx %d sw-if-idx %d ",
               t - ngm->tunnels,
               format_ip46_address, &t->src, IP46_TYPE_ANY,
               format_ip46_address, &t->dst, IP46_TYPE_ANY,
-              t->teid, t->sw_if_index);
+              t->teid, t->encap_fib_index, t->sw_if_index);
 
-  if (ip46_address_is_multicast (&t->dst))
-    s = format (s, "mcast_sw_if_index %d ", t->mcast_sw_if_index);
+  s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
+  s = format (s, "decap-next-%U ", format_decap_next, t->decap_next_index);
+
+  if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
+    s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
 
-  s = format (s, "encap_fib_index %d fib_entry_index %d decap_next %U\n",
-              t->encap_fib_index, t->fib_entry_index,
-              format_decap_next, t->decap_next_index);
   return s;
 }
 
@@ -97,14 +110,6 @@ format_gtpu_name (u8 * s, va_list * args)
   return format (s, "gtpu_tunnel%d", dev_instance);
 }
 
-static uword
-dummy_interface_tx (vlib_main_t * vm,
-                    vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
-  clib_warning ("you shouldn't be here, leaking buffers...");
-  return frame->n_vectors;
-}
-
 static clib_error_t *
 gtpu_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
 {
@@ -120,7 +125,6 @@ VNET_DEVICE_CLASS (gtpu_device_class,static) = {
   .name = "GTPU",
   .format_device_name = format_gtpu_name,
   .format_tx_trace = format_gtpu_encap_trace,
-  .tx_function = dummy_interface_tx,
   .admin_up_down_function = gtpu_interface_admin_up_down,
 };
 /* *INDENT-ON* */
@@ -365,12 +369,6 @@ mcast_shared_remove (ip46_address_t * dst)
   hash_unset_mem_free (&gtpu_main.mcast_shared, dst);
 }
 
-static inline fib_protocol_t
-fib_ip_proto (bool is_ip6)
-{
-  return (is_ip6) ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4;
-}
-
 int vnet_gtpu_add_del_tunnel
   (vnet_gtpu_add_del_tunnel_args_t * a, u32 * sw_if_indexp)
 {
@@ -382,7 +380,7 @@ int vnet_gtpu_add_del_tunnel
   u32 sw_if_index = ~0;
   gtpu4_tunnel_key_t key4;
   gtpu6_tunnel_key_t key6;
-  u32 is_ip6 = a->is_ip6;
+  bool is_ip6 = !ip46_address_is_ip4 (&a->dst);
 
   if (!is_ip6)
     {
@@ -412,7 +410,7 @@ int vnet_gtpu_add_del_tunnel
         return VNET_API_ERROR_INVALID_DECAP_NEXT;
 
       pool_get_aligned (gtm->tunnels, t, CLIB_CACHE_LINE_BYTES);
-      memset (t, 0, sizeof (*t));
+      clib_memset (t, 0, sizeof (*t));
 
       /* copy from arg structure */
 #define _(x) t->x = a->x;
@@ -421,6 +419,9 @@ int vnet_gtpu_add_del_tunnel
 
       ip_udp_gtpu_rewrite (t, is_ip6);
 
+      /* clear the flow index */
+      t->flow_index = ~0;
+
       /* copy the key */
       if (is_ip6)
         hash_set_mem_alloc (&gtm->gtpu6_tunnel_by_key, &key6,
@@ -462,6 +463,11 @@ int vnet_gtpu_add_del_tunnel
           hi = vnet_get_hw_interface (vnm, hw_if_index);
         }
 
+      /* Set gtpu tunnel output node */
+      u32 encap_index = !is_ip6 ?
+        gtpu4_encap_node.index : gtpu6_encap_node.index;
+      vnet_set_interface_output_node (vnm, hw_if_index, encap_index);
+
       t->hw_if_index = hw_if_index;
       t->sw_if_index = sw_if_index = hi->sw_if_index;
 
@@ -481,32 +487,30 @@ int vnet_gtpu_add_del_tunnel
 
       fib_node_init (&t->node, gtm->fib_node_type);
       fib_prefix_t tun_dst_pfx;
-      u32 encap_index = !is_ip6 ?
-        gtpu4_encap_node.index : gtpu6_encap_node.index;
       vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL;
 
      fib_prefix_from_ip46_addr (&t->dst, &tun_dst_pfx);
      if (!ip46_address_is_multicast (&t->dst))
        {
          /* Unicast tunnel -
-          * source the FIB entry for the tunnel's destination
-          * and become a child thereof. The tunnel will then get poked
+          * Track the FIB entry for the tunnel's destination.
+          * The tunnel will then get poked
           * when the forwarding for the entry updates, and the tunnel can
           * re-stack accordingly
           */
          vtep_addr_ref (&t->src);
-         t->fib_entry_index = fib_table_entry_special_add
-           (t->encap_fib_index, &tun_dst_pfx, FIB_SOURCE_RR,
-            FIB_ENTRY_FLAG_NONE);
-         t->sibling_index = fib_entry_child_add
-           (t->fib_entry_index, gtm->fib_node_type, t - gtm->tunnels);
+         t->fib_entry_index = fib_entry_track (t->encap_fib_index,
+                                               &tun_dst_pfx,
+                                               gtm->fib_node_type,
+                                               t - gtm->tunnels,
+                                               &t->sibling_index);
          gtpu_tunnel_restack_dpo (t);
        }
      else
        {
          /* Multicast tunnel -
-          * as the same mcast group can be used for mutiple mcast tunnels
-          * with different VNIs, create the output fib adjecency only if
+          * as the same mcast group can be used for multiple mcast tunnels
+          * with different VNIs, create the output adjacency only if
           * it does not already exist
           */
          fib_protocol_t fp = fib_ip_proto (is_ip6);
@@ -520,8 +524,9 @@ int vnet_gtpu_add_del_tunnel
            .frp_addr = zero_addr,
            .frp_sw_if_index = 0xffffffff,
            .frp_fib_index = ~0,
-           .frp_weight = 0,
+           .frp_weight = 1,
            .frp_flags = FIB_ROUTE_PATH_LOCAL,
+           .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD,
          };
          const mfib_prefix_t mpfx = {
            .fp_proto = fp,
@@ -535,17 +540,14 @@ int vnet_gtpu_add_del_tunnel
           *  - the accepting interface is that from the API
           */
          mfib_table_entry_path_update (t->encap_fib_index,
-                                       &mpfx,
-                                       MFIB_SOURCE_GTPU,
-                                       &path, MFIB_ITF_FLAG_FORWARD);
+                                       &mpfx, MFIB_SOURCE_GTPU, &path);
 
          path.frp_sw_if_index = a->mcast_sw_if_index;
          path.frp_flags = FIB_ROUTE_PATH_FLAG_NONE;
+         path.frp_mitf_flags = MFIB_ITF_FLAG_ACCEPT;
          mfei = mfib_table_entry_path_update (t->encap_fib_index,
                                               &mpfx,
-                                              MFIB_SOURCE_GTPU,
-                                              &path,
-                                              MFIB_ITF_FLAG_ACCEPT);
+                                              MFIB_SOURCE_GTPU, &path);
 
          /*
           * Create the mcast adjacency to send traffic to the group
@@ -573,9 +575,6 @@ int vnet_gtpu_add_del_tunnel
          flood_class = VNET_FLOOD_CLASS_TUNNEL_MASTER;
        }
 
-      /* Set gtpu tunnel output node */
-      hi->output_node_index = encap_index;
-
      vnet_get_sw_interface (vnet_get_main (),
                             sw_if_index)->flood_class = flood_class;
    }
@@ -593,8 +592,8 @@ int vnet_gtpu_add_del_tunnel
       si->flags |= VNET_SW_INTERFACE_FLAG_HIDDEN;
 
       /* make sure tunnel is removed from l2 bd or xconnect */
-      set_int_l2_mode (gtm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0, 0, 0,
-                       0);
+      set_int_l2_mode (gtm->vlib_main, vnm, MODE_L3, t->sw_if_index, 0,
+                       L2_BD_PORT_TYPE_NORMAL, 0, 0);
       vec_add1 (gtm->free_gtpu_tunnel_hw_if_indices, t->hw_if_index);
       gtm->tunnel_index_by_sw_if_index[t->sw_if_index] = ~0;
 
@@ -606,9 +605,11 @@ int vnet_gtpu_add_del_tunnel
 
       if (!ip46_address_is_multicast (&t->dst))
         {
+          if (t->flow_index != ~0)
+            vnet_flow_del (vnm, t->flow_index);
+
           vtep_addr_unref (&t->src);
-          fib_entry_child_remove (t->fib_entry_index, t->sibling_index);
-          fib_table_entry_delete_index (t->fib_entry_index, FIB_SOURCE_RR);
+          fib_entry_untrack (t->fib_entry_index, t->sibling_index);
         }
       else if (vtep_addr_unref (&t->dst) == 0)
         {
@@ -623,6 +624,17 @@ int vnet_gtpu_add_del_tunnel
   if (sw_if_indexp)
     *sw_if_indexp = sw_if_index;
 
+  if (a->is_add)
+    {
+      /* register udp ports */
+      if (!is_ip6 && !udp_is_valid_dst_port (UDP_DST_PORT_GTPU, 1))
+        udp_register_dst_port (gtm->vlib_main, UDP_DST_PORT_GTPU,
+                               gtpu4_input_node.index, /* is_ip4 */ 1);
+      if (is_ip6 && !udp_is_valid_dst_port (UDP_DST_PORT_GTPU6, 0))
+        udp_register_dst_port (gtm->vlib_main, UDP_DST_PORT_GTPU6,
+                               gtpu6_input_node.index, /* is_ip4 */ 0);
+    }
+
   return 0;
 }
 
@@ -687,8 +699,8 @@ gtpu_add_del_tunnel_command_fn (vlib_main_t * vm,
   clib_error_t *error = NULL;
 
   /* Cant "universally zero init" (={0}) due to GCC bug 53119 */
-  memset (&src, 0, sizeof src);
-  memset (&dst, 0, sizeof dst);
+  clib_memset (&src, 0, sizeof src);
+  clib_memset (&dst, 0, sizeof dst);
 
   /* Get a line of input. */
   if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
@@ -811,10 +823,9 @@ gtpu_add_del_tunnel_command_fn (vlib_main_t * vm,
       goto done;
     }
 
-  memset (a, 0, sizeof (*a));
+  clib_memset (a, 0, sizeof (*a));
 
   a->is_add = is_add;
-  a->is_ip6 = ipv6_set;
 
 #define _(x) a->x = x;
   foreach_copy_field;
@@ -1009,7 +1020,7 @@ set_ip4_gtpu_bypass (vlib_main_t * vm,
  *            ip4-lookup [2]
  * @cliexend
  *
- * Example of how to display the feature enabed on an interface:
+ * Example of how to display the feature enabled on an interface:
  * @cliexstart{show ip interface features GigabitEthernet2/0/0}
  * IP feature paths configured on GigabitEthernet2/0/0...
  * ...
@@ -1066,7 +1077,7 @@ set_ip6_gtpu_bypass (vlib_main_t * vm,
  *            ip6-lookup [2]
  * @cliexend
 *
- * Example of how to display the feature enabed on an interface:
+ * Example of how to display the feature enabled on an interface:
 * @cliexstart{show ip interface features GigabitEthernet2/0/0}
 * IP feature paths configured on GigabitEthernet2/0/0...
 * ...
@@ -1084,7 +1095,136 @@ set_ip6_gtpu_bypass (vlib_main_t * vm,
 VLIB_CLI_COMMAND (set_interface_ip6_gtpu_bypass_command, static) = {
   .path = "set interface ip6 gtpu-bypass",
   .function = set_ip6_gtpu_bypass,
-  .short_help = "set interface ip gtpu-bypass <interface> [del]",
+  .short_help = "set interface ip6 gtpu-bypass <interface> [del]",
+};
+/* *INDENT-ON* */
+
+int
+vnet_gtpu_add_del_rx_flow (u32 hw_if_index, u32 t_index, int is_add)
+{
+  gtpu_main_t *gtm = &gtpu_main;
+  gtpu_tunnel_t *t = pool_elt_at_index (gtm->tunnels, t_index);
+  vnet_main_t *vnm = vnet_get_main ();
+  if (is_add)
+    {
+      if (t->flow_index == ~0)
+        {
+          vnet_flow_t flow = {
+            .actions =
+              VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_MARK |
+              VNET_FLOW_ACTION_BUFFER_ADVANCE,
+            .mark_flow_id = t_index + gtm->flow_id_start,
+            .redirect_node_index = gtpu4_flow_input_node.index,
+            .buffer_advance = sizeof (ethernet_header_t) +
+              sizeof (ip4_header_t) + sizeof (udp_header_t),
+            .type = VNET_FLOW_TYPE_IP4_GTPU_IP4,
+            .ip4_gtpu = {
+                         .protocol = IP_PROTOCOL_UDP,
+                         .src_addr.addr = t->dst.ip4,
+                         .src_addr.mask.as_u32 = ~0,
+                         .dst_addr.addr = t->src.ip4,
+                         .dst_addr.mask.as_u32 = ~0,
+                         .teid = t->teid,
+                         }
+            ,
+          };
+          vnet_flow_add (vnm, &flow, &t->flow_index);
+        }
+
+      return vnet_flow_enable (vnm, t->flow_index, hw_if_index);
+    }
+
+  /* flow index is removed when the tunnel is deleted */
+  return vnet_flow_disable (vnm, t->flow_index, hw_if_index);
+}
+
+u32
+vnet_gtpu_get_tunnel_index (u32 sw_if_index)
+{
+  gtpu_main_t *gtm = &gtpu_main;
+
+  if (sw_if_index >= vec_len (gtm->tunnel_index_by_sw_if_index))
+    return ~0;
+  return gtm->tunnel_index_by_sw_if_index[sw_if_index];
+}
+
+static clib_error_t *
+gtpu_offload_command_fn (vlib_main_t * vm,
+                         unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return 0;
+
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 rx_sw_if_index = ~0;
+  u32 hw_if_index = ~0;
+  int is_add = 1;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "hw %U", unformat_vnet_hw_interface, vnm,
+                    &hw_if_index))
+        continue;
+      if (unformat (line_input, "rx %U", unformat_vnet_sw_interface, vnm,
+                    &rx_sw_if_index))
+        continue;
+      if (unformat (line_input, "del"))
+        {
+          is_add = 0;
+          continue;
+        }
+      return clib_error_return (0, "unknown input `%U'",
+                                format_unformat_error, line_input);
+    }
+
+  if (rx_sw_if_index == ~0)
+    return clib_error_return (0, "missing rx interface");
+  if (hw_if_index == ~0)
+    return clib_error_return (0, "missing hw interface");
+
+  u32 t_index = vnet_gtpu_get_tunnel_index (rx_sw_if_index);
+  if (t_index == ~0)
+    return clib_error_return (0, "%U is not a gtpu tunnel",
+                              format_vnet_sw_if_index_name, vnm,
+                              rx_sw_if_index);
+
+  gtpu_main_t *gtm = &gtpu_main;
+  gtpu_tunnel_t *t = pool_elt_at_index (gtm->tunnels, t_index);
+
+  /* first support ipv4 hw offload */
+  if (!ip46_address_is_ip4 (&t->dst))
+    return clib_error_return (0, "currently only IPV4 tunnels are supported");
+
+  /* inner protocol should be IPv4 */
+  if (t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT)
+    return clib_error_return (0,
+                              "currently only inner IPV4 protocol is supported");
+
+  vnet_hw_interface_t *hw_if = vnet_get_hw_interface (vnm, hw_if_index);
+  ip4_main_t *im = &ip4_main;
+  u32 rx_fib_index =
+    vec_elt (im->fib_index_by_sw_if_index, hw_if->sw_if_index);
+
+  if (t->encap_fib_index != rx_fib_index)
+    return clib_error_return (0, "interface/tunnel fib mismatch");
+
+  if (vnet_gtpu_add_del_rx_flow (hw_if_index, t_index, is_add))
+    return clib_error_return (0, "error %s flow",
+                              is_add ? "enabling" : "disabling");
+
+  return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (gtpu_offload_command, static) = {
+    .path = "set flow-offload gtpu",
+    .short_help =
+    "set flow-offload gtpu hw <interface-name> rx <tunnel-name> [del]",
+    .function = gtpu_offload_command_fn,
 };
 /* *INDENT-ON* */
@@ -1096,6 +1236,9 @@ gtpu_init (vlib_main_t * vm)
   gtm->vnet_main = vnet_get_main ();
   gtm->vlib_main = vm;
 
+  vnet_flow_get_range (gtm->vnet_main, "gtpu", 1024 * 1024,
+                       &gtm->flow_id_start);
+
   /* initialize the ip6 hash */
   gtm->gtpu6_tunnel_by_key = hash_create_mem (0,
                                               sizeof (gtpu6_tunnel_key_t),
@@ -1105,11 +1248,6 @@ gtpu_init (vlib_main_t * vm)
                                        sizeof (ip46_address_t),
                                        sizeof (mcast_shared_t));
 
-  udp_register_dst_port (vm, UDP_DST_PORT_GTPU,
-                         gtpu4_input_node.index, /* is_ip4 */ 1);
-  udp_register_dst_port (vm, UDP_DST_PORT_GTPU6,
-                         gtpu6_input_node.index, /* is_ip4 */ 0);
-
   gtm->fib_node_type = fib_node_register_new_type (&gtpu_vft);
 
   return 0;
@@ -1120,7 +1258,7 @@ VLIB_INIT_FUNCTION (gtpu_init);
 
 /* *INDENT-OFF* */
 VLIB_PLUGIN_REGISTER () = {
     .version = VPP_BUILD_VER,
-    .description = "GTPv1-U",
+    .description = "GPRS Tunnelling Protocol, User Data (GTPv1-U)",
 };
 /* *INDENT-ON* */
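Usage note - a minimal sketch, not part of the diff above. The new hunks add an RX flow-offload path for IPv4 GTP-U tunnels, reachable either through the new "set flow-offload gtpu hw <interface-name> rx <tunnel-name> [del]" CLI or through the helpers vnet_gtpu_get_tunnel_index() and vnet_gtpu_add_del_rx_flow() introduced here. The fragment below assumes only those two functions (and that their prototypes are exported via <gtpu/gtpu.h>, as in the matching gtpu.h change); the interface indices and the wrapper name are hypothetical placeholders.

    #include <vnet/vnet.h>
    #include <gtpu/gtpu.h>

    /* Sketch: enable hardware RX flow offload for an existing IPv4 gtpu
     * tunnel.  gtpu_sw_if_index is the tunnel's sw_if_index; nic_hw_if_index
     * is the hw_if_index of the NIC that should match and mark the flow. */
    static int
    example_enable_gtpu_rx_offload (u32 gtpu_sw_if_index, u32 nic_hw_if_index)
    {
      /* map the tunnel interface back to its index in the tunnel pool */
      u32 t_index = vnet_gtpu_get_tunnel_index (gtpu_sw_if_index);
      if (t_index == ~0)
        return -1;              /* not a gtpu tunnel */

      /* is_add = 1 allocates the vnet_flow_t on first use and enables it on
       * the NIC; is_add = 0 only disables it - the flow itself is deleted
       * when the tunnel is removed */
      return vnet_gtpu_add_del_rx_flow (nic_hw_if_index, t_index,
                                        1 /* is_add */ );
    }

The equivalent CLI sequence might look like the following (interface names and addresses are examples only; the tunnel has to decap to ip4, since the offload handler rejects any other decap-next):

    create gtpu tunnel src 10.0.0.1 dst 10.0.0.2 teid 100 decap-next ip4
    set flow-offload gtpu hw TenGigabitEthernet3/0/0 rx gtpu_tunnel0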