X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fgre%2Fgre.c;h=72c76fca5df197877980065d2f1ef3b2f4c3f8e4;hb=8e22054209ae9c4f08dae16f1aff910d8c8d0b76;hp=785476d0905e15b4d458a8580532d094ad3ead74;hpb=9ff647a995ba43dcee898c1904d9ec8298b069c8;p=vpp.git

diff --git a/src/vnet/gre/gre.c b/src/vnet/gre/gre.c
index 785476d0905..72c76fca5df 100644
--- a/src/vnet/gre/gre.c
+++ b/src/vnet/gre/gre.c
@@ -19,6 +19,9 @@
 #include 
 #include 
 
+extern gre_main_t gre_main;
+
+#ifndef CLIB_MARCH_VARIANT
 gre_main_t gre_main;
 
 typedef struct
@@ -38,6 +41,7 @@ typedef struct
     u64 as_u64[3];
   };
 } ip6_and_gre_union_t;
+#endif /* CLIB_MARCH_VARIANT */
 
 /* Packet trace structure */
@@ -54,6 +58,9 @@ typedef struct
   ip46_address_t dst;
 } gre_tx_trace_t;
 
+extern u8 *format_gre_tx_trace (u8 * s, va_list * args);
+
+#ifndef CLIB_MARCH_VARIANT
 u8 *
 format_gre_tx_trace (u8 * s, va_list * args)
 {
@@ -62,7 +69,7 @@ format_gre_tx_trace (u8 * s, va_list * args)
   gre_tx_trace_t *t = va_arg (*args, gre_tx_trace_t *);
 
   s = format (s, "GRE: tunnel %d len %d src %U dst %U",
-	      t->tunnel_id, clib_net_to_host_u16 (t->length),
+	      t->tunnel_id, t->length,
 	      format_ip46_address, &t->src, IP46_TYPE_ANY,
 	      format_ip46_address, &t->dst, IP46_TYPE_ANY);
   return s;
@@ -100,7 +107,7 @@ format_gre_header_with_length (u8 * s, va_list * args)
 
   s = format (s, "GRE %U", format_gre_protocol, p);
 
-  if (max_header_bytes != 0 && header_bytes > max_header_bytes)
+  if (max_header_bytes != 0 && header_bytes < max_header_bytes)
     {
       gre_protocol_info_t *pi = gre_get_protocol_info (gm, p);
       vlib_node_t *node = vlib_get_node (gm->vlib_main, pi->node_index);
@@ -208,6 +215,7 @@ gre_build_rewrite (vnet_main_t * vnm,
   gre_main_t *gm = &gre_main;
   ip4_and_gre_header_t *h4;
   ip6_and_gre_header_t *h6;
+  gre_header_t *gre;
   u8 *rewrite = NULL;
   gre_tunnel_t *t;
   u32 ti;
@@ -227,9 +235,7 @@ gre_build_rewrite (vnet_main_t * vnm,
     {
       vec_validate (rewrite, sizeof (*h4) - 1);
       h4 = (ip4_and_gre_header_t *) rewrite;
-      h4->gre.protocol =
-        clib_host_to_net_u16 (gre_proto_from_vnet_link (link_type));
-
+      gre = &h4->gre;
       h4->ip4.ip_version_and_header_length = 0x45;
       h4->ip4.ttl = 254;
       h4->ip4.protocol = IP_PROTOCOL_GRE;
@@ -242,9 +248,7 @@ gre_build_rewrite (vnet_main_t * vnm,
     {
       vec_validate (rewrite, sizeof (*h6) - 1);
       h6 = (ip6_and_gre_header_t *) rewrite;
-      h6->gre.protocol =
-        clib_host_to_net_u16 (gre_proto_from_vnet_link (link_type));
-
+      gre = &h6->gre;
       h6->ip6.ip_version_traffic_class_and_flow_label =
         clib_host_to_net_u32 (6 << 28);
       h6->ip6.hop_limit = 255;
@@ -256,13 +260,23 @@
       h6->ip6.dst_address.as_u64[1] = t->tunnel_dst.fp_addr.ip6.as_u64[1];
     }
 
+  if (PREDICT_FALSE (t->type == GRE_TUNNEL_TYPE_ERSPAN))
+    {
+      gre->protocol = clib_host_to_net_u16 (GRE_PROTOCOL_erspan);
+      gre->flags_and_version = clib_host_to_net_u16 (GRE_FLAGS_SEQUENCE);
+    }
+  else
+    gre->protocol =
+      clib_host_to_net_u16 (gre_proto_from_vnet_link (link_type));
+
   return (rewrite);
 }
 
 #define is_v4_packet(_h) ((*(u8*) _h) & 0xF0) == 0x40
 
-void
-gre4_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b0)
+static void
+gre4_fixup (vlib_main_t * vm,
+            ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data)
 {
   ip4_header_t *ip0;
@@ -274,8 +288,9 @@ gre4_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b0)
   ip0->checksum = ip4_header_checksum (ip0);
 }
 
-void
-gre6_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b0)
+static void
+gre6_fixup (vlib_main_t * vm,
+            ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data)
 {
   ip6_header_t *ip0;
@@ -284,8 +299,8 @@ gre6_fixup (vlib_main_t * vm, ip_adjacency_t * adj, vlib_buffer_t * b0)
   /* Fixup the payload length field in the GRE tunnel encap that was applied
    * at the midchain node */
   ip0->payload_length =
-    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)) -
-    sizeof (*ip0);
+    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
+                          sizeof (*ip0));
 }
 
 void
@@ -293,125 +308,216 @@ gre_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
 {
   gre_main_t *gm = &gre_main;
   gre_tunnel_t *t;
-  u32 ti;
+  adj_flags_t af;
   u8 is_ipv6;
+  u32 ti;
 
   ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
   t = pool_elt_at_index (gm->tunnels, ti);
   is_ipv6 = t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6 ? 1 : 0;
+  af = ADJ_FLAG_MIDCHAIN_IP_STACK;
 
-  adj_nbr_midchain_update_rewrite (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, (VNET_LINK_ETHERNET == adj_get_link_type (ai) ? ADJ_FLAG_MIDCHAIN_NO_COUNT : ADJ_FLAG_NONE), gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai), NULL));
+  if (VNET_LINK_ETHERNET == adj_get_link_type (ai))
+    af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
+
+  adj_nbr_midchain_update_rewrite
+    (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, NULL, af,
+     gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai), NULL));
 
   gre_tunnel_stack (ai);
 }
+#endif /* CLIB_MARCH_VARIANT */
+
+typedef enum
+{
+  GRE_ENCAP_NEXT_L2_MIDCHAIN,
+  GRE_ENCAP_N_NEXT,
+} gre_encap_next_t;
 
 /**
- * @brief TX function. Only called L2. L3 traffic uses the adj-midchains
+ * @brief TX function. Only called for L2 payload including TEB or ERSPAN.
+ * L3 traffic uses the adj-midchains.
  */
-static uword
-gre_interface_tx_inline (vlib_main_t * vm,
-                         vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (gre_encap_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+                               vlib_frame_t * frame)
 {
   gre_main_t *gm = &gre_main;
-  u32 next_index;
-  u32 *from, *to_next, n_left_from, n_left_to_next;
-  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
-  const gre_tunnel_t *gt = pool_elt_at_index (gm->tunnels, rd->dev_instance);
-  u8 is_ipv6 = gt->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6 ? 1 : 0;
+  u32 *from, n_left_from;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+  u32 sw_if_index[2] = { ~0, ~0 };
+  const gre_tunnel_t *gt[2] = { 0 };
+  adj_index_t adj_index[2] = { ADJ_INDEX_INVALID, ADJ_INDEX_INVALID };
 
-  /* Vector of buffer / pkt indices we're supposed to process */
   from = vlib_frame_vector_args (frame);
-
-  /* Number of buffers / pkts */
   n_left_from = frame->n_vectors;
+  vlib_get_buffers (vm, from, bufs, n_left_from);
+
+  while (n_left_from >= 2)
+    {
+
+      if (PREDICT_FALSE
+          (sw_if_index[0] != vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+        {
+          const vnet_hw_interface_t *hi;
+          sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+          hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+          gt[0] = &gm->tunnels[hi->dev_instance];
+          adj_index[0] = gt[0]->l2_adj_index;
+        }
+      if (PREDICT_FALSE
+          (sw_if_index[1] != vnet_buffer (b[1])->sw_if_index[VLIB_TX]))
+        {
+          const vnet_hw_interface_t *hi;
+          sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+          hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[1]);
+          gt[1] = &gm->tunnels[hi->dev_instance];
+          adj_index[1] = gt[1]->l2_adj_index;
+        }
+
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = adj_index[1];
+
+      if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
+        {
+          /* Encap GRE seq# and ERSPAN type II header */
+          erspan_t2_t *h0;
+          u32 seq_num;
+          u64 hdr;
+          vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+          h0 = vlib_buffer_get_current (b[0]);
+          seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+          hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+          h0->seq_num = clib_host_to_net_u32 (seq_num);
+          h0->t2_u64 = hdr;
+          h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
+        }
+      if (PREDICT_FALSE (gt[1]->type == GRE_TUNNEL_TYPE_ERSPAN))
+        {
+          /* Encap GRE seq# and ERSPAN type II header */
+          erspan_t2_t *h0;
+          u32 seq_num;
+          u64 hdr;
+          vlib_buffer_advance (b[1], -sizeof (erspan_t2_t));
+          h0 = vlib_buffer_get_current (b[1]);
+          seq_num = clib_atomic_fetch_add (&gt[1]->gre_sn->seq_num, 1);
+          hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+          h0->seq_num = clib_host_to_net_u32 (seq_num);
+          h0->t2_u64 = hdr;
+          h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[1]->session_id);
+        }
+
+      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+        {
+          gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+                                               b[0], sizeof (*tr));
+          tr->tunnel_id = gt[0] - gm->tunnels;
+          tr->src = gt[0]->tunnel_src;
+          tr->dst = gt[0]->tunnel_dst.fp_addr;
+          tr->length = vlib_buffer_length_in_chain (vm, b[0]);
+        }
+      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+        {
+          gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+                                               b[1], sizeof (*tr));
+          tr->tunnel_id = gt[1] - gm->tunnels;
+          tr->src = gt[1]->tunnel_src;
+          tr->dst = gt[1]->tunnel_dst.fp_addr;
+          tr->length = vlib_buffer_length_in_chain (vm, b[1]);
+        }
 
-  /* Speculatively send the first buffer to the last disposition we used */
-  next_index = node->cached_next_index;
+      b += 2;
+      n_left_from -= 2;
+    }
 
-  while (n_left_from > 0)
+  while (n_left_from >= 1)
     {
-      /* set up to enqueue to our disposition with index = next_index */
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
-      /*
-       * FIXME DUAL LOOP
-       */
+      if (PREDICT_FALSE
+          (sw_if_index[0] != vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+        {
+          const vnet_hw_interface_t *hi;
+          sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+          hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+          gt[0] = &gm->tunnels[hi->dev_instance];
+          adj_index[0] = gt[0]->l2_adj_index;
+        }
 
-      while (n_left_from > 0 && n_left_to_next > 0)
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+
+      if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
        {
-          vlib_buffer_t *b0;
-          u32 bi0;
-
-          bi0 = from[0];
-          to_next[0] = bi0;
-          from += 1;
-          to_next += 1;
-          n_left_from -= 1;
-          n_left_to_next -= 1;
-
-          b0 = vlib_get_buffer (vm, bi0);
-
-          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gt->l2_adj_index;
-
-          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-            {
-              gre_tx_trace_t *tr = vlib_add_trace (vm, node,
-                                                   b0, sizeof (*tr));
-              tr->tunnel_id = gt - gm->tunnels;
-              tr->src = gt->tunnel_src;
-              tr->dst = gt->tunnel_src;
-              tr->length = vlib_buffer_length_in_chain (vm, b0);
-            }
-
-          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           bi0, gt->l2_tx_arc);
+          /* Encap GRE seq# and ERSPAN type II header */
+          erspan_t2_t *h0;
+          u32 seq_num;
+          u64 hdr;
+          vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+          h0 = vlib_buffer_get_current (b[0]);
+          seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+          hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+          h0->seq_num = clib_host_to_net_u32 (seq_num);
+          h0->t2_u64 = hdr;
+          h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
        }
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+        {
+          gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+                                               b[0], sizeof (*tr));
+          tr->tunnel_id = gt[0] - gm->tunnels;
+          tr->src = gt[0]->tunnel_src;
+          tr->dst = gt[0]->tunnel_dst.fp_addr;
+          tr->length = vlib_buffer_length_in_chain (vm, b[0]);
+        }
+
+      b += 1;
+      n_left_from -= 1;
     }
 
-  vlib_node_increment_counter (vm, !is_ipv6 ? gre4_input_node.index :
-                               gre6_input_node.index,
+  vlib_buffer_enqueue_to_single_next (vm, node, from,
+                                      GRE_ENCAP_NEXT_L2_MIDCHAIN,
+                                      frame->n_vectors);
+
+  vlib_node_increment_counter (vm, node->node_index,
                               GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
 
   return frame->n_vectors;
 }
 
-static uword
-gre_interface_tx (vlib_main_t * vm,
-                  vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
-  return (gre_interface_tx_inline (vm, node, frame));
-}
+static char *gre_error_strings[] = {
+#define gre_error(n,s) s,
+#include "error.def"
+#undef gre_error
+};
 
-static uword
-gre_teb_interface_tx (vlib_main_t * vm,
-                      vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (gre_encap_node) =
 {
-  return (gre_interface_tx_inline (vm, node, frame));
-}
+  .name = "gre-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_gre_tx_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = GRE_N_ERROR,
+  .error_strings = gre_error_strings,
+  .n_next_nodes = GRE_ENCAP_N_NEXT,
+  .next_nodes = {
+    [GRE_ENCAP_NEXT_L2_MIDCHAIN] = "adj-l2-midchain",
+  },
+};
+/* *INDENT-ON* */
 
+#ifndef CLIB_MARCH_VARIANT
 static u8 *
 format_gre_tunnel_name (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
-  return format (s, "gre%d", dev_instance);
-}
+  gre_main_t *gm = &gre_main;
+  gre_tunnel_t *t;
 
-static u8 *
-format_gre_tunnel_teb_name (u8 * s, va_list * args)
-{
-  u32 dev_instance = va_arg (*args, u32);
-  return format (s, "teb-gre%d", dev_instance);
+  if (dev_instance >= vec_len (gm->tunnels))
+    return format (s, "");
+
+  t = pool_elt_at_index (gm->tunnels, dev_instance);
+  return format (s, "gre%d", t->user_instance);
 }
 
 static u8 *
@@ -430,36 +536,11 @@ VNET_DEVICE_CLASS (gre_device_class) = {
   .format_device_name = format_gre_tunnel_name,
   .format_device = format_gre_device,
   .format_tx_trace = format_gre_tx_trace,
-  .tx_function = gre_interface_tx,
   .admin_up_down_function = gre_interface_admin_up_down,
 #ifdef SOON
   .clear counter = 0;
 #endif
 };
-/* *INDENT-ON* */
-
-
-/* *INDENT-OFF* */
-VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class,
-                                   gre_interface_tx)
-
-VNET_DEVICE_CLASS (gre_device_teb_class) = {
-  .name = "GRE TEB tunnel device",
-  .format_device_name = format_gre_tunnel_teb_name,
-  .format_device = format_gre_device,
-  .format_tx_trace = format_gre_tx_trace,
-  .tx_function = gre_teb_interface_tx,
-  .admin_up_down_function = gre_interface_admin_up_down,
-#ifdef SOON
-  .clear counter = 0;
-#endif
-};
-
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_teb_class,
-                                   gre_teb_interface_tx)
 
 VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = {
   .name = "GRE",
@@ -470,6 +551,7 @@ VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = {
   .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
 };
 /* *INDENT-ON* */
+#endif /* CLIB_MARCH_VARIANT */
 
 static void
 add_protocol (gre_main_t * gm, gre_protocol_t protocol, char *protocol_name)
@@ -496,7 +578,7 @@ gre_init (vlib_main_t * vm)
   ip_main_t *im = &ip_main;
   ip_protocol_info_t *pi;
 
-  memset (gm, 0, sizeof (gm[0]));
+  clib_memset (gm, 0, sizeof (gm[0]));
   gm->vlib_main = vm;
   gm->vnet_main = vnet_get_main ();
@@ -516,8 +598,12 @@ gre_init (vlib_main_t * vm)
   gm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
   gm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
-  gm->tunnel_by_key4 = hash_create (0, sizeof (uword));
-  gm->tunnel_by_key6 = hash_create_mem (0, sizeof (u64[4]), sizeof (uword));
+  gm->tunnel_by_key4 =
+    hash_create_mem (0, sizeof (gre_tunnel_key4_t), sizeof (uword));
+  gm->tunnel_by_key6 =
+    hash_create_mem (0, sizeof (gre_tunnel_key6_t), sizeof (uword));
+  gm->seq_num_by_key =
+    hash_create_mem (0, sizeof (gre_sn_key_t), sizeof (uword));
 
 #define _(n,s) add_protocol (gm, GRE_PROTOCOL_##s, #s);
   foreach_gre_protocol
@@ -527,14 +613,6 @@ gre_init (vlib_main_t * vm)
 
 VLIB_INIT_FUNCTION (gre_init);
 
-gre_main_t *
-gre_get_main (vlib_main_t * vm)
-{
-  vlib_call_init_function (vm, gre_init);
-  return &gre_main;
-}
-
-
 /*
 * fd.io coding-style-patch-verification: ON
 *
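
Note on the gre_build_rewrite() hunks above: the rewrite now keeps a pointer to the GRE header of whichever encap (IPv4 or IPv6) was built and fills it in once at the end, so that ERSPAN tunnels carry the ERSPAN protocol type plus the sequence-number-present flag while every other tunnel type keeps the protocol derived from the payload link type. The standalone sketch below only illustrates that branch; gre_header_sketch_t and the GRE_PROTO_ERSPAN (0x88be), GRE_PROTO_TEB (0x6558) and GRE_FLAG_SEQUENCE (0x1000) constants are simplified stand-ins for VPP's gre_header_t, GRE_PROTOCOL_erspan, the Transparent Ethernet Bridging protocol and GRE_FLAGS_SEQUENCE, not definitions taken from gre.h.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Standard wire values (not VPP definitions): ERSPAN type II rides on GRE
 * protocol 0x88be, 0x6558 is Transparent Ethernet Bridging, and 0x1000 is
 * the GRE "sequence number present" flag. */
#define GRE_PROTO_ERSPAN  0x88be
#define GRE_PROTO_TEB     0x6558
#define GRE_FLAG_SEQUENCE 0x1000

typedef struct
{
  uint16_t flags_and_version;	/* C|R|K|S flags, recursion, version */
  uint16_t protocol;		/* payload protocol / ethertype */
} gre_header_sketch_t;

/* Mirrors the branch added to gre_build_rewrite(): ERSPAN tunnels announce
 * a sequence number and carry the ERSPAN protocol type, everything else
 * simply encodes the protocol that matches its payload link type. */
static gre_header_sketch_t
build_gre_base_header (int is_erspan, uint16_t payload_proto)
{
  gre_header_sketch_t h = { 0, 0 };

  if (is_erspan)
    {
      h.protocol = htons (GRE_PROTO_ERSPAN);
      h.flags_and_version = htons (GRE_FLAG_SEQUENCE);
    }
  else
    h.protocol = htons (payload_proto);

  return h;
}

int
main (void)
{
  gre_header_sketch_t teb = build_gre_base_header (0, GRE_PROTO_TEB);
  gre_header_sketch_t erspan = build_gre_base_header (1, 0);

  printf ("TEB:    flags 0x%04x proto 0x%04x\n",
          ntohs (teb.flags_and_version), ntohs (teb.protocol));
  printf ("ERSPAN: flags 0x%04x proto 0x%04x\n",
          ntohs (erspan.flags_and_version), ntohs (erspan.protocol));
  return 0;
}

Running it prints both header variants; the ERSPAN case mirrors the gre->protocol / gre->flags_and_version assignments added in the hunk at old line 256.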
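
The gre6_fixup() hunk also corrects an order-of-operations bug: the old code converted the buffer chain length to network byte order first and then subtracted sizeof (*ip0) in host arithmetic, which yields a wrong payload_length on little-endian hosts, while the new code subtracts first and byte-swaps the result. A minimal standalone sketch of the difference, with htons()/ntohs() standing in for clib_host_to_net_u16()/clib_net_to_host_u16(), 40 for sizeof (ip6_header_t), and an arbitrary 140-byte example length:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t chain_len = 140;	/* example vlib_buffer_length_in_chain() result */
  uint16_t ip6_hdr_len = 40;	/* stands in for sizeof (ip6_header_t) */

  /* old expression: byte-swap first, then subtract in host order */
  uint16_t broken = htons (chain_len) - ip6_hdr_len;
  /* new expression: subtract in host order, then byte-swap the result */
  uint16_t fixed = htons (chain_len - ip6_hdr_len);

  printf ("old payload_length as seen on the wire: %u\n", ntohs (broken));
  printf ("new payload_length as seen on the wire: %u\n", ntohs (fixed));
  return 0;
}

On a little-endian host this prints 55435 for the old expression and the expected 100 for the new one.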