X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fgre%2Fgre.c;h=2b95d99148a0f8c5db7a16383622424b016cc430;hb=14053c9dbd75182f5302f7388d17508f3930f7ce;hp=0b8d2cc8df7ae67d2a4bf46a01587ca9dc1d14d1;hpb=756cd9441752fc8f84104c9ee19099506ba89f85;p=vpp.git diff --git a/src/vnet/gre/gre.c b/src/vnet/gre/gre.c index 0b8d2cc8df7..2b95d99148a 100644 --- a/src/vnet/gre/gre.c +++ b/src/vnet/gre/gre.c @@ -19,6 +19,9 @@ #include #include +extern gre_main_t gre_main; + +#ifndef CLIB_MARCH_VARIANT gre_main_t gre_main; typedef struct @@ -38,6 +41,7 @@ typedef struct u64 as_u64[3]; }; } ip6_and_gre_union_t; +#endif /* CLIB_MARCH_VARIANT */ /* Packet trace structure */ @@ -54,6 +58,9 @@ typedef struct ip46_address_t dst; } gre_tx_trace_t; +extern u8 *format_gre_tx_trace (u8 * s, va_list * args); + +#ifndef CLIB_MARCH_VARIANT u8 * format_gre_tx_trace (u8 * s, va_list * args) { @@ -206,6 +213,7 @@ gre_build_rewrite (vnet_main_t * vnm, vnet_link_t link_type, const void *dst_address) { gre_main_t *gm = &gre_main; + const ip46_address_t *dst; ip4_and_gre_header_t *h4; ip6_and_gre_header_t *h6; gre_header_t *gre; @@ -214,6 +222,7 @@ gre_build_rewrite (vnet_main_t * vnm, u32 ti; u8 is_ipv6; + dst = dst_address; ti = gm->tunnel_index_by_sw_if_index[sw_if_index]; if (~0 == ti) @@ -234,7 +243,7 @@ gre_build_rewrite (vnet_main_t * vnm, h4->ip4.protocol = IP_PROTOCOL_GRE; /* fixup ip4 header length and checksum after-the-fact */ h4->ip4.src_address.as_u32 = t->tunnel_src.ip4.as_u32; - h4->ip4.dst_address.as_u32 = t->tunnel_dst.fp_addr.ip4.as_u32; + h4->ip4.dst_address.as_u32 = dst->ip4.as_u32; h4->ip4.checksum = ip4_header_checksum (&h4->ip4); } else @@ -249,8 +258,8 @@ gre_build_rewrite (vnet_main_t * vnm, /* fixup ip6 header length and checksum after-the-fact */ h6->ip6.src_address.as_u64[0] = t->tunnel_src.ip6.as_u64[0]; h6->ip6.src_address.as_u64[1] = t->tunnel_src.ip6.as_u64[1]; - h6->ip6.dst_address.as_u64[0] = t->tunnel_dst.fp_addr.ip6.as_u64[0]; - h6->ip6.dst_address.as_u64[1] = t->tunnel_dst.fp_addr.ip6.as_u64[1]; + h6->ip6.dst_address.as_u64[0] = dst->ip6.as_u64[0]; + h6->ip6.dst_address.as_u64[1] = dst->ip6.as_u64[1]; } if (PREDICT_FALSE (t->type == GRE_TUNNEL_TYPE_ERSPAN)) @@ -269,7 +278,7 @@ gre_build_rewrite (vnet_main_t * vnm, static void gre4_fixup (vlib_main_t * vm, - ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data) + const ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data) { ip4_header_t *ip0; @@ -283,7 +292,7 @@ gre4_fixup (vlib_main_t * vm, static void gre6_fixup (vlib_main_t * vm, - ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data) + const ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data) { ip6_header_t *ip0; @@ -292,8 +301,8 @@ gre6_fixup (vlib_main_t * vm, /* Fixup the payload length field in the GRE tunnel encap that was applied * at the midchain node */ ip0->payload_length = - clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)) - - sizeof (*ip0); + clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) - + sizeof (*ip0)); } void @@ -301,22 +310,90 @@ gre_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai) { gre_main_t *gm = &gre_main; gre_tunnel_t *t; - u32 ti; + adj_flags_t af; u8 is_ipv6; + u32 ti; ti = gm->tunnel_index_by_sw_if_index[sw_if_index]; t = pool_elt_at_index (gm->tunnels, ti); is_ipv6 = t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP6 ? 
1 : 0; + af = ADJ_FLAG_MIDCHAIN_IP_STACK; + + if (VNET_LINK_ETHERNET == adj_get_link_type (ai)) + af |= ADJ_FLAG_MIDCHAIN_NO_COUNT; adj_nbr_midchain_update_rewrite - (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, NULL, - (VNET_LINK_ETHERNET == adj_get_link_type (ai) ? - ADJ_FLAG_MIDCHAIN_NO_COUNT : ADJ_FLAG_NONE), - gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai), NULL)); + (ai, !is_ipv6 ? gre4_fixup : gre6_fixup, NULL, af, + gre_build_rewrite (vnm, sw_if_index, adj_get_link_type (ai), + &t->tunnel_dst.fp_addr)); gre_tunnel_stack (ai); } +adj_walk_rc_t +mgre_mk_complete_walk (adj_index_t ai, void *data) +{ + mgre_walk_ctx_t *ctx = data; + adj_midchain_fixup_t f; + + f = (ctx->t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP4 ? + gre4_fixup : gre6_fixup); + + adj_nbr_midchain_update_rewrite + (ai, f, NULL, ADJ_FLAG_MIDCHAIN_IP_STACK, + gre_build_rewrite (vnet_get_main (), + ctx->t->sw_if_index, + adj_get_link_type (ai), + &nhrp_entry_get_nh (ctx->ne)->fp_addr)); + + nhrp_entry_adj_stack (ctx->ne, ai); + + return (ADJ_WALK_RC_CONTINUE); +} + +adj_walk_rc_t +mgre_mk_incomplete_walk (adj_index_t ai, void *data) +{ + gre_tunnel_t *t = data; + adj_midchain_fixup_t f; + + f = (t->tunnel_dst.fp_proto == FIB_PROTOCOL_IP4 ? gre4_fixup : gre6_fixup); + + adj_nbr_midchain_update_rewrite (ai, f, NULL, ADJ_FLAG_NONE, NULL); + + adj_midchain_delegate_unstack (ai); + + return (ADJ_WALK_RC_CONTINUE); +} + +void +mgre_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai) +{ + gre_main_t *gm = &gre_main; + ip_adjacency_t *adj; + nhrp_entry_t *ne; + gre_tunnel_t *t; + u32 ti; + + adj = adj_get (ai); + ti = gm->tunnel_index_by_sw_if_index[sw_if_index]; + t = pool_elt_at_index (gm->tunnels, ti); + + ne = nhrp_entry_find (sw_if_index, &adj->sub_type.nbr.next_hop); + + if (NULL == ne) + // no NHRP entry to provide the next-hop + return; + + mgre_walk_ctx_t ctx = { + .t = t, + .ne = ne + }; + adj_nbr_walk_nh (sw_if_index, + adj->ia_nh_proto, + &adj->sub_type.nbr.next_hop, mgre_mk_complete_walk, &ctx); +} +#endif /* CLIB_MARCH_VARIANT */ typedef enum { @@ -328,173 +405,145 @@ typedef enum * @brief TX function. Only called for L2 payload including TEB or ERSPAN. * L3 traffic uses the adj-midchains. 
 */
-static uword
-gre_interface_tx (vlib_main_t * vm,
-		  vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (gre_encap_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
 {
   gre_main_t *gm = &gre_main;
-  vnet_main_t *vnm = gm->vnet_main;
-  u32 next_index;
-  u32 *from, *to_next, n_left_from, n_left_to_next;
-  u32 sw_if_index0 = 0;
-  u32 sw_if_index1 = 0;
-  adj_index_t adj_index0 = ADJ_INDEX_INVALID;
-  adj_index_t adj_index1 = ADJ_INDEX_INVALID;
-  gre_tunnel_t *gt0 = NULL;
-  gre_tunnel_t *gt1 = NULL;
-
-  /* Vector of buffer / pkt indices we're supposed to process */
-  from = vlib_frame_vector_args (frame);
+  u32 *from, n_left_from;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+  u32 sw_if_index[2] = { ~0, ~0 };
+  const gre_tunnel_t *gt[2] = { 0 };
+  adj_index_t adj_index[2] = { ADJ_INDEX_INVALID, ADJ_INDEX_INVALID };
 
-  /* Number of buffers / pkts */
+  from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
+  vlib_get_buffers (vm, from, bufs, n_left_from);
+
+  while (n_left_from >= 2)
+    {
 
-  /* Speculatively send the first buffer to the last disposition we used */
-  next_index = GRE_ENCAP_NEXT_L2_MIDCHAIN;
+      if (PREDICT_FALSE
+	  (sw_if_index[0] != vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+	{
+	  const vnet_hw_interface_t *hi;
+	  sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+	  hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+	  gt[0] = &gm->tunnels[hi->dev_instance];
+	  adj_index[0] = gt[0]->l2_adj_index;
+	}
+      if (PREDICT_FALSE
+	  (sw_if_index[1] != vnet_buffer (b[1])->sw_if_index[VLIB_TX]))
+	{
+	  const vnet_hw_interface_t *hi;
+	  sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+	  hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[1]);
+	  gt[1] = &gm->tunnels[hi->dev_instance];
+	  adj_index[1] = gt[1]->l2_adj_index;
+	}
 
-  while (n_left_from > 0)
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = adj_index[1];
+
+      if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
+	{
+	  /* Encap GRE seq# and ERSPAN type II header */
+	  erspan_t2_t *h0;
+	  u32 seq_num;
+	  u64 hdr;
+	  vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+	  h0 = vlib_buffer_get_current (b[0]);
+	  seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+	  hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+	  h0->seq_num = clib_host_to_net_u32 (seq_num);
+	  h0->t2_u64 = hdr;
+	  h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
+	}
+      if (PREDICT_FALSE (gt[1]->type == GRE_TUNNEL_TYPE_ERSPAN))
+	{
+	  /* Encap GRE seq# and ERSPAN type II header */
+	  erspan_t2_t *h0;
+	  u32 seq_num;
+	  u64 hdr;
+	  vlib_buffer_advance (b[1], -sizeof (erspan_t2_t));
+	  h0 = vlib_buffer_get_current (b[1]);
+	  seq_num = clib_atomic_fetch_add (&gt[1]->gre_sn->seq_num, 1);
+	  hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+	  h0->seq_num = clib_host_to_net_u32 (seq_num);
+	  h0->t2_u64 = hdr;
+	  h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[1]->session_id);
+	}
+
+      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+	{
+	  gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+					       b[0], sizeof (*tr));
+	  tr->tunnel_id = gt[0] - gm->tunnels;
+	  tr->src = gt[0]->tunnel_src;
+	  tr->dst = gt[0]->tunnel_dst.fp_addr;
+	  tr->length = vlib_buffer_length_in_chain (vm, b[0]);
+	}
+      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+	{
+	  gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+					       b[1], sizeof (*tr));
+	  tr->tunnel_id = gt[1] - gm->tunnels;
+	  tr->src = gt[1]->tunnel_src;
+	  tr->dst = gt[1]->tunnel_dst.fp_addr;
+	  tr->length = vlib_buffer_length_in_chain (vm, b[1]);
+	}
+
+      b += 2;
+      n_left_from -= 2;
+    }
+
+  while (n_left_from >= 1)
     {
-      /* set up to enqueue to our disposition with index = next_index */
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      while (n_left_from >= 4 && n_left_to_next >= 2)
+      if (PREDICT_FALSE
+	  (sw_if_index[0] != vnet_buffer (b[0])->sw_if_index[VLIB_TX]))
+	{
+	  const vnet_hw_interface_t *hi;
+	  sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+	  hi = vnet_get_sup_hw_interface (gm->vnet_main, sw_if_index[0]);
+	  gt[0] = &gm->tunnels[hi->dev_instance];
+	  adj_index[0] = gt[0]->l2_adj_index;
+	}
+
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
+
+      if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
 	{
-	  u32 bi0 = from[0];
-	  u32 bi1 = from[1];
-	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
-	  vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
-
-	  to_next[0] = bi0;
-	  to_next[1] = bi1;
-	  from += 2;
-	  to_next += 2;
-	  n_left_to_next -= 2;
-	  n_left_from -= 2;
-
-	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
-	    {
-	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
-	      vnet_hw_interface_t *hi0 =
-		vnet_get_sup_hw_interface (vnm, sw_if_index0);
-	      gt0 = &gm->tunnels[hi0->dev_instance];
-	      adj_index0 = gt0->l2_adj_index;
-	    }
-
-	  if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
-	    {
-	      if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
-		{
-		  sw_if_index1 = sw_if_index0;
-		  gt1 = gt0;
-		  adj_index1 = adj_index0;
-		}
-	      else
-		{
-		  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
-		  vnet_hw_interface_t *hi1 =
-		    vnet_get_sup_hw_interface (vnm, sw_if_index1);
-		  gt1 = &gm->tunnels[hi1->dev_instance];
-		  adj_index1 = gt1->l2_adj_index;
-		}
-	    }
-
-	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = adj_index0;
-	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = adj_index1;
-
-	  if (PREDICT_FALSE (gt0->type == GRE_TUNNEL_TYPE_ERSPAN))
-	    {
-	      /* Encap GRE seq# and ERSPAN type II header */
-	      vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
-	      erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-	      u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
-	      u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
-	      h0->seq_num = clib_host_to_net_u32 (seq_num);
-	      h0->t2_u64 = hdr;
-	      h0->t2.cos_en_t_session |=
-		clib_host_to_net_u16 (gt0->session_id);
-	    }
-	  if (PREDICT_FALSE (gt1->type == GRE_TUNNEL_TYPE_ERSPAN))
-	    {
-	      /* Encap GRE seq# and ERSPAN type II header */
-	      vlib_buffer_advance (b1, -sizeof (erspan_t2_t));
-	      erspan_t2_t *h1 = vlib_buffer_get_current (b1);
-	      u32 seq_num = clib_smp_atomic_add (&gt1->gre_sn->seq_num, 1);
-	      u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
-	      h1->seq_num = clib_host_to_net_u32 (seq_num);
-	      h1->t2_u64 = hdr;
-	      h1->t2.cos_en_t_session |=
-		clib_host_to_net_u16 (gt1->session_id);
-	    }
-
-	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-	    {
-	      gre_tx_trace_t *tr0 = vlib_add_trace (vm, node,
-						    b0, sizeof (*tr0));
-	      tr0->tunnel_id = gt0 - gm->tunnels;
-	      tr0->src = gt0->tunnel_src;
-	      tr0->dst = gt0->tunnel_dst.fp_addr;
-	      tr0->length = vlib_buffer_length_in_chain (vm, b0);
-	    }
-	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
-	    {
-	      gre_tx_trace_t *tr1 = vlib_add_trace (vm, node,
-						    b1, sizeof (*tr1));
-	      tr1->tunnel_id = gt1 - gm->tunnels;
-	      tr1->src = gt1->tunnel_src;
-	      tr1->dst = gt1->tunnel_dst.fp_addr;
-	      tr1->length = vlib_buffer_length_in_chain (vm, b1);
-	    }
+	  /* Encap GRE seq# and ERSPAN type II header */
+	  erspan_t2_t *h0;
+	  u32 seq_num;
+	  u64 hdr;
+	  vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
+	  h0 = vlib_buffer_get_current (b[0]);
+	  seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
+	  hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
+	  h0->seq_num = clib_host_to_net_u32 (seq_num);
+	  h0->t2_u64 = hdr;
+	  h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
 	}
-      while (n_left_from > 0 && n_left_to_next > 0)
+      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  u32 bi0 = from[0];
-	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
-
-	  to_next[0] = bi0;
-	  from += 1;
-	  to_next += 1;
-	  n_left_from -= 1;
-	  n_left_to_next -= 1;
-
-	  if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
-	    {
-	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
-	      vnet_hw_interface_t *hi0 =
-		vnet_get_sup_hw_interface (vnm, sw_if_index0);
-	      gt0 = &gm->tunnels[hi0->dev_instance];
-	      adj_index0 = gt0->l2_adj_index;
-	    }
-
-	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = adj_index0;
-
-	  if (PREDICT_FALSE (gt0->type == GRE_TUNNEL_TYPE_ERSPAN))
-	    {
-	      /* Encap GRE seq# and ERSPAN type II header */
-	      vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
-	      erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-	      u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
-	      u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
-	      h0->seq_num = clib_host_to_net_u32 (seq_num);
-	      h0->t2_u64 = hdr;
-	      h0->t2.cos_en_t_session |=
-		clib_host_to_net_u16 (gt0->session_id);
-	    }
-
-	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-	    {
-	      gre_tx_trace_t *tr = vlib_add_trace (vm, node,
-						   b0, sizeof (*tr));
-	      tr->tunnel_id = gt0 - gm->tunnels;
-	      tr->src = gt0->tunnel_src;
-	      tr->dst = gt0->tunnel_dst.fp_addr;
-	      tr->length = vlib_buffer_length_in_chain (vm, b0);
-	    }
+	  gre_tx_trace_t *tr = vlib_add_trace (vm, node,
+					       b[0], sizeof (*tr));
+	  tr->tunnel_id = gt[0] - gm->tunnels;
+	  tr->src = gt[0]->tunnel_src;
+	  tr->dst = gt[0]->tunnel_dst.fp_addr;
+	  tr->length = vlib_buffer_length_in_chain (vm, b[0]);
 	}
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      b += 1;
+      n_left_from -= 1;
     }
 
+  vlib_buffer_enqueue_to_single_next (vm, node, from,
+				      GRE_ENCAP_NEXT_L2_MIDCHAIN,
+				      frame->n_vectors);
+
   vlib_node_increment_counter (vm, node->node_index,
 			       GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
@@ -510,7 +559,6 @@ static char *gre_error_strings[] = {
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (gre_encap_node) =
 {
-  .function = gre_interface_tx,
   .name = "gre-encap",
   .vector_size = sizeof (u32),
   .format_trace = format_gre_tx_trace,
@@ -522,10 +570,9 @@ VLIB_REGISTER_NODE (gre_encap_node) =
     [GRE_ENCAP_NEXT_L2_MIDCHAIN] = "adj-l2-midchain",
   },
 };
-
-VLIB_NODE_FUNCTION_MULTIARCH (gre_encap_node, gre_interface_tx)
 /* *INDENT-ON* */
 
+#ifndef CLIB_MARCH_VARIANT
 static u8 *
 format_gre_tunnel_name (u8 * s, va_list * args)
 {
@@ -550,6 +597,29 @@ format_gre_device (u8 * s, va_list * args)
   return s;
 }
 
+static int
+gre_tunnel_desc (u32 sw_if_index,
+		 ip46_address_t * src, ip46_address_t * dst, u8 * is_l2)
+{
+  gre_main_t *gm = &gre_main;
+  gre_tunnel_t *t;
+  u32 ti;
+
+  ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
+
+  if (~0 == ti)
+    /* not one of ours */
+    return -1;
+
+  t = pool_elt_at_index (gm->tunnels, ti);
+
+  *src = t->tunnel_src;
+  *dst = t->tunnel_dst.fp_addr;
+  *is_l2 = t->type == GRE_TUNNEL_TYPE_TEB;
+
+  return (0);
+}
+
 /* *INDENT-OFF* */
 VNET_DEVICE_CLASS (gre_device_class) = {
   .name = "GRE tunnel device",
@@ -557,6 +627,7 @@ VNET_DEVICE_CLASS (gre_device_class) = {
   .format_device = format_gre_device,
   .format_tx_trace = format_gre_tx_trace,
   .admin_up_down_function = gre_interface_admin_up_down,
+  .ip_tun_desc = gre_tunnel_desc,
 #ifdef
SOON .clear counter = 0; #endif @@ -570,7 +641,17 @@ VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = { .update_adjacency = gre_update_adj, .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P, }; + +VNET_HW_INTERFACE_CLASS (mgre_hw_interface_class) = { + .name = "mGRE", + .format_header = format_gre_header_with_length, + .unformat_header = unformat_gre_header, + .build_rewrite = gre_build_rewrite, + .update_adjacency = mgre_update_adj, + .flags = VNET_HW_INTERFACE_CLASS_FLAG_NBMA, +}; /* *INDENT-ON* */ +#endif /* CLIB_MARCH_VARIANT */ static void add_protocol (gre_main_t * gm, gre_protocol_t protocol, char *protocol_name) @@ -597,7 +678,7 @@ gre_init (vlib_main_t * vm) ip_main_t *im = &ip_main; ip_protocol_info_t *pi; - memset (gm, 0, sizeof (gm[0])); + clib_memset (gm, 0, sizeof (gm[0])); gm->vlib_main = vm; gm->vnet_main = vnet_get_main ();
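
Note on the gre6_fixup hunk above (not part of the patch): besides adding the const qualifier, it corrects the ip6 payload_length computation. The old code subtracted sizeof (*ip0) from a value that had already been converted to network byte order, which produces the wrong length on little-endian hosts. A minimal stand-alone sketch in plain C, using hypothetical lengths and no VPP types, of why the subtraction must happen before the byte swap:

#include <arpa/inet.h>		/* htons */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t chain_len = 140;	/* hypothetical buffer-chain length in bytes */
  uint16_t ip6_hdr_len = 40;	/* fixed ip6 header size, sizeof (ip6_header_t) */

  /* Old expression: convert first, then subtract. On a little-endian host
   * htons (140) is 0x8c00, and 0x8c00 - 40 is not htons (100). */
  uint16_t old_way = htons (chain_len) - ip6_hdr_len;

  /* New expression: subtract first, then convert. */
  uint16_t new_way = htons (chain_len - ip6_hdr_len);

  printf ("old=0x%04x new=0x%04x expected=0x%04x\n",
	  old_way, new_way, htons (100));
  return 0;
}

On a big-endian host the two expressions happen to agree, which is why the bug only shows up as a corrupt payload_length on little-endian targets.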