diff --git a/src/vnet/vxlan-gbp/decap.c b/src/vnet/vxlan-gbp/decap.c
index 12097986af5..927c778b211 100644
--- a/src/vnet/vxlan-gbp/decap.c
+++ b/src/vnet/vxlan-gbp/decap.c
@@ -16,11 +16,8 @@
  */
 #include
-#include
-#include
-vlib_node_registration_t vxlan4_gbp_input_node;
-vlib_node_registration_t vxlan6_gbp_input_node;
+#include
 
 typedef struct
 {
@@ -29,6 +26,7 @@ typedef struct
   u32 error;
   u32 vni;
   u16 sclass;
+  u8 flags;
 } vxlan_gbp_rx_trace_t;
 
 static u8 *
@@ -44,8 +42,10 @@ format_vxlan_gbp_rx_trace (u8 * s, va_list * args)
                    t->vni);
   return format (s,
                  "VXLAN_GBP decap from vxlan_gbp_tunnel%d vni %d sclass %d"
-                 " next %d error %d",
-                 t->tunnel_index, t->vni, t->sclass, t->next_index, t->error);
+                 " flags %U next %d error %d",
+                 t->tunnel_index, t->vni, t->sclass,
+                 format_vxlan_gbp_header_gpflags, t->flags,
+                 t->next_index, t->error);
 }
 
 always_inline u32
@@ -67,46 +67,49 @@ typedef vxlan4_gbp_tunnel_key_t last_tunnel_cache4;
 always_inline vxlan_gbp_tunnel_t *
 vxlan4_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache4 * cache,
                         u32 fib_index, ip4_header_t * ip4_0,
-                        vxlan_gbp_header_t * vxlan_gbp0,
-                        vxlan_gbp_tunnel_t ** stats_t0)
+                        vxlan_gbp_header_t * vxlan_gbp0)
 {
-  /* Make sure VXLAN_GBP tunnel exist according to packet SIP and VNI */
+  /*
+   * Check unicast first since that's where most of the traffic comes from
+   * Make sure VXLAN_GBP tunnel exist according to packet SIP, DIP and VNI
+   */
   vxlan4_gbp_tunnel_key_t key4;
-  key4.key[1] = ((u64) fib_index << 32) | vxlan_gbp0->vni_reserved;
+  int rv;
 
-  if (PREDICT_FALSE (key4.key[1] != cache->key[1] ||
-                     ip4_0->src_address.as_u32 != (u32) cache->key[0]))
-    {
-      key4.key[0] = ip4_0->src_address.as_u32;
-      int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
-                                               &key4);
-      if (PREDICT_FALSE (rv != 0))
-        return 0;
+  key4.key[1] = (((u64) fib_index << 32) |
+                 (vxlan_gbp0->vni_reserved &
+                  clib_host_to_net_u32 (0xffffff00)));
+  key4.key[0] =
+    (((u64) ip4_0->dst_address.as_u32 << 32) | ip4_0->src_address.as_u32);
 
-      *cache = key4;
+  if (PREDICT_FALSE (key4.key[0] != cache->key[0] ||
+                     key4.key[1] != cache->key[1]))
+    {
+      rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
+                                           &key4);
+      if (PREDICT_FALSE (rv == 0))
+        {
+          *cache = key4;
+          return (pool_elt_at_index (vxm->tunnels, cache->value));
+        }
     }
-  vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);
-
-  /* Validate VXLAN_GBP tunnel SIP against packet DIP */
-  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
-    *stats_t0 = t0;
   else
     {
-      /* try multicast */
-      if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
-        return 0;
+      return (pool_elt_at_index (vxm->tunnels, cache->value));
+    }
 
-      key4.key[0] = ip4_0->dst_address.as_u32;
-      /* Make sure mcast VXLAN_GBP tunnel exist by packet DIP and VNI */
-      int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key,
-                                               &key4);
-      if (PREDICT_FALSE (rv != 0))
-        return 0;
+  /* No unicast match - try multicast */
+  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
+    return (NULL);
 
-      *stats_t0 = pool_elt_at_index (vxm->tunnels, key4.value);
-    }
+  key4.key[0] = ip4_0->dst_address.as_u32;
+  /* Make sure mcast VXLAN_GBP tunnel exist by packet DIP and VNI */
+  rv =
clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4); - return t0; + if (PREDICT_FALSE (rv != 0)) + return (NULL); + + return (pool_elt_at_index (vxm->tunnels, key4.value)); } typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6; @@ -114,34 +117,35 @@ typedef vxlan6_gbp_tunnel_key_t last_tunnel_cache6; always_inline vxlan_gbp_tunnel_t * vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache, u32 fib_index, ip6_header_t * ip6_0, - vxlan_gbp_header_t * vxlan_gbp0, - vxlan_gbp_tunnel_t ** stats_t0) + vxlan_gbp_header_t * vxlan_gbp0) { /* Make sure VXLAN_GBP tunnel exist according to packet SIP and VNI */ vxlan6_gbp_tunnel_key_t key6 = { .key = { [0] = ip6_0->src_address.as_u64[0], [1] = ip6_0->src_address.as_u64[1], - [2] = (((u64) fib_index) << 32) | vxlan_gbp0->vni_reserved, + [2] = ((((u64) fib_index) << 32) | + (vxlan_gbp0->vni_reserved & + clib_host_to_net_u32 (0xffffff00))), } }; + int rv; if (PREDICT_FALSE (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0)) { - int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, - &key6); + rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, + &key6); if (PREDICT_FALSE (rv != 0)) - return 0; + return NULL; *cache = key6; } vxlan_gbp_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value); /* Validate VXLAN_GBP tunnel SIP against packet DIP */ - if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6))) - *stats_t0 = t0; - else + if (PREDICT_FALSE + (!ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6))) { /* try multicast */ if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address))) @@ -150,21 +154,44 @@ vxlan6_gbp_find_tunnel (vxlan_gbp_main_t * vxm, last_tunnel_cache6 * cache, /* Make sure mcast VXLAN_GBP tunnel exist by packet DIP and VNI */ key6.key[0] = ip6_0->dst_address.as_u64[0]; key6.key[1] = ip6_0->dst_address.as_u64[1]; - int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, - &key6); + rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, + &key6); if (PREDICT_FALSE (rv != 0)) return 0; - *stats_t0 = pool_elt_at_index (vxm->tunnels, key6.value); } return t0; } +always_inline vxlan_gbp_input_next_t +vxlan_gbp_tunnel_get_next (const vxlan_gbp_tunnel_t * t, vlib_buffer_t * b0) +{ + if (VXLAN_GBP_TUNNEL_MODE_L2 == t->mode) + return (VXLAN_GBP_INPUT_NEXT_L2_INPUT); + else + { + ethernet_header_t *e0; + u16 type0; + + e0 = vlib_buffer_get_current (b0); + vlib_buffer_advance (b0, sizeof (*e0)); + type0 = clib_net_to_host_u16 (e0->type); + switch (type0) + { + case ETHERNET_TYPE_IP4: + return (VXLAN_GBP_INPUT_NEXT_IP4_INPUT); + case ETHERNET_TYPE_IP6: + return (VXLAN_GBP_INPUT_NEXT_IP6_INPUT); + } + } + return (VXLAN_GBP_INPUT_NEXT_DROP); +} + always_inline uword vxlan_gbp_input (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * from_frame, u32 is_ip4) + vlib_frame_t * from_frame, u8 is_ip4) { vxlan_gbp_main_t *vxm = &vxlan_gbp_main; vnet_main_t *vnm = vxm->vnet_main; @@ -179,9 +206,9 @@ vxlan_gbp_input (vlib_main_t * vm, u32 thread_index = vlib_get_thread_index (); if (is_ip4) - memset (&last4, 0xff, sizeof last4); + clib_memset (&last4, 0xff, sizeof last4); else - memset (&last6, 0xff, sizeof last6); + clib_memset (&last6, 0xff, sizeof last6); u32 next_index = node->cached_next_index; @@ -239,116 +266,121 @@ vxlan_gbp_input (vlib_main_t * vm, ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t); } - /* pop vxlan_gbp */ - vlib_buffer_advance (b0, sizeof *vxlan_gbp0); - 
vlib_buffer_advance (b1, sizeof *vxlan_gbp1); - u32 fi0 = buf_fib_index (b0, is_ip4); u32 fi1 = buf_fib_index (b1, is_ip4); - vxlan_gbp_tunnel_t *t0, *stats_t0 = 0; - vxlan_gbp_tunnel_t *t1, *stats_t1 = 0; + vxlan_gbp_tunnel_t *t0, *t1; if (is_ip4) { t0 = - vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0, - &stats_t0); + vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0); t1 = - vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1, - &stats_t1); + vxlan4_gbp_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan_gbp1); } else { t0 = - vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0, - &stats_t0); + vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0); t1 = - vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1, - &stats_t1); + vxlan6_gbp_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan_gbp1); } u32 len0 = vlib_buffer_length_in_chain (vm, b0); u32 len1 = vlib_buffer_length_in_chain (vm, b1); - u32 next0, next1; + vxlan_gbp_input_next_t next0, next1; u8 error0 = 0, error1 = 0; u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0); u8 flags1 = vxlan_gbp_get_flags (vxlan_gbp1); + /* Required to make the l2 tag push / pop code work on l2 subifs */ + /* pop vxlan_gbp */ + vlib_buffer_advance (b0, sizeof *vxlan_gbp0); + vlib_buffer_advance (b1, sizeof *vxlan_gbp1); + + u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI); + u8 i_and_g1 = ((flags1 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI); + /* Validate VXLAN_GBP tunnel encap-fib index against packet */ - if (PREDICT_FALSE - (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))) + if (PREDICT_FALSE (t0 == NULL || !i_and_g0)) { - next0 = VXLAN_GBP_INPUT_NEXT_DROP; - - if (t0 != 0 - && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)) + if (t0 != NULL && !i_and_g0) { error0 = VXLAN_GBP_ERROR_BAD_FLAGS; vlib_increment_combined_counter - (drop_counter, thread_index, stats_t0->sw_if_index, 1, - len0); + (drop_counter, thread_index, t0->sw_if_index, 1, len0); + next0 = VXLAN_GBP_INPUT_NEXT_DROP; } else - error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + { + error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + next0 = VXLAN_GBP_INPUT_NEXT_PUNT; + if (is_ip4) + b0->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4]; + else + b0->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6]; + } b0->error = node->errors[error0]; } else { - next0 = t0->decap_next_index; - vnet_buffer2 (b0)->gbp.flags = - vxlan_gbp_get_gpflags (vxlan_gbp0); - vnet_buffer2 (b0)->gbp.src_epg = - vxlan_gbp_get_sclass (vxlan_gbp0); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT)) - vnet_update_l2_len (b0); + next0 = vxlan_gbp_tunnel_get_next (t0, b0); /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index; vlib_increment_combined_counter - (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0); + (rx_counter, thread_index, t0->sw_if_index, 1, len0); pkts_decapsulated++; } - /* Validate VXLAN_GBP tunnel encap-fib index against packet */ - if (PREDICT_FALSE - (t1 == 0 || flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))) - { - next1 = VXLAN_GBP_INPUT_NEXT_DROP; + vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) | + VXLAN_GBP_GPFLAGS_R); + vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0); - if (t1 != 0 - && flags1 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)) + + if (PREDICT_FALSE (t1 == NULL || !i_and_g1)) + { + if (t1 != NULL && !i_and_g1) { 
error1 = VXLAN_GBP_ERROR_BAD_FLAGS; vlib_increment_combined_counter - (drop_counter, thread_index, stats_t1->sw_if_index, 1, - len1); + (drop_counter, thread_index, t1->sw_if_index, 1, len1); + next1 = VXLAN_GBP_INPUT_NEXT_DROP; } else - error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + { + error1 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + next1 = VXLAN_GBP_INPUT_NEXT_PUNT; + if (is_ip4) + b1->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4]; + else + b1->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6]; + } b1->error = node->errors[error1]; } else { - next1 = t1->decap_next_index; - vnet_buffer2 (b1)->gbp.flags = - vxlan_gbp_get_gpflags (vxlan_gbp1); - vnet_buffer2 (b1)->gbp.src_epg = - vxlan_gbp_get_sclass (vxlan_gbp1); - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - if (PREDICT_TRUE (next1 == VXLAN_GBP_INPUT_NEXT_L2_INPUT)) - vnet_update_l2_len (b1); + next1 = vxlan_gbp_tunnel_get_next (t1, b1); /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */ vnet_buffer (b1)->sw_if_index[VLIB_RX] = t1->sw_if_index; pkts_decapsulated++; vlib_increment_combined_counter - (rx_counter, thread_index, stats_t1->sw_if_index, 1, len1); + (rx_counter, thread_index, t1->sw_if_index, 1, len1); } + vnet_buffer2 (b1)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp1) | + VXLAN_GBP_GPFLAGS_R); + + vnet_buffer2 (b1)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp1); + + vnet_update_l2_len (b0); + vnet_update_l2_len (b1); + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { vxlan_gbp_rx_trace_t *tr = @@ -358,6 +390,7 @@ vxlan_gbp_input (vlib_main_t * vm, tr->tunnel_index = t0 == 0 ? ~0 : t0 - vxm->tunnels; tr->vni = vxlan_gbp_get_vni (vxlan_gbp0); tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0); + tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0); } if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) { @@ -368,6 +401,7 @@ vxlan_gbp_input (vlib_main_t * vm, tr->tunnel_index = t1 == 0 ? 
~0 : t1 - vxm->tunnels; tr->vni = vxlan_gbp_get_vni (vxlan_gbp1); tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp1); + tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp1); } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, @@ -395,64 +429,65 @@ vxlan_gbp_input (vlib_main_t * vm, else ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t); - /* pop (ip, udp, vxlan_gbp) */ - vlib_buffer_advance (b0, sizeof (*vxlan_gbp0)); - u32 fi0 = buf_fib_index (b0, is_ip4); - vxlan_gbp_tunnel_t *t0, *stats_t0 = 0; + vxlan_gbp_tunnel_t *t0; if (is_ip4) - t0 = - vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0, - &stats_t0); + t0 = vxlan4_gbp_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan_gbp0); else - t0 = - vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0, - &stats_t0); + t0 = vxlan6_gbp_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan_gbp0); uword len0 = vlib_buffer_length_in_chain (vm, b0); - u32 next0; + vxlan_gbp_input_next_t next0; u8 error0 = 0; u8 flags0 = vxlan_gbp_get_flags (vxlan_gbp0); + + /* pop (ip, udp, vxlan_gbp) */ + vlib_buffer_advance (b0, sizeof (*vxlan_gbp0)); + + u8 i_and_g0 = ((flags0 & VXLAN_GBP_FLAGS_GI) == VXLAN_GBP_FLAGS_GI); + /* Validate VXLAN_GBP tunnel encap-fib index against packet */ - if (PREDICT_FALSE - (t0 == 0 || flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G))) + if (PREDICT_FALSE (t0 == NULL || !i_and_g0)) { - next0 = VXLAN_GBP_INPUT_NEXT_DROP; - - if (t0 != 0 - && flags0 != (VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G)) + if (t0 != NULL && !i_and_g0) { error0 = VXLAN_GBP_ERROR_BAD_FLAGS; vlib_increment_combined_counter - (drop_counter, thread_index, stats_t0->sw_if_index, 1, - len0); + (drop_counter, thread_index, t0->sw_if_index, 1, len0); + next0 = VXLAN_GBP_INPUT_NEXT_DROP; } else - error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + { + error0 = VXLAN_GBP_ERROR_NO_SUCH_TUNNEL; + next0 = VXLAN_GBP_INPUT_NEXT_PUNT; + if (is_ip4) + b0->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP4]; + else + b0->punt_reason = + vxm->punt_no_such_tunnel[FIB_PROTOCOL_IP6]; + } b0->error = node->errors[error0]; } else { - next0 = t0->decap_next_index; - vnet_buffer2 (b0)->gbp.flags = - vxlan_gbp_get_gpflags (vxlan_gbp0); - vnet_buffer2 (b0)->gbp.src_epg = - vxlan_gbp_get_sclass (vxlan_gbp0); - - - /* Required to make the l2 tag push / pop code work on l2 subifs */ - if (PREDICT_TRUE (next0 == VXLAN_GBP_INPUT_NEXT_L2_INPUT)) - vnet_update_l2_len (b0); - + next0 = vxlan_gbp_tunnel_get_next (t0, b0); /* Set packet input sw_if_index to unicast VXLAN_GBP tunnel for learning */ vnet_buffer (b0)->sw_if_index[VLIB_RX] = t0->sw_if_index; pkts_decapsulated++; vlib_increment_combined_counter - (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0); + (rx_counter, thread_index, t0->sw_if_index, 1, len0); } + vnet_buffer2 (b0)->gbp.flags = (vxlan_gbp_get_gpflags (vxlan_gbp0) | + VXLAN_GBP_GPFLAGS_R); + + vnet_buffer2 (b0)->gbp.sclass = vxlan_gbp_get_sclass (vxlan_gbp0); + + /* Required to make the l2 tag push / pop code work on l2 subifs */ + vnet_update_l2_len (b0); if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -463,6 +498,7 @@ vxlan_gbp_input (vlib_main_t * vm, tr->tunnel_index = t0 == 0 ? 
~0 : t0 - vxm->tunnels; tr->vni = vxlan_gbp_get_vni (vxlan_gbp0); tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0); + tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, @@ -480,16 +516,16 @@ vxlan_gbp_input (vlib_main_t * vm, return from_frame->n_vectors; } -static uword -vxlan4_gbp_input (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (vxlan4_gbp_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 1); } -static uword -vxlan6_gbp_input (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (vxlan6_gbp_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return vxlan_gbp_input (vm, node, from_frame, /* is_ip4 */ 0); } @@ -504,7 +540,6 @@ static char *vxlan_gbp_error_strings[] = { /* *INDENT-OFF* */ VLIB_REGISTER_NODE (vxlan4_gbp_input_node) = { - .function = vxlan4_gbp_input, .name = "vxlan4-gbp-input", .vector_size = sizeof (u32), .n_errors = VXLAN_GBP_N_ERROR, @@ -517,11 +552,9 @@ VLIB_REGISTER_NODE (vxlan4_gbp_input_node) = #undef _ }, }; -VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_input_node, vxlan4_gbp_input) VLIB_REGISTER_NODE (vxlan6_gbp_input_node) = { - .function = vxlan6_gbp_input, .name = "vxlan6-gbp-input", .vector_size = sizeof (u32), .n_errors = VXLAN_GBP_N_ERROR, @@ -534,7 +567,6 @@ VLIB_REGISTER_NODE (vxlan6_gbp_input_node) = }, .format_trace = format_vxlan_gbp_rx_trace, }; -VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_input_node, vxlan6_gbp_input) /* *INDENT-ON* */ typedef enum @@ -542,7 +574,7 @@ typedef enum IP_VXLAN_GBP_BYPASS_NEXT_DROP, IP_VXLAN_GBP_BYPASS_NEXT_VXLAN_GBP, IP_VXLAN_GBP_BYPASS_N_NEXT, -} ip_vxan_gbp_bypass_next_t; +} ip_vxlan_gbp_bypass_next_t; always_inline uword ip_vxlan_gbp_bypass_inline (vlib_main_t * vm, @@ -943,9 +975,9 @@ ip_vxlan_gbp_bypass_inline (vlib_main_t * vm, return frame->n_vectors; } -static uword -ip4_vxlan_gbp_bypass (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_vxlan_gbp_bypass_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 1); } @@ -953,7 +985,6 @@ ip4_vxlan_gbp_bypass (vlib_main_t * vm, /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) = { - .function = ip4_vxlan_gbp_bypass, .name = "ip4-vxlan-gbp-bypass", .vector_size = sizeof (u32), .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT, @@ -964,10 +995,9 @@ VLIB_REGISTER_NODE (ip4_vxlan_gbp_bypass_node) = .format_buffer = format_ip4_header, .format_trace = format_ip4_forward_next_trace, }; - -VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gbp_bypass_node, ip4_vxlan_gbp_bypass) /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT /* Dummy init function to get us linked in. 
*/ clib_error_t * ip4_vxlan_gbp_bypass_init (vlib_main_t * vm) @@ -976,10 +1006,11 @@ ip4_vxlan_gbp_bypass_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (ip4_vxlan_gbp_bypass_init); +#endif /* CLIB_MARCH_VARIANT */ -static uword -ip6_vxlan_gbp_bypass (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip6_vxlan_gbp_bypass_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return ip_vxlan_gbp_bypass_inline (vm, node, frame, /* is_ip4 */ 0); } @@ -987,7 +1018,6 @@ ip6_vxlan_gbp_bypass (vlib_main_t * vm, /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) = { - .function = ip6_vxlan_gbp_bypass, .name = "ip6-vxlan-gbp-bypass", .vector_size = sizeof (u32), .n_next_nodes = IP_VXLAN_GBP_BYPASS_N_NEXT, @@ -998,10 +1028,9 @@ VLIB_REGISTER_NODE (ip6_vxlan_gbp_bypass_node) = .format_buffer = format_ip6_header, .format_trace = format_ip6_forward_next_trace, }; - -VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gbp_bypass_node, ip6_vxlan_gbp_bypass) /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT /* Dummy init function to get us linked in. */ clib_error_t * ip6_vxlan_gbp_bypass_init (vlib_main_t * vm) @@ -1010,6 +1039,7 @@ ip6_vxlan_gbp_bypass_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (ip6_vxlan_gbp_bypass_init); +#endif /* CLIB_MARCH_VARIANT */ /* * fd.io coding-style-patch-verification: ON
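
The rewritten vxlan4_gbp_find_tunnel above no longer validates the cached tunnel's source address after the fact: both the packet's source and destination now live in the bihash key itself, alongside the FIB index and the VNI, and the reserved byte of the wire-order vni_reserved word is masked off so it cannot perturb the lookup or the single-entry cache compare. A minimal standalone sketch of that key packing, using plain stdint types and htonl() instead of the VPP types and clib helpers; the test values in main are made up:

/*
 * Sketch of the 16-byte lookup key the patch builds for the IPv4
 * tunnel search.  Self-contained: no VPP headers, illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl() */

typedef struct
{
  uint64_t key[2];
} key16_t;

/*
 * vni_reserved is the second VXLAN-GBP word as it sits on the wire
 * (network byte order): 24 bits of VNI followed by a reserved byte.
 * Masking with htonl (0xffffff00) keeps only the VNI bits, so two
 * packets differing solely in the reserved byte produce the same key.
 */
static key16_t
make_key4 (uint32_t fib_index, uint32_t vni_reserved,
	   uint32_t sip, uint32_t dip)
{
  key16_t k;
  k.key[0] = ((uint64_t) dip << 32) | sip;
  k.key[1] = ((uint64_t) fib_index << 32)
    | (vni_reserved & htonl (0xffffff00));
  return k;
}

int
main (void)
{
  /* VNI 42 in wire format, with a non-zero reserved byte to show the
     mask at work.  */
  uint32_t vni_reserved = htonl ((42 << 8) | 0xab);
  key16_t k = make_key4 (0 /* fib */ , vni_reserved,
			 htonl (0x0a000001) /* 10.0.0.1 */ ,
			 htonl (0x0a000002) /* 10.0.0.2 */ );
  printf ("key = %016llx %016llx\n",
	  (unsigned long long) k.key[0], (unsigned long long) k.key[1]);
  return 0;
}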
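The decap loops reduce flag checking to a single test: both the G and the I bit must be set, and the failure action depends on whether the tunnel lookup hit. A known tunnel with malformed flags is counted against the tunnel's drop counter and dropped; a lookup miss is now punted with a per-address-family punt reason rather than dropped. The sketch below mirrors that decision table; the bit values follow the VXLAN-GBP draft, but the enum and names are local stand-ins, not the VPP symbols:

/*
 * Sketch of the accept/drop/punt decision in the rewritten loops.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAGS_G  0x80		/* group-policy header present */
#define FLAGS_I  0x08		/* VNI field is valid */
#define FLAGS_GI (FLAGS_G | FLAGS_I)

typedef enum
{
  NEXT_DECAP,			/* tunnel found, flags valid */
  NEXT_DROP,			/* tunnel found, bad flags */
  NEXT_PUNT,			/* no such tunnel: hand to punt infra */
} next_t;

static next_t
classify (bool tunnel_found, unsigned char flags)
{
  bool i_and_g = (flags & FLAGS_GI) == FLAGS_GI;

  if (tunnel_found && i_and_g)
    return NEXT_DECAP;
  if (tunnel_found)		/* known tunnel, malformed flags */
    return NEXT_DROP;
  return NEXT_PUNT;		/* unknown tunnel, any flags */
}

int
main (void)
{
  printf ("%d %d %d\n",
	  classify (true, 0x88),	/* G|I set  -> NEXT_DECAP */
	  classify (true, 0x08),	/* I only   -> NEXT_DROP  */
	  classify (false, 0x88));	/* miss     -> NEXT_PUNT  */
  return 0;
}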
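Because the tunnel's decap_next_index is gone, the next node is computed per packet by vxlan_gbp_tunnel_get_next: L2-mode tunnels go straight to l2-input, while routed tunnels consume the inner Ethernet header and dispatch on its ethertype. A simplified standalone version, with stand-in types and constants rather than the VPP ones:

/*
 * Sketch of the per-packet next-node dispatch added by the patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohs(), htons() */

#define ETHERTYPE_IP4 0x0800
#define ETHERTYPE_IP6 0x86DD

typedef enum { MODE_L2, MODE_L3 } tunnel_mode_t;
typedef enum
{ NEXT_L2_INPUT, NEXT_IP4_INPUT, NEXT_IP6_INPUT, NEXT_DROP } next_t;

typedef struct
{
  uint8_t dst[6], src[6];
  uint16_t type;
} eth_header_t;

static next_t
tunnel_get_next (tunnel_mode_t mode, uint8_t ** cur)
{
  if (mode == MODE_L2)
    return NEXT_L2_INPUT;	/* leave the Ethernet header in place */

  /* L3 mode: strip the inner Ethernet header, pick by ethertype */
  eth_header_t *e = (eth_header_t *) *cur;
  *cur += sizeof (*e);		/* the vlib_buffer_advance in the patch */
  switch (ntohs (e->type))
    {
    case ETHERTYPE_IP4:
      return NEXT_IP4_INPUT;
    case ETHERTYPE_IP6:
      return NEXT_IP6_INPUT;
    }
  return NEXT_DROP;
}

int
main (void)
{
  uint8_t frame[sizeof (eth_header_t) + 20] = { 0 };
  eth_header_t *e = (eth_header_t *) frame;
  e->type = htons (ETHERTYPE_IP4);

  uint8_t *cur = frame;
  printf ("next = %d\n", tunnel_get_next (MODE_L3, &cur));	/* 1 = ip4 */
  return 0;
}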
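Independently of hit or miss, the patch now writes the group-policy metadata into the buffer: the sclass from the header, and the GP flags with VXLAN_GBP_GPFLAGS_R ORed in unconditionally, presumably marking the packet as received from the overlay for downstream GBP nodes. The sketch below shows only that shape; the two bit positions are assumptions for illustration, not the values from vxlan_gbp_packet.h:

/*
 * Sketch of the per-buffer GBP metadata the decap path now writes for
 * every packet.  Bit positions are assumed stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define GPFLAGS_A 0x08		/* assumed: policy already applied */
#define GPFLAGS_R 0x04		/* assumed: arrived via the overlay */

typedef struct
{
  uint8_t flags;		/* GP flags from the header, plus R */
  uint16_t sclass;		/* source class / EPG from the header */
} gbp_metadata_t;

static void
set_gbp_metadata (gbp_metadata_t * md, uint8_t gpflags, uint16_t sclass)
{
  /* R is ORed in unconditionally so later GBP nodes can tell
     overlay-sourced traffic from locally sourced traffic.  */
  md->flags = gpflags | GPFLAGS_R;
  md->sclass = sclass;
}

int
main (void)
{
  gbp_metadata_t md;
  set_gbp_metadata (&md, GPFLAGS_A, 100);
  printf ("flags=0x%02x sclass=%u\n", md.flags, (unsigned) md.sclass);
  return 0;
}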