X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fgbp%2Fgbp_policy_dpo.c;h=9f26b9c67ab167d91e07baf82532e928427788c3;hb=59f71132e;hp=a2d9510b83877f484c88a3e1ce84ea2c3fe093ce;hpb=b7b929931a07fbb27b43d5cd105f366c3e29807e;p=vpp.git diff --git a/src/plugins/gbp/gbp_policy_dpo.c b/src/plugins/gbp/gbp_policy_dpo.c index a2d9510b838..9f26b9c67ab 100644 --- a/src/plugins/gbp/gbp_policy_dpo.c +++ b/src/plugins/gbp/gbp_policy_dpo.c @@ -14,42 +14,33 @@ */ #include -#include -#include -#include +#include +#include +#include #include +#include #include #include +#include +#ifndef CLIB_MARCH_VARIANT /** * DPO pool */ -static gbp_policy_dpo_t *gbp_policy_dpo_pool; +gbp_policy_dpo_t *gbp_policy_dpo_pool; /** * DPO type registered for these GBP FWD */ -static dpo_type_t gbp_policy_dpo_type; - -static inline gbp_policy_dpo_t * -gbp_policy_dpo_get_i (index_t index) -{ - return (pool_elt_at_index (gbp_policy_dpo_pool, index)); -} - -gbp_policy_dpo_t * -gbp_policy_dpo_get (index_t index) -{ - return (gbp_policy_dpo_get_i (index)); -} +dpo_type_t gbp_policy_dpo_type; static gbp_policy_dpo_t * gbp_policy_dpo_alloc (void) { gbp_policy_dpo_t *gpd; - pool_get (gbp_policy_dpo_pool, gpd); + pool_get_aligned_zero (gbp_policy_dpo_pool, gpd, CLIB_CACHE_LINE_BYTES); return (gpd); } @@ -59,7 +50,7 @@ gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo) { ASSERT (gbp_policy_dpo_type == dpo->dpoi_type); - return (gbp_policy_dpo_get_i (dpo->dpoi_index)); + return (gbp_policy_dpo_get (dpo->dpoi_index)); } static inline index_t @@ -104,25 +95,32 @@ gbp_policy_dpo_get_urpf (const dpo_id_t * dpo) void gbp_policy_dpo_add_or_lock (dpo_proto_t dproto, - epg_id_t epg, u32 sw_if_index, dpo_id_t * dpo) + gbp_scope_t scope, + sclass_t sclass, u32 sw_if_index, dpo_id_t * dpo) { gbp_policy_dpo_t *gpd; dpo_id_t parent = DPO_INVALID; gpd = gbp_policy_dpo_alloc (); - clib_memset (gpd, 0, sizeof (*gpd)); gpd->gpd_proto = dproto; gpd->gpd_sw_if_index = sw_if_index; - gpd->gpd_epg = epg; + gpd->gpd_sclass = sclass; + gpd->gpd_scope = scope; - /* - * stack on the DVR DPO for the output interface - */ - dvr_dpo_add_or_lock (sw_if_index, dproto, &parent); + if (~0 != sw_if_index) + { + /* + * stack on the DVR DPO for the output interface + */ + dvr_dpo_add_or_lock (sw_if_index, dproto, &parent); + } + else + { + dpo_copy (&parent, drop_dpo_get (dproto)); + } dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent); - dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd)); } @@ -131,12 +129,12 @@ format_gbp_policy_dpo (u8 * s, va_list * ap) { index_t index = va_arg (*ap, index_t); u32 indent = va_arg (*ap, u32); - gbp_policy_dpo_t *gpd = gbp_policy_dpo_get_i (index); + gbp_policy_dpo_t *gpd = gbp_policy_dpo_get (index); vnet_main_t *vnm = vnet_get_main (); - s = format (s, "gbp-policy-dpo: %U, epg:%d out:%U", + s = format (s, "gbp-policy-dpo: %U, scope:%d sclass:%d out:%U", format_dpo_proto, gpd->gpd_proto, - gpd->gpd_epg, + gpd->gpd_scope, (int) gpd->gpd_sclass, format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index); s = format (s, "\n%U", format_white_space, indent + 2); s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4); @@ -144,11 +142,44 @@ format_gbp_policy_dpo (u8 * s, va_list * ap) return (s); } +/** + * Interpose a policy DPO + */ +static void +gbp_policy_dpo_interpose (const dpo_id_t * original, + const dpo_id_t * parent, dpo_id_t * clone) +{ + gbp_policy_dpo_t *gpd, *gpd_clone; + + gpd_clone = gbp_policy_dpo_alloc (); + gpd = gbp_policy_dpo_get (original->dpoi_index); + + 
gpd_clone->gpd_proto = gpd->gpd_proto; + gpd_clone->gpd_scope = gpd->gpd_scope; + gpd_clone->gpd_sclass = gpd->gpd_sclass; + gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index; + + /* + * if no interface is provided, grab one from the parent + * on which we stack + */ + if (~0 == gpd_clone->gpd_sw_if_index) + gpd_clone->gpd_sw_if_index = dpo_get_urpf (parent); + + dpo_stack (gbp_policy_dpo_type, + gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent); + + dpo_set (clone, + gbp_policy_dpo_type, + gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone)); +} + const static dpo_vft_t gbp_policy_dpo_vft = { .dv_lock = gbp_policy_dpo_lock, .dv_unlock = gbp_policy_dpo_unlock, .dv_format = format_gbp_policy_dpo, .dv_get_urpf = gbp_policy_dpo_get_urpf, + .dv_mk_interpose = gbp_policy_dpo_interpose, }; /** @@ -189,13 +220,7 @@ gbp_policy_dpo_module_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init); - -typedef struct gbp_policy_dpo_trace_t_ -{ - u32 src_epg; - u32 dst_epg; - u32 acl_index; -} gbp_policy_dpo_trace_t; +#endif /* CLIB_MARCH_VARIANT */ typedef enum { @@ -203,6 +228,23 @@ typedef enum GBP_POLICY_N_NEXT, } gbp_policy_next_t; +always_inline u32 +gbp_rule_l3_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0, int is_ip6) +{ + gbp_policy_node_t pnode; + const dpo_id_t *dpo; + dpo_proto_t dproto; + + pnode = (is_ip6 ? GBP_POLICY_NODE_IP6 : GBP_POLICY_NODE_IP4); + dproto = (is_ip6 ? DPO_PROTO_IP6 : DPO_PROTO_IP4); + dpo = &gu->gu_dpo[pnode][dproto]; + + /* The flow hash is still valid as this is a IP packet being switched */ + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index; + + return (dpo->dpoi_next_node); +} + always_inline uword gbp_policy_dpo_inline (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -210,9 +252,11 @@ gbp_policy_dpo_inline (vlib_main_t * vm, { gbp_main_t *gm = &gbp_main; u32 n_left_from, next_index, *from, *to_next; + u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; + n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0; next_index = node->cached_next_index; @@ -224,13 +268,14 @@ gbp_policy_dpo_inline (vlib_main_t * vm, while (n_left_from > 0 && n_left_to_next > 0) { + gbp_rule_action_t action0 = GBP_RULE_DENY; + u32 acl_match = ~0, rule_match = ~0; const gbp_policy_dpo_t *gpd0; - u32 bi0, next0; + gbp_contract_error_t err0; gbp_contract_key_t key0; - gbp_contract_value_t value0 = { - .as_u64 = ~0, - }; vlib_buffer_t *b0; + gbp_rule_t *rule0; + u32 bi0, next0; bi0 = from[0]; to_next[0] = bi0; @@ -241,116 +286,109 @@ gbp_policy_dpo_inline (vlib_main_t * vm, next0 = GBP_POLICY_DROP; b0 = vlib_get_buffer (vm, bi0); - gpd0 = - gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + gpd0 = gbp_policy_dpo_get (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index; - key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg; - key0.gck_dst = gpd0->gpd_epg; + /* + * Reflection check; in and out on an ivxlan tunnel + */ + if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (gpd0->gpd_sw_if_index)) + && (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R)) + { + goto trace; + } - if (EPG_INVALID != key0.gck_src) + if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A) { - if (PREDICT_FALSE (key0.gck_src == key0.gck_dst)) - { - /* - * intra-epg allowed - */ - next0 = gpd0->gpd_dpo.dpoi_next_node; - } - else - { - value0.as_u64 = gbp_acl_lookup (&key0); - - if (~0 != value0.gc_lc_index) - { - fa_5tuple_opaque_t pkt_5tuple0; 
- u8 action0 = 0; - u32 acl_pos_p0, acl_match_p0; - u32 rule_match_p0, trace_bitmap0; - /* - * tests against the ACL - */ - acl_plugin_fill_5tuple_inline (gm-> - acl_plugin.p_acl_main, - value0.gc_lc_index, b0, - is_ip6, - /* is_input */ 1, - /* is_l2_path */ 0, - &pkt_5tuple0); - acl_plugin_match_5tuple_inline (gm-> - acl_plugin.p_acl_main, - value0.gc_lc_index, - &pkt_5tuple0, is_ip6, - &action0, &acl_pos_p0, - &acl_match_p0, - &rule_match_p0, - &trace_bitmap0); - - if (action0 > 0) - next0 = gpd0->gpd_dpo.dpoi_next_node; - } - } + next0 = gpd0->gpd_dpo.dpoi_next_node; + key0.as_u64 = ~0; + n_allow_a_bit++; + goto trace; } - else + + /* zero out the key to ensure the pad space is clear */ + key0.as_u64 = 0; + key0.gck_src = vnet_buffer2 (b0)->gbp.sclass; + + if (SCLASS_INVALID == key0.gck_src) { /* * the src EPG is not set when the packet arrives on an EPG * uplink interface and we do not need to apply policy */ next0 = gpd0->gpd_dpo.dpoi_next_node; + goto trace; } - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - gbp_policy_dpo_trace_t *tr; + key0.gck_scope = gpd0->gpd_scope; + key0.gck_dst = gpd0->gpd_sclass; - tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->src_epg = key0.gck_src; - tr->dst_epg = key0.gck_dst; - tr->acl_index = value0.gc_acl_index; + action0 = + gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra, + &n_allow_sclass_1, &acl_match, &rule_match, + &err0, + is_ip6 ? GBP_CONTRACT_APPLY_IP6 : + GBP_CONTRACT_APPLY_IP4); + switch (action0) + { + case GBP_RULE_PERMIT: + next0 = gpd0->gpd_dpo.dpoi_next_node; + vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A; + break; + case GBP_RULE_REDIRECT: + next0 = gbp_rule_l3_redirect (rule0, b0, is_ip6); + vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A; + break; + case GBP_RULE_DENY: + next0 = GBP_POLICY_DROP; + b0->error = node->errors[err0]; + break; } + trace: + gbp_policy_trace (vm, node, b0, &key0, action0, acl_match, + rule_match); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - return from_frame->n_vectors; -} - -static u8 * -format_gbp_policy_dpo_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *); - - s = format (s, " src-epg:%d dst-epg:%d acl-index:%d", - t->src_epg, t->dst_epg, t->acl_index); - return s; + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra); + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit); + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_SCLASS_1, + n_allow_sclass_1); + return from_frame->n_vectors; } -static uword -ip4_gbp_policy_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip4_gbp_policy_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_policy_dpo_inline (vm, node, from_frame, 0)); } -static uword -ip6_gbp_policy_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip6_gbp_policy_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_policy_dpo_inline (vm, node, from_frame, 1)); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = { - 
.function = ip4_gbp_policy_dpo, .name = "ip4-gbp-policy-dpo", .vector_size = sizeof (u32), - .format_trace = format_gbp_policy_dpo_trace, + .format_trace = format_gbp_policy_trace, + + .n_errors = ARRAY_LEN(gbp_contract_error_strings), + .error_strings = gbp_contract_error_strings, + .n_next_nodes = GBP_POLICY_N_NEXT, .next_nodes = { @@ -358,204 +396,19 @@ VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = { } }; VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = { - .function = ip6_gbp_policy_dpo, .name = "ip6-gbp-policy-dpo", .vector_size = sizeof (u32), - .format_trace = format_gbp_policy_dpo_trace, + .format_trace = format_gbp_policy_trace, + + .n_errors = ARRAY_LEN(gbp_contract_error_strings), + .error_strings = gbp_contract_error_strings, + .n_next_nodes = GBP_POLICY_N_NEXT, .next_nodes = { [GBP_POLICY_DROP] = "ip6-drop", } }; - -VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo) -VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo) -/* *INDENT-ON* */ - - /** - * per-packet trace data - */ -typedef struct gbp_classify_trace_t_ -{ - /* per-pkt trace data */ - epg_id_t src_epg; -} gbp_classify_trace_t; - -typedef enum gbp_lpm_classify_next_t_ -{ - GPB_LPM_CLASSIFY_DROP, -} gbp_lpm_classify_next_t; - -/* - * Determine the SRC EPG from a LPM - */ -always_inline uword -gbp_lpm_classify_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, fib_protocol_t fproto) -{ - u32 n_left_from, *from, *to_next; - u32 next_index; - - next_index = 0; - n_left_from = frame->n_vectors; - from = vlib_frame_vector_args (frame); - - while (n_left_from > 0) - { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0, sw_if_index0, fib_index0, lbi0; - gbp_lpm_classify_next_t next0; - const gbp_policy_dpo_t *gpd0; - const gbp_recirc_t *gr0; - const dpo_id_t *dpo0; - load_balance_t *lb0; - ip4_header_t *ip4_0; - ip6_header_t *ip6_0; - vlib_buffer_t *b0; - epg_id_t src_epg0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - next0 = GPB_LPM_CLASSIFY_DROP; - - b0 = vlib_get_buffer (vm, bi0); - - sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; - gr0 = gbp_recirc_get (sw_if_index0); - fib_index0 = gr0->gr_fib_index[fproto]; - - if (FIB_PROTOCOL_IP4 == fproto) - { - ip4_0 = vlib_buffer_get_current (b0); - lbi0 = ip4_fib_forwarding_lookup (fib_index0, - &ip4_0->src_address); - } - else - { - ip6_0 = vlib_buffer_get_current (b0); - lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0, - &ip6_0->src_address); - } - - lb0 = load_balance_get (lbi0); - dpo0 = load_balance_get_bucket_i (lb0, 0); - - if (gbp_policy_dpo_type == dpo0->dpoi_type) - { - gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index); - src_epg0 = gpd0->gpd_epg; - vnet_feature_next (&next0, b0); - } - else - { - /* could not classify => drop */ - src_epg0 = 0; - } - - vnet_buffer2 (b0)->gbp.src_epg = src_epg0; - - if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED))) - { - gbp_classify_trace_t *t = - vlib_add_trace (vm, node, b0, sizeof (*t)); - t->src_epg = src_epg0; - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - return frame->n_vectors; -} - -static uword -gbp_ip4_lpm_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) -{ - return (gbp_lpm_classify_inline (vm, 
node, frame, FIB_PROTOCOL_IP4)); -} - -static uword -gbp_ip6_lpm_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) -{ - return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP6)); -} - - /* packet trace format function */ -static u8 * -format_gbp_classify_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *); - - s = format (s, "src-epg:%d", t->src_epg); - - return s; -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = { - .function = gbp_ip4_lpm_classify, - .name = "ip4-gbp-lpm-classify", - .vector_size = sizeof (u32), - .format_trace = format_gbp_classify_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = 0, - .n_next_nodes = 1, - .next_nodes = { - [GPB_LPM_CLASSIFY_DROP] = "ip4-drop" - }, -}; - -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify); - -VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = { - .function = gbp_ip6_lpm_classify, - .name = "ip6-gbp-lpm-classify", - .vector_size = sizeof (u32), - .format_trace = format_gbp_classify_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = 0, - .n_next_nodes = 1, - .next_nodes = { - [GPB_LPM_CLASSIFY_DROP] = "ip6-drop" - }, -}; - -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify); - -VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) = -{ - .arc_name = "ip4-unicast", - .node_name = "ip4-gbp-lpm-classify", - .runs_before = VNET_FEATURES ("nat44-out2in"), -}; -VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) = -{ - .arc_name = "ip6-unicast", - .node_name = "ip6-gbp-lpm-classify", - .runs_before = VNET_FEATURES ("nat66-out2in"), -}; - /* *INDENT-ON* */ /*
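
Note on the headline change above: the contract key grows from a single EPG id to a (scope, src-sclass, dst-sclass) triple packed into one u64 (zeroed first so the pad space is clean), and the per-packet decision is funnelled through gbp_contract_apply(), whose result drives a permit/redirect/deny switch. The sketch below models only that shape. It is a hedged illustration, not the plugin's code: the *_toy types and the toy lookup are invented for this example; only the action names, the key layout, and the zero-the-pad idiom mirror the diff.

/*
 * Toy model of the (scope, sclass) contract key and action dispatch.
 * Compile with: cc -std=c11 -o key_toy key_toy.c
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t gbp_scope_t;
typedef uint16_t sclass_t;

/* Key layout following the diff: scope + src + dst + pad share one
 * u64, and the pad space is zeroed before use (key0.as_u64 = 0). */
typedef union
{
  struct
  {
    gbp_scope_t gck_scope;
    sclass_t gck_src;
    sclass_t gck_dst;
    uint16_t gck_pad;
  };
  uint64_t as_u64;
} gbp_contract_key_toy_t;

typedef enum
{
  GBP_RULE_PERMIT,
  GBP_RULE_REDIRECT,
  GBP_RULE_DENY,
} gbp_rule_action_toy_t;

/* Toy stand-in for gbp_contract_apply(): permit intra-group traffic,
 * deny everything else.  The real function consults the contract DB
 * and the ACL plugin. */
static gbp_rule_action_toy_t
contract_apply_toy (const gbp_contract_key_toy_t * key)
{
  if (key->gck_src == key->gck_dst)
    return (GBP_RULE_PERMIT);
  return (GBP_RULE_DENY);
}

int
main (void)
{
  gbp_contract_key_toy_t key;

  key.as_u64 = 0;		/* zero out the key, as the diff does */
  key.gck_scope = 1;
  key.gck_src = 100;
  key.gck_dst = 200;

  switch (contract_apply_toy (&key))
    {
    case GBP_RULE_PERMIT:
      printf ("permit: forward and set the A bit\n");
      break;
    case GBP_RULE_REDIRECT:
      printf ("redirect: take adj_index[VLIB_TX] from the rule's DPO\n");
      break;
    case GBP_RULE_DENY:
      printf ("deny: drop and count the contract error\n");
      break;
    }
  return 0;
}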
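
Two early exits now precede the contract lookup: a packet that came in and would go back out over an ivxlan tunnel with the R (reflection) bit set stays on the drop path, and a packet already carrying the A (allowed) bit is forwarded without re-evaluating policy; a permit or redirect result sets the A bit so downstream hops take the fast exit. A toy version of that check follows; the flag names come from the diff, but the bit positions and pkt_toy_t are assumptions for illustration only.

/*
 * Toy model of the R/A flag early exits.
 * Compile with: cc -std=c11 -o flags_toy flags_toy.c
 */
#include <stdint.h>
#include <stdio.h>

#define GPFLAGS_A (1 << 3)	/* assumed bit position */
#define GPFLAGS_R (1 << 2)	/* assumed bit position */

typedef struct
{
  uint8_t gbp_flags;
  int on_ivxlan_tunnel;		/* result of the tunnel lookup in the diff */
} pkt_toy_t;

/* Returns nonzero when the contract lookup can be skipped. */
static int
policy_bypass_toy (const pkt_toy_t * p)
{
  /* Reflection check; in and out on an ivxlan tunnel: stay on drop */
  if (p->on_ivxlan_tunnel && (p->gbp_flags & GPFLAGS_R))
    return 1;

  /* Already allowed at a previous hop: forward without re-evaluating */
  if (p->gbp_flags & GPFLAGS_A)
    return 1;

  return 0;
}

int
main (void)
{
  pkt_toy_t p = {.gbp_flags = GPFLAGS_A,.on_ivxlan_tunnel = 0 };

  printf ("bypass=%d\n", policy_bypass_toy (&p));
  return 0;
}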
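
The new dv_mk_interpose callback lets FIB insert a policy DPO between an existing parent and child: allocate a clone, copy the original's proto/scope/sclass/interface, default the interface from the parent's uRPF when none was given, then stack the clone on the parent. The pattern is compact enough to show standalone; policy_toy_t and the helpers below are invented stand-ins, where the real code uses dpo_id_t, dpo_stack() and dpo_get_urpf().

/*
 * Toy model of the clone-and-stack interpose pattern.
 * Compile with: cc -std=c11 -o interpose_toy interpose_toy.c
 */
#include <stdint.h>
#include <stdio.h>

#define IF_INVALID (~0u)

typedef struct
{
  uint32_t sw_if_index;
  uint32_t parent_index;	/* stand-in for the stacked dpo_id_t */
} policy_toy_t;

/* stand-in for dpo_get_urpf(parent) */
static uint32_t
parent_urpf_toy (uint32_t parent_index)
{
  return parent_index + 10;	/* arbitrary toy mapping */
}

static void
interpose_toy (const policy_toy_t * original, uint32_t parent_index,
	       policy_toy_t * clone)
{
  /* copy the original's fields (proto/scope/sclass in the real code) */
  *clone = *original;

  /* if no interface is provided, grab one from the parent */
  if (IF_INVALID == clone->sw_if_index)
    clone->sw_if_index = parent_urpf_toy (parent_index);

  /* dpo_stack() on the parent in the real code */
  clone->parent_index = parent_index;
}

int
main (void)
{
  policy_toy_t orig = {.sw_if_index = IF_INVALID,.parent_index = 0 };
  policy_toy_t clone;

  interpose_toy (&orig, 5, &clone);
  printf ("clone stacks on %u via interface %u\n",
	  (unsigned) clone.parent_index, (unsigned) clone.sw_if_index);
  return 0;
}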