X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fgbp%2Fgbp_policy_node.c;h=8c6ef5c2b94f969afaaab94cc60ef94c2aa4d098;hb=9534696b4;hp=ff21e7d0e2eb2365b364f51f20833f604c5568d9;hpb=4ba67723d716660c56326ce498b99a060a9471b1;p=vpp.git diff --git a/src/plugins/gbp/gbp_policy_node.c b/src/plugins/gbp/gbp_policy_node.c index ff21e7d0e2e..8c6ef5c2b94 100644 --- a/src/plugins/gbp/gbp_policy_node.c +++ b/src/plugins/gbp/gbp_policy_node.c @@ -14,47 +14,22 @@ */ #include +#include +#include #include +#include +#include +#include #include - -#define foreach_gbp_policy \ - _(DENY, "deny") - -typedef enum -{ -#define _(sym,str) GBP_ERROR_##sym, - foreach_gbp_policy -#undef _ - GBP_POLICY_N_ERROR, -} gbp_policy_error_t; - -static char *gbp_policy_error_strings[] = { -#define _(sym,string) string, - foreach_gbp_policy -#undef _ -}; +#include typedef enum { -#define _(sym,str) GBP_POLICY_NEXT_##sym, - foreach_gbp_policy -#undef _ - GBP_POLICY_N_NEXT, + GBP_POLICY_NEXT_DROP, + GBP_POLICY_N_NEXT, } gbp_policy_next_t; -/** - * per-packet trace data - */ -typedef struct gbp_policy_trace_t_ -{ - /* per-pkt trace data */ - u32 sclass; - u32 dst_epg; - u32 acl_index; - u32 allowed; -} gbp_policy_trace_t; - always_inline dpo_proto_t ethertype_to_dpo_proto (u16 etype) { @@ -93,32 +68,45 @@ gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0) return (dpo->dpoi_next_node); } -always_inline u8 -gbp_policy_is_ethertype_allowed (const gbp_contract_t * gc0, u16 ethertype) +static_always_inline gbp_policy_next_t +gbp_policy_l2_feature_next (gbp_policy_main_t * gpm, vlib_buffer_t * b, + const gbp_policy_type_t type) { - u16 *et; - - vec_foreach (et, gc0->gc_allowed_ethertypes) - { - if (*et == ethertype) - return (1); - } - return (0); + u32 feat_bit; + + switch (type) + { + case GBP_POLICY_PORT: + feat_bit = L2OUTPUT_FEAT_GBP_POLICY_PORT; + break; + case GBP_POLICY_MAC: + feat_bit = L2OUTPUT_FEAT_GBP_POLICY_MAC; + break; + case GBP_POLICY_LPM: + feat_bit = L2OUTPUT_FEAT_GBP_POLICY_LPM; + break; + default: + return GBP_POLICY_NEXT_DROP; + } + + return vnet_l2_feature_next (b, gpm->l2_output_feat_next[type], feat_bit); } static uword gbp_policy_inline (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * frame, u8 is_port_based) + vlib_frame_t * frame, const gbp_policy_type_t type) { gbp_main_t *gm = &gbp_main; gbp_policy_main_t *gpm = &gbp_policy_main; u32 n_left_from, *from, *to_next; u32 next_index; + u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1; next_index = 0; n_left_from = frame->n_vectors; from = vlib_frame_vector_args (frame); + n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0; while (n_left_from > 0) { @@ -128,17 +116,18 @@ gbp_policy_inline (vlib_main_t * vm, while (n_left_from > 0 && n_left_to_next > 0) { + gbp_rule_action_t action0 = GBP_RULE_DENY; const ethernet_header_t *h0; const gbp_endpoint_t *ge0; - const gbp_contract_t *gc0; + gbp_contract_error_t err0; + u32 acl_match = ~0, rule_match = ~0; gbp_policy_next_t next0; gbp_contract_key_t key0; u32 bi0, sw_if_index0; vlib_buffer_t *b0; - index_t gci0; + gbp_rule_t *rule0; - gc0 = NULL; - next0 = GBP_POLICY_NEXT_DENY; + next0 = GBP_POLICY_NEXT_DROP; bi0 = from[0]; to_next[0] = bi0; from += 1; @@ -151,166 +140,109 @@ gbp_policy_inline (vlib_main_t * vm, sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX]; /* - * If the A0bit is set then policy has already been applied + * Reflection check; in and out on an ivxlan tunnel + */ + if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (sw_if_index0)) && + (vnet_buffer2 
(b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R)) + { + goto trace; + } + + /* + * If the A-bit is set then policy has already been applied * and we skip enforcement here. */ if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A) { - next0 = vnet_l2_feature_next (b0, - gpm->l2_output_feat_next - [is_port_based], - (is_port_based ? - L2OUTPUT_FEAT_GBP_POLICY_PORT : - L2OUTPUT_FEAT_GBP_POLICY_MAC)); - key0.as_u32 = ~0; + next0 = gbp_policy_l2_feature_next (gpm, b0, type); + n_allow_a_bit++; + key0.as_u64 = ~0; goto trace; } + /* * determine the src and dst EPG */ - if (is_port_based) - ge0 = gbp_endpoint_find_itf (sw_if_index0); - else - ge0 = gbp_endpoint_find_mac (h0->dst_address, - vnet_buffer (b0)->l2.bd_index); - - if (NULL != ge0) - key0.gck_dst = ge0->ge_fwd.gef_sclass; - else - /* If you cannot determine the destination EP then drop */ - goto trace; + /* zero out the key to ensure the pad space is clear */ + key0.as_u64 = 0; key0.gck_src = vnet_buffer2 (b0)->gbp.sclass; + key0.gck_dst = SCLASS_INVALID; - if (SCLASS_INVALID != key0.gck_src) + if (GBP_POLICY_LPM == type) { - if (PREDICT_FALSE (key0.gck_src == key0.gck_dst)) + const ip4_address_t *ip4 = 0; + const ip6_address_t *ip6 = 0; + const dpo_proto_t proto = + gbp_classify_get_ip_address (h0, &ip4, &ip6, + GBP_CLASSIFY_GET_IP_DST); + if (PREDICT_TRUE (DPO_PROTO_NONE != proto)) { - /* - * intra-epg allowed - */ - next0 = - vnet_l2_feature_next (b0, - gpm->l2_output_feat_next - [is_port_based], - (is_port_based ? - L2OUTPUT_FEAT_GBP_POLICY_PORT : - L2OUTPUT_FEAT_GBP_POLICY_MAC)); - vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A; - } - else - { - gci0 = gbp_contract_find (&key0); - - if (INDEX_INVALID != gci0) - { - u32 rule_match_p0, trace_bitmap0; - fa_5tuple_opaque_t pkt_5tuple0; - u32 acl_pos_p0, acl_match_p0; - u8 is_ip60, l2_len0, action0; - const gbp_rule_t *gu; - u16 ether_type0; - const u8 *h0; - - action0 = 0; - gc0 = gbp_contract_get (gci0); - l2_len0 = vnet_buffer (b0)->l2.l2_len; - h0 = vlib_buffer_get_current (b0); - - ether_type0 = *(u16 *) (h0 + l2_len0 - 2); - - if (!gbp_policy_is_ethertype_allowed (gc0, ether_type0)) - { - /* - * black list model so drop - */ - goto trace; - } - - if ((ether_type0 == - clib_net_to_host_u16 (ETHERNET_TYPE_IP6)) - || (ether_type0 == - clib_net_to_host_u16 (ETHERNET_TYPE_IP4))) - { - is_ip60 = - (ether_type0 == - clib_net_to_host_u16 (ETHERNET_TYPE_IP6)) ? 1 : - 0; - /* - * tests against the ACL - */ - acl_plugin_fill_5tuple_inline (gm-> - acl_plugin.p_acl_main, - gc0->gc_lc_index, b0, - is_ip60, - /* is_input */ 0, - /* is_l2_path */ 1, - &pkt_5tuple0); - acl_plugin_match_5tuple_inline (gm-> - acl_plugin.p_acl_main, - gc0->gc_lc_index, - &pkt_5tuple0, - is_ip60, &action0, - &acl_pos_p0, - &acl_match_p0, - &rule_match_p0, - &trace_bitmap0); - - if (action0 > 0) - { - vnet_buffer2 (b0)->gbp.flags |= - VXLAN_GBP_GPFLAGS_A; - gu = - gbp_rule_get (gc0->gc_rules[rule_match_p0]); - - switch (gu->gu_action) - { - case GBP_RULE_PERMIT: - next0 = vnet_l2_feature_next - (b0, - gpm->l2_output_feat_next - [is_port_based], - (is_port_based ? 
- L2OUTPUT_FEAT_GBP_POLICY_PORT : - L2OUTPUT_FEAT_GBP_POLICY_MAC)); - break; - case GBP_RULE_DENY: - next0 = 0; - break; - case GBP_RULE_REDIRECT: - next0 = gbp_rule_l2_redirect (gu, b0); - break; - } - } - } - } + const gbp_ext_itf_t *ext_itf = + gbp_ext_itf_get (sw_if_index0); + const gbp_policy_dpo_t *gpd = + gbp_classify_get_gpd (ip4, ip6, + ext_itf->gx_fib_index[proto]); + if (gpd) + key0.gck_dst = gpd->gpd_sclass; } } else + { + if (GBP_POLICY_PORT == type) + ge0 = gbp_endpoint_find_itf (sw_if_index0); + else + ge0 = gbp_endpoint_find_mac (h0->dst_address, + vnet_buffer (b0)->l2.bd_index); + if (NULL != ge0) + key0.gck_dst = ge0->ge_fwd.gef_sclass; + } + + if (SCLASS_INVALID == key0.gck_dst) + { + /* If you cannot determine the destination EP then drop */ + b0->error = node->errors[GBP_CONTRACT_ERROR_DROP_NO_DCLASS]; + goto trace; + } + + key0.gck_src = vnet_buffer2 (b0)->gbp.sclass; + if (SCLASS_INVALID == key0.gck_src) { /* * the src EPG is not set when the packet arrives on an EPG * uplink interface and we do not need to apply policy */ - next0 = - vnet_l2_feature_next (b0, - gpm->l2_output_feat_next[is_port_based], - (is_port_based ? - L2OUTPUT_FEAT_GBP_POLICY_PORT : - L2OUTPUT_FEAT_GBP_POLICY_MAC)); + next0 = gbp_policy_l2_feature_next (gpm, b0, type); + goto trace; } - trace: - if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED))) + key0.gck_scope = + gbp_bridge_domain_get_scope (vnet_buffer (b0)->l2.bd_index); + + action0 = + gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra, + &n_allow_sclass_1, &acl_match, &rule_match, + &err0, GBP_CONTRACT_APPLY_L2); + switch (action0) { - gbp_policy_trace_t *t = - vlib_add_trace (vm, node, b0, sizeof (*t)); - t->sclass = key0.gck_src; - t->dst_epg = key0.gck_dst; - t->acl_index = (gc0 ? 
gc0->gc_acl_index : ~0), - t->allowed = (next0 != GBP_POLICY_NEXT_DENY); + case GBP_RULE_PERMIT: + next0 = gbp_policy_l2_feature_next (gpm, b0, type); + vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A; + break; + case GBP_RULE_REDIRECT: + next0 = gbp_rule_l2_redirect (rule0, b0); + vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A; + break; + case GBP_RULE_DENY: + next0 = GBP_POLICY_NEXT_DROP; + b0->error = node->errors[err0]; + break; } + trace: + gbp_policy_trace (vm, node, b0, &key0, action0, acl_match, + rule_match); + /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, @@ -320,6 +252,14 @@ gbp_policy_inline (vlib_main_t * vm, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra); + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit); + vlib_node_increment_counter (vm, node->node_index, + GBP_CONTRACT_ERROR_ALLOW_SCLASS_1, + n_allow_sclass_1); + return frame->n_vectors; } @@ -327,29 +267,21 @@ VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (gbp_policy_inline (vm, node, frame, 1)); + return (gbp_policy_inline (vm, node, frame, GBP_POLICY_PORT)); } VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (gbp_policy_inline (vm, node, frame, 0)); + return (gbp_policy_inline (vm, node, frame, GBP_POLICY_MAC)); } -/* packet trace format function */ -static u8 * -format_gbp_policy_trace (u8 * s, va_list * args) +VLIB_NODE_FN (gbp_policy_lpm_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *); - - s = - format (s, "sclass:%d, dst:%d, acl:%d allowed:%d", - t->sclass, t->dst_epg, t->acl_index, t->allowed); - - return s; + return (gbp_policy_inline (vm, node, frame, GBP_POLICY_LPM)); } /* *INDENT-OFF* */ @@ -359,13 +291,12 @@ VLIB_REGISTER_NODE (gbp_policy_port_node) = { .format_trace = format_gbp_policy_trace, .type = VLIB_NODE_TYPE_INTERNAL, - .n_errors = ARRAY_LEN(gbp_policy_error_strings), - .error_strings = gbp_policy_error_strings, + .n_errors = ARRAY_LEN(gbp_contract_error_strings), + .error_strings = gbp_contract_error_strings, .n_next_nodes = GBP_POLICY_N_NEXT, - .next_nodes = { - [GBP_POLICY_NEXT_DENY] = "error-drop", + [GBP_POLICY_NEXT_DROP] = "error-drop", }, }; @@ -374,7 +305,29 @@ VLIB_REGISTER_NODE (gbp_policy_mac_node) = { .vector_size = sizeof (u32), .format_trace = format_gbp_policy_trace, .type = VLIB_NODE_TYPE_INTERNAL, - .sibling_of = "gbp-policy-port", + + .n_errors = ARRAY_LEN(gbp_contract_error_strings), + .error_strings = gbp_contract_error_strings, + + .n_next_nodes = GBP_POLICY_N_NEXT, + .next_nodes = { + [GBP_POLICY_NEXT_DROP] = "error-drop", + }, +}; + +VLIB_REGISTER_NODE (gbp_policy_lpm_node) = { + .name = "gbp-policy-lpm", + .vector_size = sizeof (u32), + .format_trace = format_gbp_policy_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(gbp_contract_error_strings), + .error_strings = gbp_contract_error_strings, + + .n_next_nodes = GBP_POLICY_N_NEXT, + .next_nodes = { + [GBP_POLICY_NEXT_DROP] = "error-drop", + }, }; /* *INDENT-ON* */
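The core of this change is the switch from a boolean is_port_based argument to a three-valued gbp_policy_type_t (port, MAC, LPM): gbp_policy_l2_feature_next() maps the type to the matching L2OUTPUT_FEAT_GBP_POLICY_* bit and falls back to GBP_POLICY_NEXT_DROP for anything unrecognised, while gbp_contract_apply() now returns a single action that the node translates into a next index. The standalone sketch below illustrates that table-plus-fallback dispatch pattern in isolation; it is a minimal illustration, not VPP code, and every name in it (policy_type_t, feature_next, NEXT_DROP, next_by_type) is invented for the example.

#include <stdio.h>

/* Hypothetical stand-ins for the three policy node flavours in the patch. */
typedef enum
{
  POLICY_PORT = 0,
  POLICY_MAC,
  POLICY_LPM,
  POLICY_N_TYPES,
} policy_type_t;

#define NEXT_DROP 0u

/* Per-type next-index table, analogous in shape to
 * gpm->l2_output_feat_next[type] in the patch (values here are arbitrary). */
static const unsigned next_by_type[POLICY_N_TYPES] = {
  [POLICY_PORT] = 1,
  [POLICY_MAC] = 2,
  [POLICY_LPM] = 3,
};

/* Map a policy type to its next index; unknown types fall back to drop,
 * mirroring the "default: return GBP_POLICY_NEXT_DROP" branch in the patch. */
static unsigned
feature_next (int type)
{
  if (type < 0 || type >= POLICY_N_TYPES)
    return NEXT_DROP;
  return next_by_type[type];
}

int
main (void)
{
  printf ("port  -> %u\n", feature_next (POLICY_PORT));
  printf ("lpm   -> %u\n", feature_next (POLICY_LPM));
  printf ("bogus -> %u\n", feature_next (42));	/* falls back to drop */
  return 0;
}

The same shape appears twice in the patch: once when picking the feature bit per node type, and once in the switch on the action returned by gbp_contract_apply(), where GBP_RULE_DENY is the only case that keeps GBP_POLICY_NEXT_DROP.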