2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_classify.h>
18 #include <plugins/gbp/gbp_policy.h>
19 #include <plugins/gbp/gbp_policy_dpo.h>
20 #include <plugins/gbp/gbp_bridge_domain.h>
21 #include <plugins/gbp/gbp_ext_itf.h>
22 #include <plugins/gbp/gbp_contract.h>
24 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
25 #include <vnet/vxlan-gbp/vxlan_gbp.h>
/*
 * Map an ethernet ethertype (as found in the frame, network byte order)
 * to the corresponding DPO protocol; DPO_PROTO_NONE for anything that
 * is not IPv4 or IPv6.
 * NOTE(review): the switch scaffolding is elided in this view of the file.
 */
33 always_inline dpo_proto_t
34 ethertype_to_dpo_proto (u16 etype)
/* ethertype arrives in network byte order; compare in host order */
36 etype = clib_net_to_host_u16 (etype);
40 case ETHERNET_TYPE_IP4:
41 return (DPO_PROTO_IP4);
42 case ETHERNET_TYPE_IP6:
43 return (DPO_PROTO_IP6);
/* non-IP traffic cannot be redirected at L3 */
46 return (DPO_PROTO_NONE);
/*
 * Apply a GBP rule's L2 redirect action to a buffer: strip the ethernet
 * header, select the rule's redirect DPO for the packet's L3 protocol,
 * and stash the DPO (load-balance) index for the next node.
 *
 * @param gu  the matched GBP rule carrying the per-protocol redirect DPOs
 * @param b0  the buffer being redirected (current data points at ethernet)
 * @return the next-node index taken from the chosen DPO
 */
50 gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
52 const ethernet_header_t *eth0;
56 eth0 = vlib_buffer_get_current (b0);
57 /* pop the ethernet header to prepare for L3 rewrite */
58 vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
/* pick the redirect DPO configured on the rule for this L3 protocol */
60 dproto = ethertype_to_dpo_proto (eth0->type);
61 dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
63 /* save the LB index for the next node and reset the IP flow hash
64 * so it's recalculated */
65 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
66 vnet_buffer (b0)->ip.flow_hash = 0;
68 return (dpo->dpoi_next_node);
/*
 * Hand the buffer to the next enabled L2-output feature after this
 * policy node. The feature bit to clear depends on which flavour of
 * the policy node (port / mac / lpm) is running.
 * Returns GBP_POLICY_NEXT_DROP if the type is not recognised.
 * NOTE(review): the switch scaffolding is elided in this view of the file.
 */
71 static_always_inline gbp_policy_next_t
72 gbp_policy_l2_feature_next (gbp_policy_main_t * gpm, vlib_buffer_t * b,
73 const gbp_policy_type_t type)
80 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_PORT;
83 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_MAC;
86 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_LPM;
89 return GBP_POLICY_NEXT_DROP;
/* consume this node's feature bit and dispatch to the next feature */
92 return vnet_l2_feature_next (b, gpm->l2_output_feat_next[type], feat_bit);
/*
 * Shared worker for the three GBP policy nodes (port / mac / lpm).
 * For each buffer on the frame: determine the source and destination
 * sclass (EPG), look up and apply the contract between them, and either
 * permit (next L2 feature), redirect (rule DPO) or drop the packet.
 * Counters for the intra-EPG / A-bit / sclass-1 allow shortcuts are
 * accumulated per-frame and flushed at the end.
 * NOTE(review): this view of the file is truncated — several scaffolding
 * lines (braces, declarations, switch/goto labels) are elided.
 */
96 gbp_policy_inline (vlib_main_t * vm,
97 vlib_node_runtime_t * node,
98 vlib_frame_t * frame, const gbp_policy_type_t type)
100 gbp_main_t *gm = &gbp_main;
101 gbp_policy_main_t *gpm = &gbp_policy_main;
102 u32 n_left_from, *from, *to_next;
/* per-frame tallies for the contract "allow" fast paths */
104 u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1;
107 n_left_from = frame->n_vectors;
108 from = vlib_frame_vector_args (frame);
109 n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0;
111 while (n_left_from > 0)
115 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
117 while (n_left_from > 0 && n_left_to_next > 0)
119 const ethernet_header_t *h0;
120 const gbp_endpoint_t *ge0;
121 gbp_rule_action_t action0;
122 gbp_contract_error_t err0;
123 gbp_policy_next_t next0;
124 gbp_contract_key_t key0;
125 u32 bi0, sw_if_index0;
/* default verdict is drop; only an explicit allow/redirect overrides it */
129 next0 = GBP_POLICY_NEXT_DROP;
137 b0 = vlib_get_buffer (vm, bi0);
138 h0 = vlib_buffer_get_current (b0);
/* policy is applied on L2 output, hence the TX interface */
139 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
142 * Reflection check; in and out on an ivxlan tunnel
/* drop packets that would hairpin back out the vxlan-gbp tunnel they
 * arrived on (R flag set) */
144 if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (sw_if_index0)) &&
145 (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))
151 * If the A-bit is set then policy has already been applied
152 * and we skip enforcement here.
154 if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
156 next0 = gbp_policy_l2_feature_next (gpm, b0, type);
163 * determine the src and dst EPG
166 /* zero out the key to ensure the pad space is clear */
168 key0.gck_dst = SCLASS_INVALID;
170 if (GBP_POLICY_LPM == type)
/* LPM flavour: classify the destination by IP address lookup in the
 * external-interface's FIB rather than by endpoint DB */
172 const ip4_address_t *ip4 = 0;
173 const ip6_address_t *ip6 = 0;
174 const dpo_proto_t proto =
175 gbp_classify_get_ip_address (h0, &ip4, &ip6,
176 GBP_CLASSIFY_GET_IP_DST);
177 if (PREDICT_TRUE (DPO_PROTO_NONE != proto))
179 const gbp_ext_itf_t *ext_itf =
180 gbp_ext_itf_get (sw_if_index0);
181 const gbp_policy_dpo_t *gpd =
182 gbp_classify_get_gpd (ip4, ip6,
183 ext_itf->gx_fib_index[proto]);
185 key0.gck_dst = gpd->gpd_sclass;
/* port/mac flavours: destination EPG comes from the endpoint DB,
 * keyed by TX interface or by destination MAC + bridge domain */
190 if (GBP_POLICY_PORT == type)
191 ge0 = gbp_endpoint_find_itf (sw_if_index0);
193 ge0 = gbp_endpoint_find_mac (h0->dst_address,
194 vnet_buffer (b0)->l2.bd_index);
196 key0.gck_dst = ge0->ge_fwd.gef_sclass;
199 if (SCLASS_INVALID == key0.gck_dst)
201 /* If you cannot determine the destination EP then drop */
202 b0->error = node->errors[GBP_CONTRACT_ERROR_DROP_NO_DCLASS];
/* source EPG was stamped on the buffer by the classifier/tunnel */
206 key0.gck_src = vnet_buffer2 (b0)->gbp.sclass;
207 if (SCLASS_INVALID == key0.gck_src)
210 * the src EPG is not set when the packet arrives on an EPG
211 * uplink interface and we do not need to apply policy
213 next0 = gbp_policy_l2_feature_next (gpm, b0, type);
/* contracts are scoped per bridge-domain */
218 gbp_bridge_domain_get_scope (vnet_buffer (b0)->l2.bd_index);
221 gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra,
222 &n_allow_sclass_1, &err0,
223 GBP_CONTRACT_APPLY_L2);
226 case GBP_RULE_PERMIT:
227 next0 = gbp_policy_l2_feature_next (gpm, b0, type);
/* mark policy as applied so downstream hops skip re-enforcement */
228 vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
230 case GBP_RULE_REDIRECT:
231 next0 = gbp_rule_l2_redirect (rule0, b0);
232 vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
235 next0 = GBP_POLICY_NEXT_DROP;
236 b0->error = node->errors[err0];
241 gbp_policy_trace (vm, node, b0, &key0,
242 (next0 != GBP_POLICY_NEXT_DROP));
244 /* verify speculative enqueue, maybe switch current next frame */
245 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
246 to_next, n_left_to_next,
250 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* flush the per-frame allow counters in one shot each */
253 vlib_node_increment_counter (vm, node->node_index,
254 GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra);
255 vlib_node_increment_counter (vm, node->node_index,
256 GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit);
257 vlib_node_increment_counter (vm, node->node_index,
258 GBP_CONTRACT_ERROR_ALLOW_SCLASS_1,
261 return frame->n_vectors;
/* Node entry point: per-port GBP policy (dst EPG keyed by TX interface). */
264 VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm,
265 vlib_node_runtime_t * node,
266 vlib_frame_t * frame)
268 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_PORT));
/* Node entry point: per-MAC GBP policy (dst EPG keyed by dst MAC + BD). */
271 VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm,
272 vlib_node_runtime_t * node,
273 vlib_frame_t * frame)
275 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_MAC));
/* Node entry point: LPM GBP policy (dst EPG from IP lookup on ext-itf FIB). */
278 VLIB_NODE_FN (gbp_policy_lpm_node) (vlib_main_t * vm,
279 vlib_node_runtime_t * node,
280 vlib_frame_t * frame)
282 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_LPM));
/* Graph-node registration for the per-port policy flavour; shares the
 * contract error strings and the single drop next-arc with its siblings. */
VLIB_REGISTER_NODE (gbp_policy_port_node) = {
287 .name = "gbp-policy-port",
288 .vector_size = sizeof (u32),
289 .format_trace = format_gbp_policy_trace,
290 .type = VLIB_NODE_TYPE_INTERNAL,
292 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
293 .error_strings = gbp_contract_error_strings,
295 .n_next_nodes = GBP_POLICY_N_NEXT,
297 [GBP_POLICY_NEXT_DROP] = "error-drop",
/* Graph-node registration for the per-MAC policy flavour. */
VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
302 .name = "gbp-policy-mac",
303 .vector_size = sizeof (u32),
304 .format_trace = format_gbp_policy_trace,
305 .type = VLIB_NODE_TYPE_INTERNAL,
307 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
308 .error_strings = gbp_contract_error_strings,
310 .n_next_nodes = GBP_POLICY_N_NEXT,
312 [GBP_POLICY_NEXT_DROP] = "error-drop",
/* Graph-node registration for the LPM policy flavour. */
VLIB_REGISTER_NODE (gbp_policy_lpm_node) = {
317 .name = "gbp-policy-lpm",
318 .vector_size = sizeof (u32),
319 .format_trace = format_gbp_policy_trace,
320 .type = VLIB_NODE_TYPE_INTERNAL,
322 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
323 .error_strings = gbp_contract_error_strings,
325 .n_next_nodes = GBP_POLICY_N_NEXT,
327 [GBP_POLICY_NEXT_DROP] = "error-drop",
334 * fd.io coding-style-patch-verification: ON
337 * eval: (c-set-style "gnu")