/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_classify.h>
18 #include <plugins/gbp/gbp_policy.h>
19 #include <plugins/gbp/gbp_policy_dpo.h>
20 #include <plugins/gbp/gbp_bridge_domain.h>
21 #include <plugins/gbp/gbp_ext_itf.h>
22 #include <plugins/gbp/gbp_contract.h>
24 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
25 #include <vnet/vxlan-gbp/vxlan_gbp.h>
33 always_inline dpo_proto_t
34 ethertype_to_dpo_proto (u16 etype)
36 etype = clib_net_to_host_u16 (etype);
40 case ETHERNET_TYPE_IP4:
41 return (DPO_PROTO_IP4);
42 case ETHERNET_TYPE_IP6:
43 return (DPO_PROTO_IP6);
46 return (DPO_PROTO_NONE);
50 gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
52 const ethernet_header_t *eth0;
56 eth0 = vlib_buffer_get_current (b0);
57 /* pop the ethernet header to prepare for L3 rewrite */
58 vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
60 dproto = ethertype_to_dpo_proto (eth0->type);
61 dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
63 /* save the LB index for the next node and reset the IP flow hash
64 * so it's recalculated */
65 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
66 vnet_buffer (b0)->ip.flow_hash = 0;
68 return (dpo->dpoi_next_node);
71 static_always_inline gbp_policy_next_t
72 gbp_policy_l2_feature_next (gbp_policy_main_t * gpm, vlib_buffer_t * b,
73 const gbp_policy_type_t type)
80 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_PORT;
83 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_MAC;
86 feat_bit = L2OUTPUT_FEAT_GBP_POLICY_LPM;
89 return GBP_POLICY_NEXT_DROP;
92 return vnet_l2_feature_next (b, gpm->l2_output_feat_next[type], feat_bit);
/**
 * gbp_policy_inline
 *
 * Shared per-packet loop for the gbp-policy-{port,mac,lpm} L2 output
 * nodes.  For each buffer it: drops reflected vxlan-gbp packets, skips
 * enforcement when the A-bit marks policy as already applied, resolves
 * the destination sclass according to 'type', applies the GBP contract
 * for the (src, dst) sclass key and selects the next node (permit ->
 * next L2 output feature, redirect -> rule DPO, otherwise drop).
 *
 * NOTE(review): this extract is missing structural lines (the return
 * type, declarations such as next_index/b0/rule0, braces, else arms and
 * the switch header for action0); the comments below annotate only the
 * statements that are visible — verify against the full file.
 */
gbp_policy_inline (vlib_main_t * vm,
		   vlib_node_runtime_t * node,
		   vlib_frame_t * frame, const gbp_policy_type_t type)
  gbp_main_t *gm = &gbp_main;
  gbp_policy_main_t *gpm = &gbp_policy_main;
  u32 n_left_from, *from, *to_next;
  /* bulk-allow tallies accumulated in the loop and folded into the
   * node counters once per frame (cheaper than per-packet increments) */
  u32 n_allow_intra, n_allow_a_bit, n_allow_sclass_1;

  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);
  n_allow_intra = n_allow_a_bit = n_allow_sclass_1 = 0;

  while (n_left_from > 0)
      /* claim space in the current next-frame for enqueued buffers */
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	  /* default verdict is deny: only an explicit permit/redirect
	   * below changes it */
	  gbp_rule_action_t action0 = GBP_RULE_DENY;
	  const ethernet_header_t *h0;
	  const gbp_endpoint_t *ge0;
	  gbp_contract_error_t err0;
	  /* ~0 => no ACL/rule matched; recorded in the packet trace */
	  u32 acl_match = ~0, rule_match = ~0;
	  gbp_policy_next_t next0;
	  gbp_contract_key_t key0;
	  u32 bi0, sw_if_index0;

	  next0 = GBP_POLICY_NEXT_DROP;

	  b0 = vlib_get_buffer (vm, bi0);
	  h0 = vlib_buffer_get_current (b0);
	  /* output feature: the TX interface is where this packet is
	   * headed */
	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];

	  /*
	   * Reflection check; in and out on an ivxlan tunnel
	   */
	  if ((~0 != vxlan_gbp_tunnel_by_sw_if_index (sw_if_index0)) &&
	      (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_R))

	  /*
	   * If the A-bit is set then policy has already been applied
	   * and we skip enforcement here.
	   */
	  if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
	      next0 = gbp_policy_l2_feature_next (gpm, b0, type);

	  /*
	   * determine the src and dst EPG
	   */

	  /* zero out the key to ensure the pad space is clear */
	  key0.gck_src = vnet_buffer2 (b0)->gbp.sclass;
	  key0.gck_dst = SCLASS_INVALID;

	  if (GBP_POLICY_LPM == type)
	      /* LPM node: classify by destination IP address through
	       * the external interface's FIB */
	      const ip4_address_t *ip4 = 0;
	      const ip6_address_t *ip6 = 0;
	      const dpo_proto_t proto =
		gbp_classify_get_ip_address (h0, &ip4, &ip6,
					     GBP_CLASSIFY_GET_IP_DST);
	      if (PREDICT_TRUE (DPO_PROTO_NONE != proto))
		  const gbp_ext_itf_t *ext_itf =
		    gbp_ext_itf_get (sw_if_index0);
		  const gbp_policy_dpo_t *gpd =
		    gbp_classify_get_gpd (ip4, ip6,
					  ext_itf->gx_fib_index[proto]);
		  key0.gck_dst = gpd->gpd_sclass;

	      /* PORT node: endpoint keyed on the TX interface;
	       * otherwise (MAC node) keyed on dst MAC + bridge-domain */
	      if (GBP_POLICY_PORT == type)
		ge0 = gbp_endpoint_find_itf (sw_if_index0);
		ge0 = gbp_endpoint_find_mac (h0->dst_address,
					     vnet_buffer (b0)->l2.bd_index);
		key0.gck_dst = ge0->ge_fwd.gef_sclass;

	  if (SCLASS_INVALID == key0.gck_dst)
	      /* If you cannot determine the destination EP then drop */
	      b0->error = node->errors[GBP_CONTRACT_ERROR_DROP_NO_DCLASS];

	  key0.gck_src = vnet_buffer2 (b0)->gbp.sclass;
	  if (SCLASS_INVALID == key0.gck_src)
	      /*
	       * the src EPG is not set when the packet arrives on an EPG
	       * uplink interface and we do not need to apply policy
	       */
	      next0 = gbp_policy_l2_feature_next (gpm, b0, type);

	  /* contracts are looked up within the bridge-domain's scope */
	  gbp_bridge_domain_get_scope (vnet_buffer (b0)->l2.bd_index);

	  gbp_contract_apply (vm, gm, &key0, b0, &rule0, &n_allow_intra,
			      &n_allow_sclass_1, &acl_match, &rule_match,
			      &err0, GBP_CONTRACT_APPLY_L2);
	  /* dispatch on the contract verdict (switch header missing in
	   * this extract) */
	    case GBP_RULE_PERMIT:
	      next0 = gbp_policy_l2_feature_next (gpm, b0, type);
	      /* set the A-bit so downstream hops skip re-enforcement */
	      vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
	    case GBP_RULE_REDIRECT:
	      next0 = gbp_rule_l2_redirect (rule0, b0);
	      vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
	      next0 = GBP_POLICY_NEXT_DROP;
	      b0->error = node->errors[err0];

	  gbp_policy_trace (vm, node, b0, &key0, action0, acl_match,

	  /* verify speculative enqueue, maybe switch current next frame */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* fold the per-frame bulk-allow tallies into the node counters */
  vlib_node_increment_counter (vm, node->node_index,
			       GBP_CONTRACT_ERROR_ALLOW_INTRA, n_allow_intra);
  vlib_node_increment_counter (vm, node->node_index,
			       GBP_CONTRACT_ERROR_ALLOW_A_BIT, n_allow_a_bit);
  vlib_node_increment_counter (vm, node->node_index,
			       GBP_CONTRACT_ERROR_ALLOW_SCLASS_1,

  return frame->n_vectors;
266 VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm,
267 vlib_node_runtime_t * node,
268 vlib_frame_t * frame)
270 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_PORT));
273 VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm,
274 vlib_node_runtime_t * node,
275 vlib_frame_t * frame)
277 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_MAC));
280 VLIB_NODE_FN (gbp_policy_lpm_node) (vlib_main_t * vm,
281 vlib_node_runtime_t * node,
282 vlib_frame_t * frame)
284 return (gbp_policy_inline (vm, node, frame, GBP_POLICY_LPM));
288 VLIB_REGISTER_NODE (gbp_policy_port_node) = {
289 .name = "gbp-policy-port",
290 .vector_size = sizeof (u32),
291 .format_trace = format_gbp_policy_trace,
292 .type = VLIB_NODE_TYPE_INTERNAL,
294 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
295 .error_strings = gbp_contract_error_strings,
297 .n_next_nodes = GBP_POLICY_N_NEXT,
299 [GBP_POLICY_NEXT_DROP] = "error-drop",
303 VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
304 .name = "gbp-policy-mac",
305 .vector_size = sizeof (u32),
306 .format_trace = format_gbp_policy_trace,
307 .type = VLIB_NODE_TYPE_INTERNAL,
309 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
310 .error_strings = gbp_contract_error_strings,
312 .n_next_nodes = GBP_POLICY_N_NEXT,
314 [GBP_POLICY_NEXT_DROP] = "error-drop",
318 VLIB_REGISTER_NODE (gbp_policy_lpm_node) = {
319 .name = "gbp-policy-lpm",
320 .vector_size = sizeof (u32),
321 .format_trace = format_gbp_policy_trace,
322 .type = VLIB_NODE_TYPE_INTERNAL,
324 .n_errors = ARRAY_LEN(gbp_contract_error_strings),
325 .error_strings = gbp_contract_error_strings,
327 .n_next_nodes = GBP_POLICY_N_NEXT,
329 [GBP_POLICY_NEXT_DROP] = "error-drop",
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */