/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_policy_dpo.h>
19 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
22 * Grouping of global data for the GBP source EPG classification feature
24 typedef struct gbp_policy_main_t_
27 * Next nodes for L2 output features
29 u32 l2_output_feat_next[2][32];
32 static gbp_policy_main_t gbp_policy_main;
/*
 * X-macro table of GBP policy outcomes, expanded below to generate the
 * error enum (GBP_ERROR_*), the error-string table and the next-node
 * enum (GBP_POLICY_NEXT_*).
 * NOTE(review): this listing is lossy -- the _(sym,str) entries of the
 * table and the surrounding enum scaffolding are missing from view.
 */
34 #define foreach_gbp_policy \
39 #define _(sym,str) GBP_ERROR_##sym,
/* human-readable strings for the node's per-error counters */
45 static char *gbp_policy_error_strings[] = {
46 #define _(sym,string) string,
/* next-node indices for this node's enqueue targets */
53 #define _(sym,str) GBP_POLICY_NEXT_##sym,
60 * per-packet trace data
62 typedef struct gbp_policy_trace_t_
64 /* per-pkt trace data */
71 always_inline dpo_proto_t
72 ethertype_to_dpo_proto (u16 etype)
74 etype = clib_net_to_host_u16 (etype);
78 case ETHERNET_TYPE_IP4:
79 return (DPO_PROTO_IP4);
80 case ETHERNET_TYPE_IP6:
81 return (DPO_PROTO_IP6);
84 return (DPO_PROTO_NONE);
88 gbp_rule_l2_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0)
90 const ethernet_header_t *eth0;
94 eth0 = vlib_buffer_get_current (b0);
95 /* pop the ethernet header to prepare for L3 rewrite */
96 vlib_buffer_advance (b0, vnet_buffer (b0)->l2.l2_len);
98 dproto = ethertype_to_dpo_proto (eth0->type);
99 dpo = &gu->gu_dpo[GBP_POLICY_NODE_L2][dproto];
101 /* save the LB index for the next node and reset the IP flow hash
102 * so it's recalculated */
103 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
104 vnet_buffer (b0)->ip.flow_hash = 0;
106 return (dpo->dpoi_next_node);
/*
 * gbp_policy_inline
 *
 * Per-buffer GBP policy enforcement on the L2 output feature arc.
 * is_port_based selects how the destination endpoint is resolved:
 * 1 = by output sw_if_index, 0 = by dst MAC + bridge-domain.
 *
 * NOTE(review): this listing is lossy -- the embedded original line
 * numbers jump (110, 111, 112, 114, ...), so some declarations, braces
 * and statements are missing from view.  Comments below describe only
 * the visible code.
 */
110 gbp_policy_inline (vlib_main_t * vm,
111 vlib_node_runtime_t * node,
112 vlib_frame_t * frame, u8 is_port_based)
114 gbp_main_t *gm = &gbp_main;
115 gbp_policy_main_t *gpm = &gbp_policy_main;
116 u32 n_left_from, *from, *to_next;
120 n_left_from = frame->n_vectors;
121 from = vlib_frame_vector_args (frame);
123 while (n_left_from > 0)
127 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
129 while (n_left_from > 0 && n_left_to_next > 0)
131 const ethernet_header_t *h0;
132 const gbp_endpoint_t *ge0;
133 gbp_policy_next_t next0;
134 gbp_contract_key_t key0;
136 u32 bi0, sw_if_index0;
/* default-deny: next0 only moves off DENY on an explicit permit path */
141 next0 = GBP_POLICY_NEXT_DENY;
149 b0 = vlib_get_buffer (vm, bi0);
150 h0 = vlib_buffer_get_current (b0);
/* this is an output feature, so the TX sw_if_index is the dest itf */
151 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
/*
 * If the A-bit (policy Applied) is set then policy has already been
 * applied and we skip enforcement here.
 */
157 if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
159 next0 = vnet_l2_feature_next (b0,
160 gpm->l2_output_feat_next
163 L2OUTPUT_FEAT_GBP_POLICY_PORT :
164 L2OUTPUT_FEAT_GBP_POLICY_MAC));
/*
 * determine the src and dst EPG:
 * port-based lookup uses the output interface, otherwise the dst MAC
 * within the bridge-domain.
 */
172 ge0 = gbp_endpoint_find_itf (sw_if_index0);
174 ge0 = gbp_endpoint_find_mac (h0->dst_address,
175 vnet_buffer (b0)->l2.bd_index);
178 key0.gck_dst = ge0->ge_fwd.gef_epg_id;
/* If you cannot determine the destination EP then drop */
/* src EPG was stashed in buffer metadata (presumably at RX /
 * VXLAN-GBP decap -- confirm against the input nodes) */
183 key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
185 if (EPG_INVALID != key0.gck_src)
/* intra-EPG traffic is permitted without a contract lookup */
187 if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
193 vnet_l2_feature_next (b0,
194 gpm->l2_output_feat_next
197 L2OUTPUT_FEAT_GBP_POLICY_PORT :
198 L2OUTPUT_FEAT_GBP_POLICY_MAC));
/* mark policy applied so downstream nodes skip enforcement */
199 vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
/* inter-EPG: look up the contract keyed on {src, dst} EPG */
203 gci0 = gbp_contract_find (&key0);
205 if (INDEX_INVALID != gci0)
207 fa_5tuple_opaque_t pkt_5tuple0;
209 u32 acl_pos_p0, acl_match_p0;
210 u32 rule_match_p0, trace_bitmap0;
215 gc0 = gbp_contract_get (gci0);
216 l2_len0 = vnet_buffer (b0)->l2.l2_len;
217 h0 = vlib_buffer_get_current (b0);
/* NOTE(review): h0 is an ethernet_header_t *, so "h0 + l2_len0 - 2"
 * advances by (l2_len0 - 2) * sizeof(ethernet_header_t) bytes, not
 * bytes.  Reading the 2-byte ethertype at byte offset l2_len0 - 2
 * needs a (u8 *) cast first -- TODO confirm against upstream. */
220 clib_net_to_host_u16 (*(u16 *) (h0 + l2_len0 - 2));
222 is_ip60 = (ether_type0 == ETHERNET_TYPE_IP6) ? 1 : 0;
/*
 * tests against the ACL associated with the contract
 */
226 acl_plugin_fill_5tuple_inline (gm->
227 acl_plugin.p_acl_main,
228 gc0->gc_lc_index, b0,
233 acl_plugin_match_5tuple_inline (gm->
234 acl_plugin.p_acl_main,
236 &pkt_5tuple0, is_ip60,
237 &action0, &acl_pos_p0,
/* ACL permitted: mark applied, then execute the matched rule's action */
246 vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
247 gu = gbp_rule_get (gc0->gc_rules[rule_match_p0]);
249 switch (gu->gu_action)
251 case GBP_RULE_PERMIT:
252 next0 = vnet_l2_feature_next
254 gpm->l2_output_feat_next
257 L2OUTPUT_FEAT_GBP_POLICY_PORT :
258 L2OUTPUT_FEAT_GBP_POLICY_MAC));
264 case GBP_RULE_REDIRECT:
/* hand the frame to the rule's redirect DPO */
265 next0 = gbp_rule_l2_redirect (gu, b0);
/*
 * the src EPG is not set when the packet arrives on an EPG
 * uplink interface and we do not need to apply policy
 */
279 vnet_l2_feature_next (b0,
280 gpm->l2_output_feat_next[is_port_based],
282 L2OUTPUT_FEAT_GBP_POLICY_PORT :
283 L2OUTPUT_FEAT_GBP_POLICY_MAC));
287 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
289 gbp_policy_trace_t *t =
290 vlib_add_trace (vm, node, b0, sizeof (*t));
291 t->src_epg = key0.gck_src;
292 t->dst_epg = key0.gck_dst;
/* NOTE(review): comma operator below -- harmless here, but ";" is
 * almost certainly intended */
293 t->acl_index = (gc0 ? gc0->gc_acl_index : ~0),
294 t->allowed = (next0 != GBP_POLICY_NEXT_DENY);
/* verify speculative enqueue, maybe switch current next frame */
298 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
299 to_next, n_left_to_next,
303 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
306 return frame->n_vectors;
310 gbp_policy_port (vlib_main_t * vm,
311 vlib_node_runtime_t * node, vlib_frame_t * frame)
313 return (gbp_policy_inline (vm, node, frame, 1));
317 gbp_policy_mac (vlib_main_t * vm,
318 vlib_node_runtime_t * node, vlib_frame_t * frame)
320 return (gbp_policy_inline (vm, node, frame, 0));
323 /* packet trace format function */
325 format_gbp_policy_trace (u8 * s, va_list * args)
327 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
328 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
329 gbp_policy_trace_t *t = va_arg (*args, gbp_policy_trace_t *);
332 format (s, "src:%d, dst:%d, acl:%d allowed:%d",
333 t->src_epg, t->dst_epg, t->acl_index, t->allowed);
339 VLIB_REGISTER_NODE (gbp_policy_port_node) = {
340 .function = gbp_policy_port,
341 .name = "gbp-policy-port",
342 .vector_size = sizeof (u32),
343 .format_trace = format_gbp_policy_trace,
344 .type = VLIB_NODE_TYPE_INTERNAL,
346 .n_errors = ARRAY_LEN(gbp_policy_error_strings),
347 .error_strings = gbp_policy_error_strings,
349 .n_next_nodes = GBP_POLICY_N_NEXT,
352 [GBP_POLICY_NEXT_DENY] = "error-drop",
356 VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_port_node, gbp_policy_port);
358 VLIB_REGISTER_NODE (gbp_policy_mac_node) = {
359 .function = gbp_policy_mac,
360 .name = "gbp-policy-mac",
361 .vector_size = sizeof (u32),
362 .format_trace = format_gbp_policy_trace,
363 .type = VLIB_NODE_TYPE_INTERNAL,
364 .sibling_of = "gbp-policy-port",
367 VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_mac_node, gbp_policy_mac);
371 static clib_error_t *
372 gbp_policy_init (vlib_main_t * vm)
374 gbp_policy_main_t *gpm = &gbp_policy_main;
375 clib_error_t *error = 0;
377 /* Initialize the feature next-node indexes */
378 feat_bitmap_init_next_nodes (vm,
379 gbp_policy_port_node.index,
381 l2output_get_feat_names (),
382 gpm->l2_output_feat_next[1]);
383 feat_bitmap_init_next_nodes (vm,
384 gbp_policy_mac_node.index,
386 l2output_get_feat_names (),
387 gpm->l2_output_feat_next[0]);
392 VLIB_INIT_FUNCTION (gbp_policy_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */