/*
 * gbp.h : Group Based Policy
 *
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <plugins/gbp/gbp.h>
19 #include <plugins/gbp/gbp_policy_dpo.h>
20 #include <plugins/gbp/gbp_ext_itf.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/dpo/load_balance.h>
24 #include <vnet/l2/l2_input.h>
25 #include <vnet/l2/feat_bitmap.h>
26 #include <vnet/fib/fib_table.h>
27 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
/**
 * The mechanisms by which the source EPG of a packet can be classified.
 * GBP_SRC_CLASSIFY_LPM is required by GBP_SRC_N_CLASSIFY below and by the
 * users of l2_input_feat_next[GBP_SRC_CLASSIFY_LPM].
 */
typedef enum gbp_src_classify_type_t_
{
  GBP_SRC_CLASSIFY_NULL,
  GBP_SRC_CLASSIFY_PORT,
  GBP_SRC_CLASSIFY_LPM,
} gbp_src_classify_type_t;

#define GBP_SRC_N_CLASSIFY (GBP_SRC_CLASSIFY_LPM + 1)
39 * Grouping of global data for the GBP source EPG classification feature
41 typedef struct gbp_src_classify_main_t_
44 * Next nodes for L2 output features
46 u32 l2_input_feat_next[GBP_SRC_N_CLASSIFY][32];
47 } gbp_src_classify_main_t;
49 static gbp_src_classify_main_t gbp_src_classify_main;
52 * per-packet trace data
54 typedef struct gbp_classify_trace_t_
56 /* per-pkt trace data */
58 } gbp_classify_trace_t;
61 * determine the SRC EPG form the input port
64 gbp_classify_inline (vlib_main_t * vm,
65 vlib_node_runtime_t * node,
67 gbp_src_classify_type_t type, dpo_proto_t dproto)
69 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
70 u32 n_left_from, *from, *to_next;
74 n_left_from = frame->n_vectors;
75 from = vlib_frame_vector_args (frame);
77 while (n_left_from > 0)
81 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
83 while (n_left_from > 0 && n_left_to_next > 0)
85 u32 next0, bi0, src_epg, sw_if_index0;
86 const gbp_endpoint_t *ge0;
96 b0 = vlib_get_buffer (vm, bi0);
98 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
99 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
101 if (GBP_SRC_CLASSIFY_NULL == type)
103 src_epg = EPG_INVALID;
105 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
106 L2INPUT_FEAT_GBP_NULL_CLASSIFY);
110 if (DPO_PROTO_ETHERNET == dproto)
112 const ethernet_header_t *h0;
114 h0 = vlib_buffer_get_current (b0);
116 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
117 L2INPUT_FEAT_GBP_SRC_CLASSIFY);
118 ge0 = gbp_endpoint_find_mac (h0->src_address,
119 vnet_buffer (b0)->l2.bd_index);
121 else if (DPO_PROTO_IP4 == dproto)
123 const ip4_header_t *h0;
125 h0 = vlib_buffer_get_current (b0);
127 ge0 = gbp_endpoint_find_ip4
129 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
134 * Go straight to looukp, do not pass go, do not collect $200
138 else if (DPO_PROTO_IP6 == dproto)
140 const ip6_header_t *h0;
142 h0 = vlib_buffer_get_current (b0);
144 ge0 = gbp_endpoint_find_ip6
146 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
151 * Go straight to lookup, do not pass go, do not collect $200
162 if (PREDICT_TRUE (NULL != ge0))
163 src_epg = ge0->ge_fwd.gef_epg_id;
165 src_epg = EPG_INVALID;
168 vnet_buffer2 (b0)->gbp.src_epg = src_epg;
170 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
172 gbp_classify_trace_t *t =
173 vlib_add_trace (vm, node, b0, sizeof (*t));
174 t->src_epg = src_epg;
177 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
178 to_next, n_left_to_next,
182 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
185 return frame->n_vectors;
189 gbp_src_classify (vlib_main_t * vm,
190 vlib_node_runtime_t * node, vlib_frame_t * frame)
192 return (gbp_classify_inline (vm, node, frame,
193 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
197 gbp_null_classify (vlib_main_t * vm,
198 vlib_node_runtime_t * node, vlib_frame_t * frame)
200 return (gbp_classify_inline (vm, node, frame,
201 GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
205 gbp_ip4_src_classify (vlib_main_t * vm,
206 vlib_node_runtime_t * node, vlib_frame_t * frame)
208 return (gbp_classify_inline (vm, node, frame,
209 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
213 gbp_ip6_src_classify (vlib_main_t * vm,
214 vlib_node_runtime_t * node, vlib_frame_t * frame)
216 return (gbp_classify_inline (vm, node, frame,
217 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
221 /* packet trace format function */
223 format_gbp_classify_trace (u8 * s, va_list * args)
225 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
226 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
227 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
229 s = format (s, "src-epg:%d", t->src_epg);
235 VLIB_REGISTER_NODE (gbp_null_classify_node) = {
236 .function = gbp_null_classify,
237 .name = "gbp-null-classify",
238 .vector_size = sizeof (u32),
239 .format_trace = format_gbp_classify_trace,
240 .type = VLIB_NODE_TYPE_INTERNAL,
246 VLIB_NODE_FUNCTION_MULTIARCH (gbp_null_classify_node, gbp_null_classify);
248 VLIB_REGISTER_NODE (gbp_src_classify_node) = {
249 .function = gbp_src_classify,
250 .name = "gbp-src-classify",
251 .vector_size = sizeof (u32),
252 .format_trace = format_gbp_classify_trace,
253 .type = VLIB_NODE_TYPE_INTERNAL,
259 VLIB_NODE_FUNCTION_MULTIARCH (gbp_src_classify_node, gbp_src_classify);
261 VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
262 .function = gbp_ip4_src_classify,
263 .name = "ip4-gbp-src-classify",
264 .vector_size = sizeof (u32),
265 .format_trace = format_gbp_classify_trace,
266 .type = VLIB_NODE_TYPE_INTERNAL,
275 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_src_classify_node, gbp_ip4_src_classify);
277 VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
278 .function = gbp_ip6_src_classify,
279 .name = "ip6-gbp-src-classify",
280 .vector_size = sizeof (u32),
281 .format_trace = format_gbp_classify_trace,
282 .type = VLIB_NODE_TYPE_INTERNAL,
291 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_src_classify_node, gbp_ip6_src_classify);
293 VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
295 .arc_name = "ip4-unicast",
296 .node_name = "ip4-gbp-src-classify",
297 .runs_before = VNET_FEATURES ("nat44-out2in"),
299 VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
301 .arc_name = "ip6-unicast",
302 .node_name = "ip6-gbp-src-classify",
303 .runs_before = VNET_FEATURES ("nat66-out2in"),
/**
 * Next nodes of the LPM classify nodes; packets that cannot be
 * classified are dropped.
 */
typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;
313 always_inline dpo_proto_t
314 ethertype_to_dpo_proto (const ethernet_header_t * eh0)
316 u16 etype = clib_net_to_host_u16 (eh0->type);
320 case ETHERNET_TYPE_IP4:
321 return (DPO_PROTO_IP4);
322 case ETHERNET_TYPE_IP6:
323 return (DPO_PROTO_IP6);
324 case ETHERNET_TYPE_VLAN:
326 ethernet_vlan_header_t *vh0;
328 vh0 = (ethernet_vlan_header_t *) (eh0 + 1);
330 switch (clib_net_to_host_u16 (vh0->type))
332 case ETHERNET_TYPE_IP4:
333 return (DPO_PROTO_IP4);
334 case ETHERNET_TYPE_IP6:
335 return (DPO_PROTO_IP6);
340 return (DPO_PROTO_NONE);
344 * Determine the SRC EPG from a LPM
347 gbp_lpm_classify_inline (vlib_main_t * vm,
348 vlib_node_runtime_t * node,
349 vlib_frame_t * frame,
350 dpo_proto_t dproto, u8 is_recirc)
352 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
353 u32 n_left_from, *from, *to_next;
357 n_left_from = frame->n_vectors;
358 from = vlib_frame_vector_args (frame);
360 while (n_left_from > 0)
364 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
366 while (n_left_from > 0 && n_left_to_next > 0)
368 u32 bi0, sw_if_index0, fib_index0, lbi0;
369 gbp_lpm_classify_next_t next0;
370 const gbp_policy_dpo_t *gpd0;
371 const gbp_ext_itf_t *gx0;
372 const gbp_recirc_t *gr0;
373 const dpo_id_t *dpo0;
388 next0 = GPB_LPM_CLASSIFY_DROP;
390 b0 = vlib_get_buffer (vm, bi0);
392 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
393 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
395 if (DPO_PROTO_IP4 == dproto)
396 ip4_0 = vlib_buffer_get_current (b0);
397 else if (DPO_PROTO_IP6 == dproto)
398 ip6_0 = vlib_buffer_get_current (b0);
399 else if (DPO_PROTO_ETHERNET == dproto)
401 const ethernet_header_t *eh0;
403 eh0 = vlib_buffer_get_current (b0);
405 dproto = ethertype_to_dpo_proto (eh0);
410 ip4_0 = (vlib_buffer_get_current (b0) +
411 vnet_buffer (b0)->l2.l2_len);
414 ip6_0 = (vlib_buffer_get_current (b0) +
415 vnet_buffer (b0)->l2.l2_len);
418 /* not IP so no LPM classify possible */
419 src_epg0 = EPG_INVALID;
426 gr0 = gbp_recirc_get (sw_if_index0);
427 fib_index0 = gr0->gr_fib_index[dproto];
429 vnet_feature_next (&next0, b0);
433 gx0 = gbp_ext_itf_get (sw_if_index0);
434 fib_index0 = gx0->gx_fib_index[dproto];
436 next0 = vnet_l2_feature_next
437 (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
438 L2INPUT_FEAT_GBP_LPM_CLASSIFY);
441 if (DPO_PROTO_IP4 == dproto)
443 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
444 &ip4_0->src_address);
446 else if (DPO_PROTO_IP6 == dproto)
448 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
449 &ip6_0->src_address);
453 /* not IP so no LPM classify possible */
454 src_epg0 = EPG_INVALID;
457 lb0 = load_balance_get (lbi0);
458 dpo0 = load_balance_get_bucket_i (lb0, 0);
460 if (gbp_policy_dpo_type == dpo0->dpoi_type)
462 gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
463 src_epg0 = gpd0->gpd_epg;
467 /* could not classify => drop */
468 src_epg0 = EPG_INVALID;
469 next0 = GPB_LPM_CLASSIFY_DROP;
473 vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
475 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
477 gbp_classify_trace_t *t =
478 vlib_add_trace (vm, node, b0, sizeof (*t));
479 t->src_epg = src_epg0;
482 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
483 to_next, n_left_to_next,
487 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
490 return frame->n_vectors;
494 gbp_ip4_lpm_classify (vlib_main_t * vm,
495 vlib_node_runtime_t * node, vlib_frame_t * frame)
497 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1));
501 gbp_ip6_lpm_classify (vlib_main_t * vm,
502 vlib_node_runtime_t * node, vlib_frame_t * frame)
504 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1));
508 gbp_l2_lpm_classify (vlib_main_t * vm,
509 vlib_node_runtime_t * node, vlib_frame_t * frame)
511 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0));
515 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
516 .function = gbp_ip4_lpm_classify,
517 .name = "ip4-gbp-lpm-classify",
518 .vector_size = sizeof (u32),
519 .format_trace = format_gbp_classify_trace,
520 .type = VLIB_NODE_TYPE_INTERNAL,
525 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
529 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify);
531 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
532 .function = gbp_ip6_lpm_classify,
533 .name = "ip6-gbp-lpm-classify",
534 .vector_size = sizeof (u32),
535 .format_trace = format_gbp_classify_trace,
536 .type = VLIB_NODE_TYPE_INTERNAL,
541 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
545 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify);
547 VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
548 .function = gbp_l2_lpm_classify,
549 .name = "l2-gbp-lpm-classify",
550 .vector_size = sizeof (u32),
551 .format_trace = format_gbp_classify_trace,
552 .type = VLIB_NODE_TYPE_INTERNAL,
557 [GPB_LPM_CLASSIFY_DROP] = "error-drop"
561 VLIB_NODE_FUNCTION_MULTIARCH (gbp_l2_lpm_classify_node, gbp_l2_lpm_classify);
563 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
565 .arc_name = "ip4-unicast",
566 .node_name = "ip4-gbp-lpm-classify",
567 .runs_before = VNET_FEATURES ("nat44-out2in"),
569 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
571 .arc_name = "ip6-unicast",
572 .node_name = "ip6-gbp-lpm-classify",
573 .runs_before = VNET_FEATURES ("nat66-out2in"),
578 static clib_error_t *
579 gbp_src_classify_init (vlib_main_t * vm)
581 gbp_src_classify_main_t *em = &gbp_src_classify_main;
583 /* Initialize the feature next-node indexes */
584 feat_bitmap_init_next_nodes (vm,
585 gbp_src_classify_node.index,
587 l2input_get_feat_names (),
588 em->l2_input_feat_next[GBP_SRC_CLASSIFY_NULL]);
589 feat_bitmap_init_next_nodes (vm,
590 gbp_null_classify_node.index,
592 l2input_get_feat_names (),
593 em->l2_input_feat_next[GBP_SRC_CLASSIFY_PORT]);
594 feat_bitmap_init_next_nodes (vm,
595 gbp_l2_lpm_classify_node.index,
597 l2input_get_feat_names (),
598 em->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM]);
603 VLIB_INIT_FUNCTION (gbp_src_classify_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */