2 * gbp.h : Group Based Policy
4 * Copyright (c) 2018 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <plugins/gbp/gbp.h>
19 #include <plugins/gbp/gbp_policy_dpo.h>
20 #include <plugins/gbp/gbp_ext_itf.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/dpo/load_balance.h>
24 #include <vnet/l2/l2_input.h>
25 #include <vnet/l2/feat_bitmap.h>
26 #include <vnet/fib/fib_table.h>
27 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
/*
 * How the source EPG of an input packet is determined.
 * NOTE(review): GBP_SRC_N_CLASSIFY below is sized from GBP_SRC_CLASSIFY_LPM,
 * which is not visible in this chunk — presumably the LPM enumerator line
 * was elided between PORT and the closing brace; confirm in the full file.
 */
29 typedef enum gbp_src_classify_type_t_
31 GBP_SRC_CLASSIFY_NULL,
32 GBP_SRC_CLASSIFY_PORT,
34 } gbp_src_classify_type_t;
/* number of classify flavours = last enumerator + 1 */
36 #define GBP_SRC_N_CLASSIFY (GBP_SRC_CLASSIFY_LPM + 1)
39 * Grouping of global data for the GBP source EPG classification feature
41 typedef struct gbp_src_classify_main_t_
44 * Next nodes for L2 input features (one table per classify flavour)
46 u32 l2_input_feat_next[GBP_SRC_N_CLASSIFY][32];
47 } gbp_src_classify_main_t;
/* single global instance; filled in by gbp_src_classify_init() below */
49 static gbp_src_classify_main_t gbp_src_classify_main;
52 * per-packet trace data
54 typedef struct gbp_classify_trace_t_
55 /* per-pkt trace data */
/*
 * NOTE(review): the struct members are elided in this chunk; the format
 * function and trace writers below use a `src_epg` field — confirm it is
 * declared here in the full file.
 */
58 } gbp_classify_trace_t;
61 * determine the SRC EPG from the input port
/*
 * Per-packet source-EPG classification loop shared by the port, null,
 * ip4 and ip6 classify nodes below.  Looks the packet's source up in the
 * GBP endpoint DB (by MAC+BD for L2, by source IP+FIB for IP) and stamps
 * the resulting EPG id into the buffer metadata for downstream policy.
 *
 * NOTE(review): this chunk is an extraction with lines elided (buffer
 * prefetch/enqueue boilerplate, braces, some declarations) — the comments
 * below describe only what the visible statements establish.
 */
64 gbp_classify_inline (vlib_main_t * vm,
65 vlib_node_runtime_t * node,
67 gbp_src_classify_type_t type, dpo_proto_t dproto)
69 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
70 u32 n_left_from, *from, *to_next;
74 n_left_from = frame->n_vectors;
75 from = vlib_frame_vector_args (frame);
77 while (n_left_from > 0)
81 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
83 while (n_left_from > 0 && n_left_to_next > 0)
85 u32 next0, bi0, src_epg, sw_if_index0;
86 const gbp_endpoint_t *ge0;
96 b0 = vlib_get_buffer (vm, bi0);
98 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
/* clear the GBP policy flags for every packet entering classification */
99 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
/* NULL classify: no lookup at all — mark the EPG invalid and move on */
101 if (GBP_SRC_CLASSIFY_NULL == type)
103 src_epg = EPG_INVALID;
105 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
106 L2INPUT_FEAT_GBP_NULL_CLASSIFY);
/* L2 path: endpoint keyed on source MAC + bridge-domain */
110 if (DPO_PROTO_ETHERNET == dproto)
112 const ethernet_header_t *h0;
114 h0 = vlib_buffer_get_current (b0);
116 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
117 L2INPUT_FEAT_GBP_SRC_CLASSIFY);
118 ge0 = gbp_endpoint_find_mac (h0->src_address,
119 vnet_buffer (b0)->l2.bd_index);
/* IP4 path: endpoint keyed on source address in the RX interface's FIB */
121 else if (DPO_PROTO_IP4 == dproto)
123 const ip4_header_t *h0;
125 h0 = vlib_buffer_get_current (b0);
127 ge0 = gbp_endpoint_find_ip4
129 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
134 * Go straight to lookup, do not pass go, do not collect $200
/* IP6 path: same as IP4 but with the v6 endpoint DB and FIB */
138 else if (DPO_PROTO_IP6 == dproto)
140 const ip6_header_t *h0;
142 h0 = vlib_buffer_get_current (b0);
144 ge0 = gbp_endpoint_find_ip6
146 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
151 * Go straight to lookup, do not pass go, do not collect $200
/* known endpoint => its EPG; unknown source => EPG_INVALID */
162 if (PREDICT_TRUE (NULL != ge0))
163 src_epg = ge0->ge_fwd.gef_epg_id;
165 src_epg = EPG_INVALID;
/* stamp the classification result for downstream GBP policy nodes */
168 vnet_buffer2 (b0)->gbp.src_epg = src_epg;
170 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
172 gbp_classify_trace_t *t =
173 vlib_add_trace (vm, node, b0, sizeof (*t));
174 t->src_epg = src_epg;
177 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
178 to_next, n_left_to_next,
182 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
185 return frame->n_vectors;
/* L2 port-based source classify: EPG from source MAC + bridge-domain */
188 VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
189 vlib_node_runtime_t * node,
190 vlib_frame_t * frame)
192 return (gbp_classify_inline (vm, node, frame,
193 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
/* null classify: stamps EPG_INVALID without any endpoint lookup */
196 VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
197 vlib_node_runtime_t * node,
198 vlib_frame_t * frame)
200 return (gbp_classify_inline (vm, node, frame,
201 GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
/* IP4 source classify: EPG from v4 source address in the RX FIB */
204 VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
205 vlib_node_runtime_t * node,
206 vlib_frame_t * frame)
208 return (gbp_classify_inline (vm, node, frame,
209 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
/* IP6 source classify: EPG from v6 source address in the RX FIB */
212 VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
213 vlib_node_runtime_t * node,
214 vlib_frame_t * frame)
216 return (gbp_classify_inline (vm, node, frame,
217 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
221 /* packet trace format function: renders the classified source EPG id */
223 format_gbp_classify_trace (u8 * s, va_list * args)
225 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
226 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
227 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
229 s = format (s, "src-epg:%d", t->src_epg);
/*
 * Node registrations for the four source-classify nodes.
 * NOTE(review): next-node tables and error strings are elided from this
 * chunk — confirm them in the full file.
 */
235 VLIB_REGISTER_NODE (gbp_null_classify_node) = {
236 .name = "gbp-null-classify",
237 .vector_size = sizeof (u32),
238 .format_trace = format_gbp_classify_trace,
239 .type = VLIB_NODE_TYPE_INTERNAL,
245 VLIB_REGISTER_NODE (gbp_src_classify_node) = {
246 .name = "gbp-src-classify",
247 .vector_size = sizeof (u32),
248 .format_trace = format_gbp_classify_trace,
249 .type = VLIB_NODE_TYPE_INTERNAL,
255 VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
256 .name = "ip4-gbp-src-classify",
257 .vector_size = sizeof (u32),
258 .format_trace = format_gbp_classify_trace,
259 .type = VLIB_NODE_TYPE_INTERNAL,
268 VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
269 .name = "ip6-gbp-src-classify",
270 .vector_size = sizeof (u32),
271 .format_trace = format_gbp_classify_trace,
272 .type = VLIB_NODE_TYPE_INTERNAL,
/* hook the IP source-classify nodes onto the unicast arcs, before NAT */
281 VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
283 .arc_name = "ip4-unicast",
284 .node_name = "ip4-gbp-src-classify",
285 .runs_before = VNET_FEATURES ("nat44-out2in"),
287 VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
289 .arc_name = "ip6-unicast",
290 .node_name = "ip6-gbp-src-classify",
291 .runs_before = VNET_FEATURES ("nat66-out2in"),
/*
 * Next-node indices for the LPM classify nodes.
 * NOTE(review): "GPB_" is a transposition of "GBP_"; not renamed here
 * because the identifier is referenced throughout this file.
 */
296 typedef enum gbp_lpm_classify_next_t_
298 GPB_LPM_CLASSIFY_DROP,
299 } gbp_lpm_classify_next_t;
/*
 * Map an ethernet header's ethertype to a DPO protocol, looking one
 * VLAN tag deep; returns DPO_PROTO_NONE for anything that is not
 * IPv4/IPv6 (directly or under a single 802.1q tag).
 * NOTE(review): the switch opener and some braces are elided from this
 * chunk — confirm structure in the full file.
 */
301 always_inline dpo_proto_t
302 ethertype_to_dpo_proto (const ethernet_header_t * eh0)
304 u16 etype = clib_net_to_host_u16 (eh0->type);
308 case ETHERNET_TYPE_IP4:
309 return (DPO_PROTO_IP4);
310 case ETHERNET_TYPE_IP6:
311 return (DPO_PROTO_IP6);
312 case ETHERNET_TYPE_VLAN:
314 ethernet_vlan_header_t *vh0;
/* peek at the ethertype inside the single VLAN tag */
316 vh0 = (ethernet_vlan_header_t *) (eh0 + 1);
318 switch (clib_net_to_host_u16 (vh0->type))
320 case ETHERNET_TYPE_IP4:
321 return (DPO_PROTO_IP4);
322 case ETHERNET_TYPE_IP6:
323 return (DPO_PROTO_IP6);
/* not IP: caller treats this as "no LPM classify possible" */
328 return (DPO_PROTO_NONE);
332 * Determine the SRC EPG from a LPM
/*
 * Shared loop for the ip4/ip6/l2 LPM classify nodes: longest-prefix-match
 * the packet's SOURCE address in a per-interface GBP FIB; if the result's
 * first load-balance bucket is a gbp-policy DPO, adopt its EPG, otherwise
 * drop.  `is_recirc` selects recirc-interface (IP feature arc) vs external
 * interface (L2 feature arc) handling.
 *
 * NOTE(review): this chunk is an extraction with lines elided (buffer
 * fetch/enqueue boilerplate, braces, some declarations) — the comments
 * below describe only what the visible statements establish.
 */
335 gbp_lpm_classify_inline (vlib_main_t * vm,
336 vlib_node_runtime_t * node,
337 vlib_frame_t * frame,
338 dpo_proto_t dproto, u8 is_recirc)
340 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
341 u32 n_left_from, *from, *to_next;
345 n_left_from = frame->n_vectors;
346 from = vlib_frame_vector_args (frame);
348 while (n_left_from > 0)
352 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
354 while (n_left_from > 0 && n_left_to_next > 0)
356 u32 bi0, sw_if_index0, fib_index0, lbi0;
357 gbp_lpm_classify_next_t next0;
358 const gbp_policy_dpo_t *gpd0;
359 const gbp_ext_itf_t *gx0;
360 const gbp_recirc_t *gr0;
361 const dpo_id_t *dpo0;
/* default to drop; overwritten on a successful classify */
376 next0 = GPB_LPM_CLASSIFY_DROP;
378 b0 = vlib_get_buffer (vm, bi0);
380 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
381 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
/* locate the IP header; on the L2 path resolve the ethertype first */
383 if (DPO_PROTO_IP4 == dproto)
384 ip4_0 = vlib_buffer_get_current (b0);
385 else if (DPO_PROTO_IP6 == dproto)
386 ip6_0 = vlib_buffer_get_current (b0);
387 else if (DPO_PROTO_ETHERNET == dproto)
389 const ethernet_header_t *eh0;
391 eh0 = vlib_buffer_get_current (b0);
393 dproto = ethertype_to_dpo_proto (eh0);
/* skip the L2 header(s) to reach the IP header */
398 ip4_0 = (vlib_buffer_get_current (b0) +
399 vnet_buffer (b0)->l2.l2_len);
402 ip6_0 = (vlib_buffer_get_current (b0) +
403 vnet_buffer (b0)->l2.l2_len);
406 /* not IP so no LPM classify possible */
407 src_epg0 = EPG_INVALID;
/* recirc interface: FIB from the recirc config, next via IP feature arc */
414 gr0 = gbp_recirc_get (sw_if_index0);
415 fib_index0 = gr0->gr_fib_index[dproto];
417 vnet_feature_next (&next0, b0);
/* external interface: FIB from the ext-itf config, next via L2 arc */
421 gx0 = gbp_ext_itf_get (sw_if_index0);
422 fib_index0 = gx0->gx_fib_index[dproto];
424 next0 = vnet_l2_feature_next
425 (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
426 L2INPUT_FEAT_GBP_LPM_CLASSIFY);
/* LPM on the SOURCE address — this is a reverse (who-sent-it) lookup */
429 if (DPO_PROTO_IP4 == dproto)
431 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
432 &ip4_0->src_address);
434 else if (DPO_PROTO_IP6 == dproto)
436 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
437 &ip6_0->src_address);
441 /* not IP so no LPM classify possible */
442 src_epg0 = EPG_INVALID;
/* only a gbp-policy DPO in bucket 0 carries an EPG for this prefix */
445 lb0 = load_balance_get (lbi0);
446 dpo0 = load_balance_get_bucket_i (lb0, 0);
448 if (gbp_policy_dpo_type == dpo0->dpoi_type)
450 gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
451 src_epg0 = gpd0->gpd_epg;
455 /* could not classify => drop */
456 src_epg0 = EPG_INVALID;
457 next0 = GPB_LPM_CLASSIFY_DROP;
461 vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
463 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
465 gbp_classify_trace_t *t =
466 vlib_add_trace (vm, node, b0, sizeof (*t));
467 t->src_epg = src_epg0;
470 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
471 to_next, n_left_to_next,
475 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
478 return frame->n_vectors;
/* IP4 LPM classify on a recirc interface (is_recirc = 1) */
481 VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
482 vlib_node_runtime_t * node,
483 vlib_frame_t * frame)
485 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1));
/* IP6 LPM classify on a recirc interface (is_recirc = 1) */
488 VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
489 vlib_node_runtime_t * node,
490 vlib_frame_t * frame)
492 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1));
/* L2 LPM classify on an external interface (is_recirc = 0) */
495 VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
496 vlib_node_runtime_t * node,
497 vlib_frame_t * frame)
499 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0));
/*
 * Node registrations for the three LPM classify nodes; each maps the
 * DROP next index to its protocol-appropriate drop node.
 */
503 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
504 .name = "ip4-gbp-lpm-classify",
505 .vector_size = sizeof (u32),
506 .format_trace = format_gbp_classify_trace,
507 .type = VLIB_NODE_TYPE_INTERNAL,
512 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
516 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
517 .name = "ip6-gbp-lpm-classify",
518 .vector_size = sizeof (u32),
519 .format_trace = format_gbp_classify_trace,
520 .type = VLIB_NODE_TYPE_INTERNAL,
525 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
529 VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
530 .name = "l2-gbp-lpm-classify",
531 .vector_size = sizeof (u32),
532 .format_trace = format_gbp_classify_trace,
533 .type = VLIB_NODE_TYPE_INTERNAL,
538 [GPB_LPM_CLASSIFY_DROP] = "error-drop"
/* hook the IP LPM-classify nodes onto the unicast arcs, before NAT */
542 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
544 .arc_name = "ip4-unicast",
545 .node_name = "ip4-gbp-lpm-classify",
546 .runs_before = VNET_FEATURES ("nat44-out2in"),
548 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
550 .arc_name = "ip6-unicast",
551 .node_name = "ip6-gbp-lpm-classify",
552 .runs_before = VNET_FEATURES ("nat66-out2in"),
557 static clib_error_t *
558 gbp_src_classify_init (vlib_main_t * vm)
560 gbp_src_classify_main_t *em = &gbp_src_classify_main;
562 vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-src-classify");
564 /* Initialize the feature next-node indexes */
565 feat_bitmap_init_next_nodes (vm,
568 l2input_get_feat_names (),
569 em->l2_input_feat_next[GBP_SRC_CLASSIFY_NULL]);
571 node = vlib_get_node_by_name (vm, (u8 *) "gbp-null-classify");
572 feat_bitmap_init_next_nodes (vm,
575 l2input_get_feat_names (),
576 em->l2_input_feat_next[GBP_SRC_CLASSIFY_PORT]);
578 node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-lpm-classify");
579 feat_bitmap_init_next_nodes (vm,
582 l2input_get_feat_names (),
583 em->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM]);
588 VLIB_INIT_FUNCTION (gbp_src_classify_init);
591 * fd.io coding-style-patch-verification: ON
594 * eval: (c-set-style "gnu")