2 * gbp.h : Group Based Policy
4 * Copyright (c) 2018 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <plugins/gbp/gbp.h>
19 #include <plugins/gbp/gbp_classify.h>
20 #include <plugins/gbp/gbp_policy_dpo.h>
21 #include <plugins/gbp/gbp_ext_itf.h>
22 #include <vnet/fib/ip4_fib.h>
23 #include <vnet/fib/ip6_fib.h>
24 #include <vnet/dpo/load_balance.h>
25 #include <vnet/l2/l2_input.h>
26 #include <vnet/l2/feat_bitmap.h>
27 #include <vnet/fib/fib_table.h>
28 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
31 * per-packet trace data
33 typedef struct gbp_classify_trace_t_
35 /* per-pkt trace data */
/* NOTE(review): the member lines are elided from this listing; the trace
 * formatter below prints t->sclass, so at minimum an sclass field is
 * declared here in the full file — confirm against the original source. */
37 } gbp_classify_trace_t;
40 * determine the SRC EPG from the input port
/*
 * Per-frame worker shared by the gbp-src-classify / gbp-null-classify /
 * ip[46]-gbp-src-classify nodes.  For each buffer it derives the source
 * sclass (source EPG id) and stores it in vnet_buffer2(b)->gbp.sclass:
 *   - GBP_SRC_CLASSIFY_NULL: sclass is forced to SCLASS_INVALID;
 *   - DPO_PROTO_ETHERNET:    endpoint looked up by source MAC + BD;
 *   - DPO_PROTO_IP4/IP6:     endpoint looked up by source IP in the
 *                            interface's FIB.
 * Returns the number of vectors processed.
 * NOTE(review): this listing is elided — several locals (b0, sclass0,
 * next_index, n_left_to_next) and brace/else lines are not shown here.
 */
43 gbp_classify_inline (vlib_main_t * vm,
44 vlib_node_runtime_t * node,
46 gbp_src_classify_type_t type, dpo_proto_t dproto)
48 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
49 u32 n_left_from, *from, *to_next;
53 n_left_from = frame->n_vectors;
54 from = vlib_frame_vector_args (frame);
/* standard VPP dual while loop: refill the next-frame, then drain buffers */
56 while (n_left_from > 0)
60 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
62 while (n_left_from > 0 && n_left_to_next > 0)
64 u32 next0, bi0, sw_if_index0;
65 const gbp_endpoint_t *ge0;
76 b0 = vlib_get_buffer (vm, bi0);
78 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
/* clear any stale GBP policy flags before classification */
79 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
/* NULL classify: mark the packet unclassified and move on */
81 if (GBP_SRC_CLASSIFY_NULL == type)
83 sclass0 = SCLASS_INVALID;
85 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
86 L2INPUT_FEAT_GBP_NULL_CLASSIFY);
/* L2 path: find the endpoint from the source MAC in this bridge-domain */
90 if (DPO_PROTO_ETHERNET == dproto)
92 const ethernet_header_t *h0;
94 h0 = vlib_buffer_get_current (b0);
96 vnet_l2_feature_next (b0, gscm->l2_input_feat_next[type],
97 L2INPUT_FEAT_GBP_SRC_CLASSIFY);
98 ge0 = gbp_endpoint_find_mac (h0->src_address,
99 vnet_buffer (b0)->l2.bd_index);
/* IP4 path: find the endpoint by source address in the RX interface's FIB */
101 else if (DPO_PROTO_IP4 == dproto)
103 const ip4_header_t *h0;
105 h0 = vlib_buffer_get_current (b0);
107 ge0 = gbp_endpoint_find_ip4
109 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
114 * Go straight to lookup, do not pass go, do not collect $200
/* IP6 path: mirror of the IP4 path above */
118 else if (DPO_PROTO_IP6 == dproto)
120 const ip6_header_t *h0;
122 h0 = vlib_buffer_get_current (b0);
124 ge0 = gbp_endpoint_find_ip6
126 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
131 * Go straight to lookup, do not pass go, do not collect $200
/* known endpoint => its sclass; otherwise leave the packet unclassified */
142 if (PREDICT_TRUE (NULL != ge0))
143 sclass0 = ge0->ge_fwd.gef_sclass;
145 sclass0 = SCLASS_INVALID;
/* stash the result for downstream GBP policy nodes */
148 vnet_buffer2 (b0)->gbp.sclass = sclass0;
150 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
152 gbp_classify_trace_t *t =
153 vlib_add_trace (vm, node, b0, sizeof (*t));
157 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
158 to_next, n_left_to_next,
162 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
165 return frame->n_vectors;
/* L2 port-based source classification (endpoint found by src MAC) */
168 VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm,
169 vlib_node_runtime_t * node,
170 vlib_frame_t * frame)
172 return (gbp_classify_inline (vm, node, frame,
173 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET));
/* "null" classification: marks every packet SCLASS_INVALID */
176 VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm,
177 vlib_node_runtime_t * node,
178 vlib_frame_t * frame)
180 return (gbp_classify_inline (vm, node, frame,
181 GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET));
/* IPv4 source classification (endpoint found by src IP in RX FIB) */
184 VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm,
185 vlib_node_runtime_t * node,
186 vlib_frame_t * frame)
188 return (gbp_classify_inline (vm, node, frame,
189 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4));
/* IPv6 source classification (endpoint found by src IP in RX FIB) */
192 VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm,
193 vlib_node_runtime_t * node,
194 vlib_frame_t * frame)
196 return (gbp_classify_inline (vm, node, frame,
197 GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6));
201 /* packet trace format function */
/* Render one gbp_classify_trace_t record: the sclass assigned to the pkt */
203 format_gbp_classify_trace (u8 * s, va_list * args)
205 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
206 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
207 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
209 s = format (s, "sclass:%d", t->sclass);
/* Node and feature-arc registrations for the classify nodes above.
 * NOTE(review): the .next_nodes / .n_next_nodes entries and closing
 * braces are elided from this listing — see the full source. */
215 VLIB_REGISTER_NODE (gbp_null_classify_node) = {
216 .name = "gbp-null-classify",
217 .vector_size = sizeof (u32),
218 .format_trace = format_gbp_classify_trace,
219 .type = VLIB_NODE_TYPE_INTERNAL,
225 VLIB_REGISTER_NODE (gbp_src_classify_node) = {
226 .name = "gbp-src-classify",
227 .vector_size = sizeof (u32),
228 .format_trace = format_gbp_classify_trace,
229 .type = VLIB_NODE_TYPE_INTERNAL,
235 VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = {
236 .name = "ip4-gbp-src-classify",
237 .vector_size = sizeof (u32),
238 .format_trace = format_gbp_classify_trace,
239 .type = VLIB_NODE_TYPE_INTERNAL,
248 VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = {
249 .name = "ip6-gbp-src-classify",
250 .vector_size = sizeof (u32),
251 .format_trace = format_gbp_classify_trace,
252 .type = VLIB_NODE_TYPE_INTERNAL,
/* insert the IP classify nodes on the unicast arcs, ahead of NAT so the
 * sclass is set before any address rewriting */
261 VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) =
263 .arc_name = "ip4-unicast",
264 .node_name = "ip4-gbp-src-classify",
265 .runs_before = VNET_FEATURES ("nat44-out2in"),
267 VNET_FEATURE_INIT (gbp_ip6_src_classify_feat_node, static) =
269 .arc_name = "ip6-unicast",
270 .node_name = "ip6-gbp-src-classify",
271 .runs_before = VNET_FEATURES ("nat66-out2in"),
/* Next-node indices for the LPM classify nodes.
 * NOTE(review): "GPB_" is a typo for "GBP_" — it is used consistently at
 * every reference in this file, so renaming it must be done file-wide in
 * one change, not here. */
276 typedef enum gbp_lpm_classify_next_t_
278 GPB_LPM_CLASSIFY_DROP,
279 } gbp_lpm_classify_next_t;
/* Map an ethernet header's ethertype to a DPO protocol, looking through a
 * single VLAN tag; returns DPO_PROTO_NONE for anything that is not
 * IPv4/IPv6.  NOTE(review): double-tagged (QinQ) frames are not peeled —
 * only one VLAN header is examined. */
281 always_inline dpo_proto_t
282 ethertype_to_dpo_proto (const ethernet_header_t * eh0)
284 u16 etype = clib_net_to_host_u16 (eh0->type);
288 case ETHERNET_TYPE_IP4:
289 return (DPO_PROTO_IP4);
290 case ETHERNET_TYPE_IP6:
291 return (DPO_PROTO_IP6);
292 case ETHERNET_TYPE_VLAN:
/* single-tagged: the inner ethertype immediately follows the L2 header */
294 ethernet_vlan_header_t *vh0;
296 vh0 = (ethernet_vlan_header_t *) (eh0 + 1);
298 switch (clib_net_to_host_u16 (vh0->type))
300 case ETHERNET_TYPE_IP4:
301 return (DPO_PROTO_IP4);
302 case ETHERNET_TYPE_IP6:
303 return (DPO_PROTO_IP6);
308 return (DPO_PROTO_NONE);
312 * per-packet trace data
/* NOTE(review): member lines are elided; the formatter below prints
 * t->sclass and t->lbi, so both fields are declared in the full file. */
314 typedef struct gbp_lpm_classify_trace_t_
318 } gbp_lpm_classify_trace_t;
320 /* packet trace format function */
/* Render one gbp_lpm_classify_trace_t record: assigned sclass and the
 * load-balance index hit by the source-address LPM lookup */
322 format_gbp_lpm_classify_trace (u8 * s, va_list * args)
324 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
325 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
326 gbp_lpm_classify_trace_t *t = va_arg (*args, gbp_lpm_classify_trace_t *);
328 s = format (s, "sclass:%d lb:%d", t->sclass, t->lbi);
334 * Determine the SRC EPG from a LPM
/*
 * Per-frame worker shared by the ip[46]/l2 gbp-lpm-classify nodes.
 * Derives the source sclass by longest-prefix-match on the packet's
 * source address: first an exact endpoint lookup, then a FIB forwarding
 * lookup whose load-balance bucket may be a gbp_policy_dpo carrying the
 * sclass of the covering subnet.  is_recirc selects the recirculation
 * path (FIB index from the gbp_recirc_t of the RX interface) versus the
 * L2 path (FIB index from the source endpoint found by MAC).
 * NOTE(review): this listing is elided — locals (b0, sclass0, ip4_0,
 * ip6_0, lb0, next_index), else/brace lines and some branches are not
 * shown here; flow comments below are inferred from the visible lines
 * and should be confirmed against the full source.
 */
337 gbp_lpm_classify_inline (vlib_main_t * vm,
338 vlib_node_runtime_t * node,
339 vlib_frame_t * frame,
340 dpo_proto_t dproto, u8 is_recirc)
342 gbp_src_classify_main_t *gscm = &gbp_src_classify_main;
343 u32 n_left_from, *from, *to_next;
347 n_left_from = frame->n_vectors;
348 from = vlib_frame_vector_args (frame);
/* standard VPP dual while loop over the frame's buffers */
350 while (n_left_from > 0)
354 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
356 while (n_left_from > 0 && n_left_to_next > 0)
358 u32 bi0, sw_if_index0, fib_index0, lbi0;
359 gbp_lpm_classify_next_t next0;
360 const ethernet_header_t *eh0;
361 const gbp_policy_dpo_t *gpd0;
362 const gbp_endpoint_t *ge0;
363 const gbp_recirc_t *gr0;
364 const dpo_id_t *dpo0;
/* default disposition is drop; overridden once classification succeeds */
379 next0 = GPB_LPM_CLASSIFY_DROP;
383 b0 = vlib_get_buffer (vm, bi0);
385 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
/* clear any stale GBP policy flags before classification */
386 vnet_buffer2 (b0)->gbp.flags = VXLAN_GBP_GPFLAGS_NONE;
/* locate the IP header: directly for ip4/ip6 nodes, behind the ethernet
 * (and possible VLAN) header for the l2 node */
388 if (DPO_PROTO_IP4 == dproto)
389 ip4_0 = vlib_buffer_get_current (b0);
390 else if (DPO_PROTO_IP6 == dproto)
391 ip6_0 = vlib_buffer_get_current (b0);
392 else if (DPO_PROTO_ETHERNET == dproto)
394 eh0 = vlib_buffer_get_current (b0);
/* note: dproto is narrowed in place to the frame's inner protocol */
396 dproto = ethertype_to_dpo_proto (eh0);
401 ip4_0 = (vlib_buffer_get_current (b0) +
402 vnet_buffer (b0)->l2.l2_len);
405 ip6_0 = (vlib_buffer_get_current (b0) +
406 vnet_buffer (b0)->l2.l2_len);
409 /* not IP so no LPM classify possible */
410 sclass0 = SCLASS_INVALID;
/* recirc path: the FIB to look in comes from the recirc interface */
417 gr0 = gbp_recirc_get (sw_if_index0);
418 fib_index0 = gr0->gr_fib_index[dproto];
421 vnet_feature_next (&next0, b0);
427 /* packet should be l2 */
428 sclass0 = SCLASS_INVALID;
/* l2 path: the source EP (by MAC) supplies the FIB to look in */
432 ge0 = gbp_endpoint_find_mac (eh0->src_address,
433 vnet_buffer (b0)->l2.bd_index);
437 /* packet must have come from an EP's mac */
438 sclass0 = SCLASS_INVALID;
442 fib_index0 = ge0->ge_fwd.gef_fib_index;
444 if (~0 == fib_index0)
446 sclass0 = SCLASS_INVALID;
/* first try an exact /32 (/128) endpoint match on the source address */
450 if (DPO_PROTO_IP4 == dproto)
453 gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);
455 else if (DPO_PROTO_IP6 == dproto)
458 gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);
461 next0 = vnet_l2_feature_next
462 (b0, gscm->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM],
463 L2INPUT_FEAT_GBP_LPM_CLASSIFY);
466 * if we found the EP by IP lookup, it must be from the EP
467 * not a network behind it
471 sclass0 = ge0->ge_fwd.gef_sclass;
/* no exact EP: fall back to an LPM forwarding lookup on the source */
476 if (DPO_PROTO_IP4 == dproto)
478 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
479 &ip4_0->src_address);
481 else if (DPO_PROTO_IP6 == dproto)
483 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
484 &ip6_0->src_address);
488 /* not IP so no LPM classify possible */
489 sclass0 = SCLASS_INVALID;
490 next0 = GPB_LPM_CLASSIFY_DROP;
/* only bucket 0 is inspected; a gbp_policy_dpo there carries the sclass
 * of the covering subnet */
493 lb0 = load_balance_get (lbi0);
494 dpo0 = load_balance_get_bucket_i (lb0, 0);
496 if (gbp_policy_dpo_type == dpo0->dpoi_type)
498 gpd0 = gbp_policy_dpo_get (dpo0->dpoi_index);
499 sclass0 = gpd0->gpd_sclass;
503 /* could not classify => drop */
504 sclass0 = SCLASS_INVALID;
/* stash the result for downstream GBP policy nodes */
509 vnet_buffer2 (b0)->gbp.sclass = sclass0;
511 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
513 gbp_lpm_classify_trace_t *t =
514 vlib_add_trace (vm, node, b0, sizeof (*t));
519 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
520 to_next, n_left_to_next,
524 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
527 return frame->n_vectors;
/* IPv4 LPM classification on the recirculation path (is_recirc = 1) */
530 VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm,
531 vlib_node_runtime_t * node,
532 vlib_frame_t * frame)
534 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1));
/* IPv6 LPM classification on the recirculation path (is_recirc = 1) */
537 VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm,
538 vlib_node_runtime_t * node,
539 vlib_frame_t * frame)
541 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1));
/* L2 LPM classification: inner protocol derived from the ethertype */
544 VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm,
545 vlib_node_runtime_t * node,
546 vlib_frame_t * frame)
548 return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0));
/* Node and feature-arc registrations for the LPM classify nodes; each
 * node routes GPB_LPM_CLASSIFY_DROP to the drop node matching its layer.
 * NOTE(review): .n_next_nodes and closing braces are elided here. */
552 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
553 .name = "ip4-gbp-lpm-classify",
554 .vector_size = sizeof (u32),
555 .format_trace = format_gbp_lpm_classify_trace,
556 .type = VLIB_NODE_TYPE_INTERNAL,
561 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
565 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
566 .name = "ip6-gbp-lpm-classify",
567 .vector_size = sizeof (u32),
568 .format_trace = format_gbp_lpm_classify_trace,
569 .type = VLIB_NODE_TYPE_INTERNAL,
574 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
578 VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = {
579 .name = "l2-gbp-lpm-classify",
580 .vector_size = sizeof (u32),
581 .format_trace = format_gbp_lpm_classify_trace,
582 .type = VLIB_NODE_TYPE_INTERNAL,
587 [GPB_LPM_CLASSIFY_DROP] = "error-drop"
/* insert the IP LPM classify nodes on the unicast arcs, ahead of NAT so
 * the sclass is set before any address rewriting */
591 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
593 .arc_name = "ip4-unicast",
594 .node_name = "ip4-gbp-lpm-classify",
595 .runs_before = VNET_FEATURES ("nat44-out2in"),
597 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
599 .arc_name = "ip6-unicast",
600 .node_name = "ip6-gbp-lpm-classify",
601 .runs_before = VNET_FEATURES ("nat66-out2in"),
607 * fd.io coding-style-patch-verification: ON
610 * eval: (c-set-style "gnu")