/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/dvr_dpo.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/vxlan-gbp/vxlan_gbp_packet.h>

#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_policy_dpo.h>
#include <plugins/gbp/gbp_recirc.h>
/**
 * Pool of all GBP policy DPOs
 */
static gbp_policy_dpo_t *gbp_policy_dpo_pool;

/**
 * DPO type registered for these GBP forwarding objects
 */
static dpo_type_t gbp_policy_dpo_type;
static inline gbp_policy_dpo_t *
gbp_policy_dpo_get_i (index_t index)
{
  return (pool_elt_at_index (gbp_policy_dpo_pool, index));
}

gbp_policy_dpo_t *
gbp_policy_dpo_get (index_t index)
{
  return (gbp_policy_dpo_get_i (index));
}
static gbp_policy_dpo_t *
gbp_policy_dpo_alloc (void)
{
  gbp_policy_dpo_t *gpd;

  pool_get_zero (gbp_policy_dpo_pool, gpd);

  return (gpd);
}
static inline gbp_policy_dpo_t *
gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo)
{
  ASSERT (gbp_policy_dpo_type == dpo->dpoi_type);

  return (gbp_policy_dpo_get_i (dpo->dpoi_index));
}

static inline index_t
gbp_policy_dpo_get_index (gbp_policy_dpo_t * gpd)
{
  return (gpd - gbp_policy_dpo_pool);
}
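/*
 * Reference counting for the DPO: lock/unlock are invoked through the
 * registered dpo_vft_t. When the last lock is released the stacked child
 * DPO is reset and the object is returned to the pool.
 */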
static void
gbp_policy_dpo_lock (dpo_id_t * dpo)
{
  gbp_policy_dpo_t *gpd;

  gpd = gbp_policy_dpo_get_from_dpo (dpo);
  gpd->gpd_locks++;
}

static void
gbp_policy_dpo_unlock (dpo_id_t * dpo)
{
  gbp_policy_dpo_t *gpd;

  gpd = gbp_policy_dpo_get_from_dpo (dpo);
  gpd->gpd_locks--;

  if (0 == gpd->gpd_locks)
    {
      dpo_reset (&gpd->gpd_dpo);
      pool_put (gbp_policy_dpo_pool, gpd);
    }
}
static u32
gbp_policy_dpo_get_urpf (const dpo_id_t * dpo)
{
  gbp_policy_dpo_t *gpd;

  gpd = gbp_policy_dpo_get_from_dpo (dpo);

  return (gpd->gpd_sw_if_index);
}
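/*
 * Create a policy DPO for the given protocol and destination EPG and stack
 * it on the DVR DPO of the output interface, or on a drop DPO when no
 * output interface is supplied.
 */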
void
gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
                            epg_id_t epg, u32 sw_if_index, dpo_id_t * dpo)
{
  gbp_policy_dpo_t *gpd;
  dpo_id_t parent = DPO_INVALID;

  gpd = gbp_policy_dpo_alloc ();

  gpd->gpd_proto = dproto;
  gpd->gpd_sw_if_index = sw_if_index;
  gpd->gpd_epg = epg;

  if (~0 != sw_if_index)
    {
      /*
       * stack on the DVR DPO for the output interface
       */
      dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
    }
  else
    {
      /* no output interface: stack on a drop */
      dpo_copy (&parent, drop_dpo_get (dproto));
    }

  dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
  dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
}
u8 *
format_gbp_policy_dpo (u8 * s, va_list * ap)
{
  index_t index = va_arg (*ap, index_t);
  u32 indent = va_arg (*ap, u32);
  gbp_policy_dpo_t *gpd = gbp_policy_dpo_get_i (index);
  vnet_main_t *vnm = vnet_get_main ();

  s = format (s, "gbp-policy-dpo: %U, epg:%d out:%U",
              format_dpo_proto, gpd->gpd_proto,
              gpd->gpd_epg,
              format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index);
  s = format (s, "\n%U", format_white_space, indent + 2);
  s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4);

  return (s);
}
/**
 * Interpose a policy DPO between an existing DPO and its parent
 */
static void
gbp_policy_dpo_interpose (const dpo_id_t * original,
                          const dpo_id_t * parent, dpo_id_t * clone)
{
  gbp_policy_dpo_t *gpd, *gpd_clone;

  gpd_clone = gbp_policy_dpo_alloc ();
  gpd = gbp_policy_dpo_get (original->dpoi_index);

  gpd_clone->gpd_proto = gpd->gpd_proto;
  gpd_clone->gpd_epg = gpd->gpd_epg;
  gpd_clone->gpd_sw_if_index = gpd->gpd_sw_if_index;

  dpo_stack (gbp_policy_dpo_type,
             gpd_clone->gpd_proto, &gpd_clone->gpd_dpo, parent);

  dpo_set (clone, gbp_policy_dpo_type,
           gpd_clone->gpd_proto, gbp_policy_dpo_get_index (gpd_clone));
}
const static dpo_vft_t gbp_policy_dpo_vft = {
  .dv_lock = gbp_policy_dpo_lock,
  .dv_unlock = gbp_policy_dpo_unlock,
  .dv_format = format_gbp_policy_dpo,
  .dv_get_urpf = gbp_policy_dpo_get_urpf,
  .dv_mk_interpose = gbp_policy_dpo_interpose,
};
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a GBP
 *        policy object.
 *
 * This means that these graph nodes are ones from which a gbp-policy DPO
 * is the parent object in the DPO-graph.
 */
const static char *const gbp_policy_dpo_ip4_nodes[] = {
  "ip4-gbp-policy-dpo",
  NULL,
};

const static char *const gbp_policy_dpo_ip6_nodes[] = {
  "ip6-gbp-policy-dpo",
  NULL,
};

const static char *const *const gbp_policy_dpo_nodes[DPO_PROTO_NUM] = {
  [DPO_PROTO_IP4] = gbp_policy_dpo_ip4_nodes,
  [DPO_PROTO_IP6] = gbp_policy_dpo_ip6_nodes,
};
dpo_type_t
gbp_policy_dpo_get_type (void)
{
  return (gbp_policy_dpo_type);
}

static clib_error_t *
gbp_policy_dpo_module_init (vlib_main_t * vm)
{
  gbp_policy_dpo_type = dpo_register_new_type (&gbp_policy_dpo_vft,
                                               gbp_policy_dpo_nodes);

  return (NULL);
}

VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init);
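/**
 * per-packet trace data for the ip4/ip6-gbp-policy-dpo nodes
 */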
typedef struct gbp_policy_dpo_trace_t_
{
  u32 src_epg;
  u32 dst_epg;
  u32 acl_index;
  u32 a_bit;
} gbp_policy_dpo_trace_t;

/* next-node indices for the ip4/ip6-gbp-policy-dpo nodes */
typedef enum
{
  GBP_POLICY_DROP,
  GBP_POLICY_N_NEXT,
} gbp_policy_next_t;
always_inline u32
gbp_rule_l3_redirect (const gbp_rule_t * gu, vlib_buffer_t * b0, int is_ip6)
{
  gbp_policy_node_t pnode;
  const dpo_id_t *dpo;
  dpo_proto_t dproto;

  pnode = (is_ip6 ? GBP_POLICY_NODE_IP6 : GBP_POLICY_NODE_IP4);
  dproto = (is_ip6 ? DPO_PROTO_IP6 : DPO_PROTO_IP4);
  dpo = &gu->gu_dpo[pnode][dproto];

  /* The flow hash is still valid as this is an IP packet being switched */
  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo->dpoi_index;

  return (dpo->dpoi_next_node);
}
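/*
 * Per-packet policy check: look up the contract between the packet's source
 * EPG and the DPO's destination EPG, run the contract's ACL, and then
 * permit, redirect, or drop the packet accordingly. Packets already marked
 * with the A (allow) bit bypass the check.
 */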
always_inline uword
gbp_policy_dpo_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame, u8 is_ip6)
{
  gbp_main_t *gm = &gbp_main;
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const gbp_policy_dpo_t *gpd0;
          u32 bi0, next0;
          gbp_contract_key_t key0;
          gbp_contract_t *gc0;
          vlib_buffer_t *b0;
          index_t gci0;

          gc0 = NULL;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          next0 = GBP_POLICY_DROP;

          b0 = vlib_get_buffer (vm, bi0);

          /* pop this DPO off the buffer's forwarding chain */
          gpd0 =
            gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;

          if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A)
            {
              /* policy was already applied (A-bit set) - pass through */
              next0 = gpd0->gpd_dpo.dpoi_next_node;
              key0.as_u32 = ~0;
              goto trace;
            }

          key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
          key0.gck_dst = gpd0->gpd_epg;

          if (EPG_INVALID != key0.gck_src)
            {
              if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
                {
                  /* intra-EPG traffic is always allowed */
                  next0 = gpd0->gpd_dpo.dpoi_next_node;
                  vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
                }
              else
                {
                  gci0 = gbp_contract_find (&key0);

                  if (INDEX_INVALID != gci0)
                    {
                      fa_5tuple_opaque_t pkt_5tuple0;
                      u8 action0 = 0;
                      u32 acl_pos_p0, acl_match_p0;
                      u32 rule_match_p0, trace_bitmap0;
                      /*
                       * tests against the ACL
                       */
                      gc0 = gbp_contract_get (gci0);
                      acl_plugin_fill_5tuple_inline (gm->
                                                     acl_plugin.p_acl_main,
                                                     gc0->gc_lc_index, b0,
                                                     is_ip6,
                                                     /* is_input */ 1,
                                                     /* is_l2_path */ 0,
                                                     &pkt_5tuple0);
                      acl_plugin_match_5tuple_inline (gm->
                                                      acl_plugin.p_acl_main,
                                                      gc0->gc_lc_index,
                                                      &pkt_5tuple0, is_ip6,
                                                      &action0, &acl_pos_p0,
                                                      &acl_match_p0,
                                                      &rule_match_p0,
                                                      &trace_bitmap0);

                      if (action0 > 0)
                        {
                          gbp_rule_t *gu;

                          vnet_buffer2 (b0)->gbp.flags |= VXLAN_GBP_GPFLAGS_A;
                          gu = gbp_rule_get (gc0->gc_rules[rule_match_p0]);

                          switch (gu->gu_action)
                            {
                            case GBP_RULE_PERMIT:
                              next0 = gpd0->gpd_dpo.dpoi_next_node;
                              break;
                            case GBP_RULE_DENY:
                              /* a deny is expressed via the ACL itself */
                              next0 = GBP_POLICY_DROP;
                              break;
                            case GBP_RULE_REDIRECT:
                              next0 = gbp_rule_l3_redirect (gu, b0, is_ip6);
                              break;
                            }
                        }
                    }
                }
            }
          else
            {
              /*
               * the src EPG is not set when the packet arrives on an EPG
               * uplink interface and we do not need to apply policy
               */
              next0 = gpd0->gpd_dpo.dpoi_next_node;
            }
        trace:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gbp_policy_dpo_trace_t *tr;

              tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->src_epg = key0.gck_src;
              tr->dst_epg = key0.gck_dst;
              tr->acl_index = (gc0 ? gc0->gc_acl_index : ~0);
              tr->a_bit = vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_A;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}
static u8 *
format_gbp_policy_dpo_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);

  s = format (s, " src-epg:%d dst-epg:%d acl-index:%d a-bit:%d",
              t->src_epg, t->dst_epg, t->acl_index, t->a_bit);

  return (s);
}
static uword
ip4_gbp_policy_dpo (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return (gbp_policy_dpo_inline (vm, node, from_frame, 0));
}

static uword
ip6_gbp_policy_dpo (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return (gbp_policy_dpo_inline (vm, node, from_frame, 1));
}
VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = {
  .function = ip4_gbp_policy_dpo,
  .name = "ip4-gbp-policy-dpo",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_policy_dpo_trace,
  .n_next_nodes = GBP_POLICY_N_NEXT,
  .next_nodes = {
    [GBP_POLICY_DROP] = "ip4-drop",
  },
};

VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = {
  .function = ip6_gbp_policy_dpo,
  .name = "ip6-gbp-policy-dpo",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_policy_dpo_trace,
  .n_next_nodes = GBP_POLICY_N_NEXT,
  .next_nodes = {
    [GBP_POLICY_DROP] = "ip6-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo)
VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo)
/**
 * per-packet trace data
 */
typedef struct gbp_classify_trace_t_
{
  /* per-pkt trace data */
  u32 src_epg;
} gbp_classify_trace_t;

typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;
/**
 * Determine the SRC EPG from an LPM on the packet's source address
 */
always_inline uword
gbp_lpm_classify_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * frame, fib_protocol_t fproto)
{
  u32 n_left_from, *from, *to_next;
  u32 next_index;

  next_index = node->cached_next_index;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sw_if_index0, fib_index0, lbi0;
          gbp_lpm_classify_next_t next0;
          const gbp_policy_dpo_t *gpd0;
          const gbp_recirc_t *gr0;
          const dpo_id_t *dpo0;
          load_balance_t *lb0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          vlib_buffer_t *b0;
          epg_id_t src_epg0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          next0 = GPB_LPM_CLASSIFY_DROP;

          b0 = vlib_get_buffer (vm, bi0);

          /* the packet arrived on a recirculation interface; use that
           * interface's FIB for the source-address lookup */
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          gr0 = gbp_recirc_get (sw_if_index0);
          fib_index0 = gr0->gr_fib_index[fproto];

          if (FIB_PROTOCOL_IP4 == fproto)
            {
              ip4_0 = vlib_buffer_get_current (b0);
              lbi0 = ip4_fib_forwarding_lookup (fib_index0,
                                                &ip4_0->src_address);
            }
          else
            {
              ip6_0 = vlib_buffer_get_current (b0);
              lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
                                                  &ip6_0->src_address);
            }

          lb0 = load_balance_get (lbi0);
          dpo0 = load_balance_get_bucket_i (lb0, 0);

          if (gbp_policy_dpo_type == dpo0->dpoi_type)
            {
              /* the source address is reachable via an EPG's policy DPO */
              gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index);
              src_epg0 = gpd0->gpd_epg;
              vnet_feature_next (&next0, b0);
            }
          else
            {
              /* could not classify => drop */
              src_epg0 = EPG_INVALID;
            }

          vnet_buffer2 (b0)->gbp.src_epg = src_epg0;

          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->src_epg = src_epg0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
static uword
gbp_ip4_lpm_classify (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP4));
}

static uword
gbp_ip6_lpm_classify (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP6));
}
/* packet trace format function */
static u8 *
format_gbp_classify_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);

  s = format (s, "src-epg:%d", t->src_epg);

  return (s);
}
VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
  .function = gbp_ip4_lpm_classify,
  .name = "ip4-gbp-lpm-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify);
VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
  .function = gbp_ip6_lpm_classify,
  .name = "ip6-gbp-lpm-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_next_nodes = 1,
  .next_nodes = {
    [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify);
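/*
 * Register the LPM classify nodes as features on the IPv4/IPv6 unicast
 * arcs so they run on packets received from a recirculation interface,
 * ahead of the NAT out2in features.
 */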
VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "ip4-gbp-lpm-classify",
  .runs_before = VNET_FEATURES ("nat44-out2in"),
};

VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "ip6-gbp-lpm-classify",
  .runs_before = VNET_FEATURES ("nat66-out2in"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */