/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/dpo/dvr_dpo.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>

#include <plugins/gbp/gbp.h>
#include <plugins/gbp/gbp_policy_dpo.h>
#include <plugins/gbp/gbp_recirc.h>
28 static gbp_policy_dpo_t *gbp_policy_dpo_pool;
31 * DPO type registered for these GBP FWD
33 static dpo_type_t gbp_policy_dpo_type;
35 static inline gbp_policy_dpo_t *
36 gbp_policy_dpo_get_i (index_t index)
38 return (pool_elt_at_index (gbp_policy_dpo_pool, index));
42 gbp_policy_dpo_get (index_t index)
44 return (gbp_policy_dpo_get_i (index));
47 static gbp_policy_dpo_t *
48 gbp_policy_dpo_alloc (void)
50 gbp_policy_dpo_t *gpd;
52 pool_get (gbp_policy_dpo_pool, gpd);
57 static inline gbp_policy_dpo_t *
58 gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo)
60 ASSERT (gbp_policy_dpo_type == dpo->dpoi_type);
62 return (gbp_policy_dpo_get_i (dpo->dpoi_index));
66 gbp_policy_dpo_get_index (gbp_policy_dpo_t * gpd)
68 return (gpd - gbp_policy_dpo_pool);
72 gbp_policy_dpo_lock (dpo_id_t * dpo)
74 gbp_policy_dpo_t *gpd;
76 gpd = gbp_policy_dpo_get_from_dpo (dpo);
81 gbp_policy_dpo_unlock (dpo_id_t * dpo)
83 gbp_policy_dpo_t *gpd;
85 gpd = gbp_policy_dpo_get_from_dpo (dpo);
88 if (0 == gpd->gpd_locks)
90 dpo_reset (&gpd->gpd_dpo);
91 pool_put (gbp_policy_dpo_pool, gpd);
96 gbp_policy_dpo_get_urpf (const dpo_id_t * dpo)
98 gbp_policy_dpo_t *gpd;
100 gpd = gbp_policy_dpo_get_from_dpo (dpo);
102 return (gpd->gpd_sw_if_index);
106 gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
107 epg_id_t epg, u32 sw_if_index, dpo_id_t * dpo)
109 gbp_policy_dpo_t *gpd;
110 dpo_id_t parent = DPO_INVALID;
112 gpd = gbp_policy_dpo_alloc ();
113 memset (gpd, 0, sizeof (*gpd));
115 gpd->gpd_proto = dproto;
116 gpd->gpd_sw_if_index = sw_if_index;
120 * stack on the DVR DPO for the output interface
122 dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
124 dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
126 dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
130 format_gbp_policy_dpo (u8 * s, va_list * ap)
132 index_t index = va_arg (*ap, index_t);
133 u32 indent = va_arg (*ap, u32);
134 gbp_policy_dpo_t *gpd = gbp_policy_dpo_get_i (index);
135 vnet_main_t *vnm = vnet_get_main ();
137 s = format (s, "gbp-policy-dpo: %U, epg:%d out:%U",
138 format_dpo_proto, gpd->gpd_proto,
140 format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index);
141 s = format (s, "\n%U", format_white_space, indent + 2);
142 s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4);
147 const static dpo_vft_t gbp_policy_dpo_vft = {
148 .dv_lock = gbp_policy_dpo_lock,
149 .dv_unlock = gbp_policy_dpo_unlock,
150 .dv_format = format_gbp_policy_dpo,
151 .dv_get_urpf = gbp_policy_dpo_get_urpf,
155 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
158 * this means that these graph nodes are ones from which a glean is the
159 * parent object in the DPO-graph.
161 const static char *const gbp_policy_dpo_ip4_nodes[] = {
162 "ip4-gbp-policy-dpo",
166 const static char *const gbp_policy_dpo_ip6_nodes[] = {
167 "ip6-gbp-policy-dpo",
171 const static char *const *const gbp_policy_dpo_nodes[DPO_PROTO_NUM] = {
172 [DPO_PROTO_IP4] = gbp_policy_dpo_ip4_nodes,
173 [DPO_PROTO_IP6] = gbp_policy_dpo_ip6_nodes,
177 gbp_policy_dpo_get_type (void)
179 return (gbp_policy_dpo_type);
182 static clib_error_t *
183 gbp_policy_dpo_module_init (vlib_main_t * vm)
185 gbp_policy_dpo_type = dpo_register_new_type (&gbp_policy_dpo_vft,
186 gbp_policy_dpo_nodes);
191 VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init);
193 typedef struct gbp_policy_dpo_trace_t_
198 } gbp_policy_dpo_trace_t;
207 gbp_policy_dpo_inline (vlib_main_t * vm,
208 vlib_node_runtime_t * node,
209 vlib_frame_t * from_frame, u8 is_ip6)
211 gbp_main_t *gm = &gbp_main;
212 u32 n_left_from, next_index, *from, *to_next;
214 from = vlib_frame_vector_args (from_frame);
215 n_left_from = from_frame->n_vectors;
217 next_index = node->cached_next_index;
219 while (n_left_from > 0)
223 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
225 while (n_left_from > 0 && n_left_to_next > 0)
227 const gbp_policy_dpo_t *gpd0;
229 gbp_contract_key_t key0;
230 gbp_contract_value_t value0 = {
241 next0 = GBP_POLICY_DROP;
243 b0 = vlib_get_buffer (vm, bi0);
245 gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
246 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
248 key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
249 key0.gck_dst = gpd0->gpd_epg;
251 if (~0 != key0.gck_src)
253 if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
258 next0 = gpd0->gpd_dpo.dpoi_next_node;
262 value0.as_u64 = gbp_acl_lookup (&key0);
264 if (~0 != value0.gc_lc_index)
266 fa_5tuple_opaque_t pkt_5tuple0;
268 u32 acl_pos_p0, acl_match_p0;
269 u32 rule_match_p0, trace_bitmap0;
271 * tests against the ACL
273 acl_plugin_fill_5tuple_inline (gm->
274 acl_plugin.p_acl_main,
275 value0.gc_lc_index, b0,
280 acl_plugin_match_5tuple_inline (gm->
281 acl_plugin.p_acl_main,
283 &pkt_5tuple0, is_ip6,
284 &action0, &acl_pos_p0,
290 next0 = gpd0->gpd_dpo.dpoi_next_node;
297 * the src EPG is not set when the packet arrives on an EPG
298 * uplink interface and we do not need to apply policy
300 next0 = gpd0->gpd_dpo.dpoi_next_node;
303 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
305 gbp_policy_dpo_trace_t *tr;
307 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
308 tr->src_epg = key0.gck_src;
309 tr->dst_epg = key0.gck_dst;
310 tr->acl_index = value0.gc_acl_index;
313 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
314 n_left_to_next, bi0, next0);
316 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
318 return from_frame->n_vectors;
322 format_gbp_policy_dpo_trace (u8 * s, va_list * args)
324 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
325 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
326 gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
328 s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
329 t->src_epg, t->dst_epg, t->acl_index);
335 ip4_gbp_policy_dpo (vlib_main_t * vm,
336 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
338 return (gbp_policy_dpo_inline (vm, node, from_frame, 0));
342 ip6_gbp_policy_dpo (vlib_main_t * vm,
343 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
345 return (gbp_policy_dpo_inline (vm, node, from_frame, 1));
349 VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = {
350 .function = ip4_gbp_policy_dpo,
351 .name = "ip4-gbp-policy-dpo",
352 .vector_size = sizeof (u32),
353 .format_trace = format_gbp_policy_dpo_trace,
354 .n_next_nodes = GBP_POLICY_N_NEXT,
357 [GBP_POLICY_DROP] = "ip4-drop",
360 VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = {
361 .function = ip6_gbp_policy_dpo,
362 .name = "ip6-gbp-policy-dpo",
363 .vector_size = sizeof (u32),
364 .format_trace = format_gbp_policy_dpo_trace,
365 .n_next_nodes = GBP_POLICY_N_NEXT,
368 [GBP_POLICY_DROP] = "ip6-drop",
372 VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo)
373 VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo)
377 * per-packet trace data
379 typedef struct gbp_classify_trace_t_
381 /* per-pkt trace data */
383 } gbp_classify_trace_t;
/* Next-node indices for the LPM classify nodes. */
typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;
391 * Determine the SRC EPG from a LPM
394 gbp_lpm_classify_inline (vlib_main_t * vm,
395 vlib_node_runtime_t * node,
396 vlib_frame_t * frame, fib_protocol_t fproto)
398 u32 n_left_from, *from, *to_next;
402 n_left_from = frame->n_vectors;
403 from = vlib_frame_vector_args (frame);
405 while (n_left_from > 0)
409 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
411 while (n_left_from > 0 && n_left_to_next > 0)
413 u32 bi0, sw_if_index0, fib_index0, lbi0;
414 gbp_lpm_classify_next_t next0;
415 const gbp_policy_dpo_t *gpd0;
416 const gbp_recirc_t *gr0;
417 const dpo_id_t *dpo0;
430 next0 = GPB_LPM_CLASSIFY_DROP;
432 b0 = vlib_get_buffer (vm, bi0);
434 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
435 gr0 = gbp_recirc_get (sw_if_index0);
436 fib_index0 = gr0->gr_fib_index[fproto];
438 if (FIB_PROTOCOL_IP4 == fproto)
440 ip4_0 = vlib_buffer_get_current (b0);
441 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
442 &ip4_0->src_address);
446 ip6_0 = vlib_buffer_get_current (b0);
447 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
448 &ip6_0->src_address);
451 lb0 = load_balance_get (lbi0);
452 dpo0 = load_balance_get_bucket_i (lb0, 0);
454 if (gbp_policy_dpo_type == dpo0->dpoi_type)
456 gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index);
457 src_epg0 = gpd0->gpd_epg;
458 vnet_feature_next (&next0, b0);
462 /* could not classify => drop */
466 vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
468 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
470 gbp_classify_trace_t *t =
471 vlib_add_trace (vm, node, b0, sizeof (*t));
472 t->src_epg = src_epg0;
475 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
476 to_next, n_left_to_next,
480 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
483 return frame->n_vectors;
487 gbp_ip4_lpm_classify (vlib_main_t * vm,
488 vlib_node_runtime_t * node, vlib_frame_t * frame)
490 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP4));
494 gbp_ip6_lpm_classify (vlib_main_t * vm,
495 vlib_node_runtime_t * node, vlib_frame_t * frame)
497 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP6));
500 /* packet trace format function */
502 format_gbp_classify_trace (u8 * s, va_list * args)
504 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
505 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
506 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
508 s = format (s, "src-epg:%d", t->src_epg);
514 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
515 .function = gbp_ip4_lpm_classify,
516 .name = "ip4-gbp-lpm-classify",
517 .vector_size = sizeof (u32),
518 .format_trace = format_gbp_classify_trace,
519 .type = VLIB_NODE_TYPE_INTERNAL,
524 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
528 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify);
530 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
531 .function = gbp_ip6_lpm_classify,
532 .name = "ip6-gbp-lpm-classify",
533 .vector_size = sizeof (u32),
534 .format_trace = format_gbp_classify_trace,
535 .type = VLIB_NODE_TYPE_INTERNAL,
540 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
544 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify);
546 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
548 .arc_name = "ip4-unicast",
549 .node_name = "ip4-gbp-lpm-classify",
550 .runs_before = VNET_FEATURES ("nat44-out2in"),
552 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
554 .arc_name = "ip6-unicast",
555 .node_name = "ip6-gbp-lpm-classify",
556 .runs_before = VNET_FEATURES ("nat66-out2in"),
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */