/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_policy_dpo.h>
18 #include <plugins/gbp/gbp_recirc.h>
20 #include <vnet/dpo/dvr_dpo.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/dpo/load_balance.h>
28 static gbp_policy_dpo_t *gbp_policy_dpo_pool;
31 * DPO type registered for these GBP FWD
33 static dpo_type_t gbp_policy_dpo_type;
35 static inline gbp_policy_dpo_t *
36 gbp_policy_dpo_get_i (index_t index)
38 return (pool_elt_at_index (gbp_policy_dpo_pool, index));
42 gbp_policy_dpo_get (index_t index)
44 return (gbp_policy_dpo_get_i (index));
47 static gbp_policy_dpo_t *
48 gbp_policy_dpo_alloc (void)
50 gbp_policy_dpo_t *gpd;
52 pool_get (gbp_policy_dpo_pool, gpd);
57 static inline gbp_policy_dpo_t *
58 gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo)
60 ASSERT (gbp_policy_dpo_type == dpo->dpoi_type);
62 return (gbp_policy_dpo_get_i (dpo->dpoi_index));
66 gbp_policy_dpo_get_index (gbp_policy_dpo_t * gpd)
68 return (gpd - gbp_policy_dpo_pool);
72 gbp_policy_dpo_lock (dpo_id_t * dpo)
74 gbp_policy_dpo_t *gpd;
76 gpd = gbp_policy_dpo_get_from_dpo (dpo);
81 gbp_policy_dpo_unlock (dpo_id_t * dpo)
83 gbp_policy_dpo_t *gpd;
85 gpd = gbp_policy_dpo_get_from_dpo (dpo);
88 if (0 == gpd->gpd_locks)
90 dpo_reset (&gpd->gpd_dpo);
91 pool_put (gbp_policy_dpo_pool, gpd);
96 gbp_policy_dpo_get_urpf (const dpo_id_t * dpo)
98 gbp_policy_dpo_t *gpd;
100 gpd = gbp_policy_dpo_get_from_dpo (dpo);
102 return (gpd->gpd_sw_if_index);
106 gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
107 epg_id_t epg, u32 sw_if_index, dpo_id_t * dpo)
109 gbp_policy_dpo_t *gpd;
110 dpo_id_t parent = DPO_INVALID;
112 gpd = gbp_policy_dpo_alloc ();
113 memset (gpd, 0, sizeof (*gpd));
115 gpd->gpd_proto = dproto;
116 gpd->gpd_sw_if_index = sw_if_index;
120 * stack on the DVR DPO for the output interface
122 dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
124 dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
126 dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
130 format_gbp_policy_dpo (u8 * s, va_list * ap)
132 index_t index = va_arg (*ap, index_t);
133 u32 indent = va_arg (*ap, u32);
134 gbp_policy_dpo_t *gpd = gbp_policy_dpo_get_i (index);
135 vnet_main_t *vnm = vnet_get_main ();
137 s = format (s, "gbp-policy-dpo: %U, epg:%d out:%U",
138 format_dpo_proto, gpd->gpd_proto,
140 format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index);
141 s = format (s, "\n%U", format_white_space, indent + 2);
142 s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4);
147 const static dpo_vft_t gbp_policy_dpo_vft = {
148 .dv_lock = gbp_policy_dpo_lock,
149 .dv_unlock = gbp_policy_dpo_unlock,
150 .dv_format = format_gbp_policy_dpo,
151 .dv_get_urpf = gbp_policy_dpo_get_urpf,
155 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
158 * this means that these graph nodes are ones from which a glean is the
159 * parent object in the DPO-graph.
161 const static char *const gbp_policy_dpo_ip4_nodes[] = {
162 "ip4-gbp-policy-dpo",
166 const static char *const gbp_policy_dpo_ip6_nodes[] = {
167 "ip6-gbp-policy-dpo",
171 const static char *const *const gbp_policy_dpo_nodes[DPO_PROTO_NUM] = {
172 [DPO_PROTO_IP4] = gbp_policy_dpo_ip4_nodes,
173 [DPO_PROTO_IP6] = gbp_policy_dpo_ip6_nodes,
177 gbp_policy_dpo_get_type (void)
179 return (gbp_policy_dpo_type);
182 static clib_error_t *
183 gbp_policy_dpo_module_init (vlib_main_t * vm)
185 gbp_policy_dpo_type = dpo_register_new_type (&gbp_policy_dpo_vft,
186 gbp_policy_dpo_nodes);
191 VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init);
193 typedef struct gbp_policy_dpo_trace_t_
198 } gbp_policy_dpo_trace_t;
207 gbp_policy_dpo_inline (vlib_main_t * vm,
208 vlib_node_runtime_t * node,
209 vlib_frame_t * from_frame, fib_protocol_t fproto)
211 u32 n_left_from, next_index, *from, *to_next;
213 from = vlib_frame_vector_args (from_frame);
214 n_left_from = from_frame->n_vectors;
216 next_index = node->cached_next_index;
218 while (n_left_from > 0)
222 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
224 while (n_left_from > 0 && n_left_to_next > 0)
226 const gbp_policy_dpo_t *gpd0;
227 u32 bi0, next0, acl_index0;
228 gbp_contract_key_t key0;
237 next0 = GBP_POLICY_DROP;
240 b0 = vlib_get_buffer (vm, bi0);
242 gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
243 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
245 key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
246 key0.gck_dst = gpd0->gpd_epg;
248 if (~0 != key0.gck_src)
250 if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
255 next0 = gpd0->gpd_dpo.dpoi_next_node;
259 acl_index0 = gbp_acl_lookup (&key0);
261 if (~0 != acl_index0)
264 * TODO tests against the ACL
267 * ACL tables are not available outside of ACL plugin
268 * until then bypass the ACL to next node
270 next0 = gpd0->gpd_dpo.dpoi_next_node;
277 * the src EPG is not set when the packet arrives on an EPG
278 * uplink interface and we do not need to apply policy
280 next0 = gpd0->gpd_dpo.dpoi_next_node;
283 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
285 gbp_policy_dpo_trace_t *tr;
287 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
288 tr->src_epg = key0.gck_src;
289 tr->dst_epg = key0.gck_dst;
290 tr->acl_index = acl_index0;
293 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
294 n_left_to_next, bi0, next0);
296 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
298 return from_frame->n_vectors;
302 format_gbp_policy_dpo_trace (u8 * s, va_list * args)
304 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
305 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
306 gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
308 s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
309 t->src_epg, t->dst_epg, t->acl_index);
315 ip4_gbp_policy_dpo (vlib_main_t * vm,
316 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
318 return (gbp_policy_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP4));
322 ip6_gbp_policy_dpo (vlib_main_t * vm,
323 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
325 return (gbp_policy_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP6));
329 VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = {
330 .function = ip4_gbp_policy_dpo,
331 .name = "ip4-gbp-policy-dpo",
332 .vector_size = sizeof (u32),
333 .format_trace = format_gbp_policy_dpo_trace,
334 .n_next_nodes = GBP_POLICY_N_NEXT,
337 [GBP_POLICY_DROP] = "ip4-drop",
340 VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = {
341 .function = ip6_gbp_policy_dpo,
342 .name = "ip6-gbp-policy-dpo",
343 .vector_size = sizeof (u32),
344 .format_trace = format_gbp_policy_dpo_trace,
345 .n_next_nodes = GBP_POLICY_N_NEXT,
348 [GBP_POLICY_DROP] = "ip6-drop",
352 VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo)
353 VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo)
357 * per-packet trace data
359 typedef struct gbp_classify_trace_t_
361 /* per-pkt trace data */
363 } gbp_classify_trace_t;
/* next-node indices for the LPM classify nodes.
 * NOTE(review): "GPB" looks like a typo for "GBP" but is used
 * consistently across this file — do not rename in isolation. */
typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;
371 * Determine the SRC EPG from a LPM
374 gbp_lpm_classify_inline (vlib_main_t * vm,
375 vlib_node_runtime_t * node,
376 vlib_frame_t * frame, fib_protocol_t fproto)
378 u32 n_left_from, *from, *to_next;
382 n_left_from = frame->n_vectors;
383 from = vlib_frame_vector_args (frame);
385 while (n_left_from > 0)
389 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
391 while (n_left_from > 0 && n_left_to_next > 0)
393 u32 bi0, sw_if_index0, fib_index0, lbi0;
394 gbp_lpm_classify_next_t next0;
395 const gbp_policy_dpo_t *gpd0;
396 const gbp_recirc_t *gr0;
397 const dpo_id_t *dpo0;
410 next0 = GPB_LPM_CLASSIFY_DROP;
412 b0 = vlib_get_buffer (vm, bi0);
414 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
415 gr0 = gbp_recirc_get (sw_if_index0);
416 fib_index0 = gr0->gr_fib_index[fproto];
418 if (FIB_PROTOCOL_IP4 == fproto)
420 ip4_0 = vlib_buffer_get_current (b0);
421 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
422 &ip4_0->src_address);
426 ip6_0 = vlib_buffer_get_current (b0);
427 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
428 &ip6_0->src_address);
431 lb0 = load_balance_get (lbi0);
432 dpo0 = load_balance_get_bucket_i (lb0, 0);
434 if (gbp_policy_dpo_type == dpo0->dpoi_type)
436 gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index);
437 src_epg0 = gpd0->gpd_epg;
438 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX],
443 /* could not classify => drop */
447 vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
449 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
451 gbp_classify_trace_t *t =
452 vlib_add_trace (vm, node, b0, sizeof (*t));
453 t->src_epg = src_epg0;
456 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
457 to_next, n_left_to_next,
461 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
464 return frame->n_vectors;
468 gbp_ip4_lpm_classify (vlib_main_t * vm,
469 vlib_node_runtime_t * node, vlib_frame_t * frame)
471 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP4));
475 gbp_ip6_lpm_classify (vlib_main_t * vm,
476 vlib_node_runtime_t * node, vlib_frame_t * frame)
478 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP6));
481 /* packet trace format function */
483 format_gbp_classify_trace (u8 * s, va_list * args)
485 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
486 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
487 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
489 s = format (s, "src-epg:%d", t->src_epg);
495 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
496 .function = gbp_ip4_lpm_classify,
497 .name = "ip4-gbp-lpm-classify",
498 .vector_size = sizeof (u32),
499 .format_trace = format_gbp_classify_trace,
500 .type = VLIB_NODE_TYPE_INTERNAL,
505 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
509 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify);
511 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
512 .function = gbp_ip6_lpm_classify,
513 .name = "ip6-gbp-lpm-classify",
514 .vector_size = sizeof (u32),
515 .format_trace = format_gbp_classify_trace,
516 .type = VLIB_NODE_TYPE_INTERNAL,
521 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
525 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify);
527 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
529 .arc_name = "ip4-unicast",
530 .node_name = "ip4-gbp-lpm-classify",
531 .runs_before = VNET_FEATURES ("nat44-out2in"),
533 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
535 .arc_name = "ip6-unicast",
536 .node_name = "ip6-gbp-lpm-classify",
537 .runs_before = VNET_FEATURES ("nat66-out2in"),
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */