/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_policy_dpo.h>
18 #include <plugins/gbp/gbp_recirc.h>
20 #include <vnet/dpo/dvr_dpo.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/dpo/load_balance.h>
28 static gbp_policy_dpo_t *gbp_policy_dpo_pool;
31 * DPO type registered for these GBP FWD
33 static dpo_type_t gbp_policy_dpo_type;
35 static inline gbp_policy_dpo_t *
36 gbp_policy_dpo_get_i (index_t index)
38 return (pool_elt_at_index (gbp_policy_dpo_pool, index));
42 gbp_policy_dpo_get (index_t index)
44 return (gbp_policy_dpo_get_i (index));
47 static gbp_policy_dpo_t *
48 gbp_policy_dpo_alloc (void)
50 gbp_policy_dpo_t *gpd;
52 pool_get (gbp_policy_dpo_pool, gpd);
57 static inline gbp_policy_dpo_t *
58 gbp_policy_dpo_get_from_dpo (const dpo_id_t * dpo)
60 ASSERT (gbp_policy_dpo_type == dpo->dpoi_type);
62 return (gbp_policy_dpo_get_i (dpo->dpoi_index));
66 gbp_policy_dpo_get_index (gbp_policy_dpo_t * gpd)
68 return (gpd - gbp_policy_dpo_pool);
72 gbp_policy_dpo_lock (dpo_id_t * dpo)
74 gbp_policy_dpo_t *gpd;
76 gpd = gbp_policy_dpo_get_from_dpo (dpo);
81 gbp_policy_dpo_unlock (dpo_id_t * dpo)
83 gbp_policy_dpo_t *gpd;
85 gpd = gbp_policy_dpo_get_from_dpo (dpo);
88 if (0 == gpd->gpd_locks)
90 dpo_reset (&gpd->gpd_dpo);
91 pool_put (gbp_policy_dpo_pool, gpd);
96 gbp_policy_dpo_add_or_lock (dpo_proto_t dproto,
97 epg_id_t epg, u32 sw_if_index, dpo_id_t * dpo)
99 gbp_policy_dpo_t *gpd;
100 dpo_id_t parent = DPO_INVALID;
102 gpd = gbp_policy_dpo_alloc ();
103 memset (gpd, 0, sizeof (*gpd));
105 gpd->gpd_proto = dproto;
106 gpd->gpd_sw_if_index = sw_if_index;
110 * stack on the DVR DPO for the output interface
112 dvr_dpo_add_or_lock (sw_if_index, dproto, &parent);
114 dpo_stack (gbp_policy_dpo_type, dproto, &gpd->gpd_dpo, &parent);
116 dpo_set (dpo, gbp_policy_dpo_type, dproto, gbp_policy_dpo_get_index (gpd));
120 format_gbp_policy_dpo (u8 * s, va_list * ap)
122 index_t index = va_arg (*ap, index_t);
123 u32 indent = va_arg (*ap, u32);
124 gbp_policy_dpo_t *gpd = gbp_policy_dpo_get_i (index);
125 vnet_main_t *vnm = vnet_get_main ();
127 s = format (s, "gbp-policy-dpo: %U, epg:%d out:%U",
128 format_dpo_proto, gpd->gpd_proto,
130 format_vnet_sw_if_index_name, vnm, gpd->gpd_sw_if_index);
131 s = format (s, "\n%U", format_white_space, indent + 2);
132 s = format (s, "%U", format_dpo_id, &gpd->gpd_dpo, indent + 4);
137 const static dpo_vft_t gbp_policy_dpo_vft = {
138 .dv_lock = gbp_policy_dpo_lock,
139 .dv_unlock = gbp_policy_dpo_unlock,
140 .dv_format = format_gbp_policy_dpo,
144 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
147 * this means that these graph nodes are ones from which a glean is the
148 * parent object in the DPO-graph.
150 const static char *const gbp_policy_dpo_ip4_nodes[] = {
151 "ip4-gbp-policy-dpo",
155 const static char *const gbp_policy_dpo_ip6_nodes[] = {
156 "ip6-gbp-policy-dpo",
160 const static char *const *const gbp_policy_dpo_nodes[DPO_PROTO_NUM] = {
161 [DPO_PROTO_IP4] = gbp_policy_dpo_ip4_nodes,
162 [DPO_PROTO_IP6] = gbp_policy_dpo_ip6_nodes,
166 gbp_policy_dpo_get_type (void)
168 return (gbp_policy_dpo_type);
171 static clib_error_t *
172 gbp_policy_dpo_module_init (vlib_main_t * vm)
174 gbp_policy_dpo_type = dpo_register_new_type (&gbp_policy_dpo_vft,
175 gbp_policy_dpo_nodes);
180 VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init);
182 typedef struct gbp_policy_dpo_trace_t_
187 } gbp_policy_dpo_trace_t;
196 gbp_policy_dpo_inline (vlib_main_t * vm,
197 vlib_node_runtime_t * node,
198 vlib_frame_t * from_frame, fib_protocol_t fproto)
200 u32 n_left_from, next_index, *from, *to_next;
202 from = vlib_frame_vector_args (from_frame);
203 n_left_from = from_frame->n_vectors;
205 next_index = node->cached_next_index;
207 while (n_left_from > 0)
211 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
213 while (n_left_from > 0 && n_left_to_next > 0)
215 const gbp_policy_dpo_t *gpd0;
216 u32 bi0, next0, acl_index0;
217 gbp_contract_key_t key0;
226 next0 = GBP_POLICY_DROP;
229 b0 = vlib_get_buffer (vm, bi0);
231 gbp_policy_dpo_get_i (vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
232 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = gpd0->gpd_dpo.dpoi_index;
234 key0.gck_src = vnet_buffer2 (b0)->gbp.src_epg;
235 key0.gck_dst = gpd0->gpd_epg;
237 if (~0 != key0.gck_src)
239 if (PREDICT_FALSE (key0.gck_src == key0.gck_dst))
244 next0 = gpd0->gpd_dpo.dpoi_next_node;
248 acl_index0 = gbp_acl_lookup (&key0);
250 if (~0 != acl_index0)
253 * TODO tests against the ACL
256 * ACL tables are not available outside of ACL plugin
257 * until then bypass the ACL to next node
259 next0 = gpd0->gpd_dpo.dpoi_next_node;
266 * the src EPG is not set when the packet arrives on an EPG
267 * uplink interface and we do not need to apply policy
269 next0 = gpd0->gpd_dpo.dpoi_next_node;
272 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
274 gbp_policy_dpo_trace_t *tr;
276 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
277 tr->src_epg = key0.gck_src;
278 tr->dst_epg = key0.gck_dst;
279 tr->acl_index = acl_index0;
282 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
283 n_left_to_next, bi0, next0);
285 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
287 return from_frame->n_vectors;
291 format_gbp_policy_dpo_trace (u8 * s, va_list * args)
293 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
294 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
295 gbp_policy_dpo_trace_t *t = va_arg (*args, gbp_policy_dpo_trace_t *);
297 s = format (s, " src-epg:%d dst-epg:%d acl-index:%d",
298 t->src_epg, t->dst_epg, t->acl_index);
304 ip4_gbp_policy_dpo (vlib_main_t * vm,
305 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
307 return (gbp_policy_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP4));
311 ip6_gbp_policy_dpo (vlib_main_t * vm,
312 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
314 return (gbp_policy_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP6));
318 VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = {
319 .function = ip4_gbp_policy_dpo,
320 .name = "ip4-gbp-policy-dpo",
321 .vector_size = sizeof (u32),
322 .format_trace = format_gbp_policy_dpo_trace,
323 .n_next_nodes = GBP_POLICY_N_NEXT,
326 [GBP_POLICY_DROP] = "ip4-drop",
329 VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = {
330 .function = ip6_gbp_policy_dpo,
331 .name = "ip6-gbp-policy-dpo",
332 .vector_size = sizeof (u32),
333 .format_trace = format_gbp_policy_dpo_trace,
334 .n_next_nodes = GBP_POLICY_N_NEXT,
337 [GBP_POLICY_DROP] = "ip6-drop",
341 VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo)
342 VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo)
346 * per-packet trace data
348 typedef struct gbp_classify_trace_t_
350 /* per-pkt trace data */
352 } gbp_classify_trace_t;
/**
 * Next-node indices for the LPM classify nodes; drop is the only
 * explicit arc — success paths use vnet_feature_next.
 */
typedef enum gbp_lpm_classify_next_t_
{
  GPB_LPM_CLASSIFY_DROP,
} gbp_lpm_classify_next_t;
360 * Determine the SRC EPG from a LPM
363 gbp_lpm_classify_inline (vlib_main_t * vm,
364 vlib_node_runtime_t * node,
365 vlib_frame_t * frame, fib_protocol_t fproto)
367 u32 n_left_from, *from, *to_next;
371 n_left_from = frame->n_vectors;
372 from = vlib_frame_vector_args (frame);
374 while (n_left_from > 0)
378 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
380 while (n_left_from > 0 && n_left_to_next > 0)
382 u32 bi0, sw_if_index0, fib_index0, lbi0;
383 gbp_lpm_classify_next_t next0;
384 const gbp_policy_dpo_t *gpd0;
385 const gbp_recirc_t *gr0;
386 const dpo_id_t *dpo0;
399 next0 = GPB_LPM_CLASSIFY_DROP;
401 b0 = vlib_get_buffer (vm, bi0);
403 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
404 gr0 = gbp_recirc_get (sw_if_index0);
405 fib_index0 = gr0->gr_fib_index[fproto];
407 if (FIB_PROTOCOL_IP4 == fproto)
409 ip4_0 = vlib_buffer_get_current (b0);
410 lbi0 = ip4_fib_forwarding_lookup (fib_index0,
411 &ip4_0->src_address);
415 ip6_0 = vlib_buffer_get_current (b0);
416 lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0,
417 &ip6_0->src_address);
420 lb0 = load_balance_get (lbi0);
421 dpo0 = load_balance_get_bucket_i (lb0, 0);
423 if (gbp_policy_dpo_type == dpo0->dpoi_type)
425 gpd0 = gbp_policy_dpo_get_i (dpo0->dpoi_index);
426 src_epg0 = gpd0->gpd_epg;
427 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX],
432 /* could not classify => drop */
436 vnet_buffer2 (b0)->gbp.src_epg = src_epg0;
438 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
440 gbp_classify_trace_t *t =
441 vlib_add_trace (vm, node, b0, sizeof (*t));
442 t->src_epg = src_epg0;
445 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
446 to_next, n_left_to_next,
450 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
453 return frame->n_vectors;
457 gbp_ip4_lpm_classify (vlib_main_t * vm,
458 vlib_node_runtime_t * node, vlib_frame_t * frame)
460 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP4));
464 gbp_ip6_lpm_classify (vlib_main_t * vm,
465 vlib_node_runtime_t * node, vlib_frame_t * frame)
467 return (gbp_lpm_classify_inline (vm, node, frame, FIB_PROTOCOL_IP6));
470 /* packet trace format function */
472 format_gbp_classify_trace (u8 * s, va_list * args)
474 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
475 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
476 gbp_classify_trace_t *t = va_arg (*args, gbp_classify_trace_t *);
478 s = format (s, "src-epg:%d", t->src_epg);
484 VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = {
485 .function = gbp_ip4_lpm_classify,
486 .name = "ip4-gbp-lpm-classify",
487 .vector_size = sizeof (u32),
488 .format_trace = format_gbp_classify_trace,
489 .type = VLIB_NODE_TYPE_INTERNAL,
494 [GPB_LPM_CLASSIFY_DROP] = "ip4-drop"
498 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify);
500 VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = {
501 .function = gbp_ip6_lpm_classify,
502 .name = "ip6-gpb-lpm-classify",
503 .vector_size = sizeof (u32),
504 .format_trace = format_gbp_classify_trace,
505 .type = VLIB_NODE_TYPE_INTERNAL,
510 [GPB_LPM_CLASSIFY_DROP] = "ip6-drop"
514 VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify);
516 VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) =
518 .arc_name = "ip4-unicast",
519 .node_name = "ip4-gbp-lpm-classify",
520 .runs_before = VNET_FEATURES ("nat44-out2in"),
522 VNET_FEATURE_INIT (gbp_ip6_lpm_classify_feat_node, static) =
524 .arc_name = "ip6-unicast",
525 .node_name = "ip6-gbp-lpm-classify",
526 .runs_before = VNET_FEATURES ("nat66-out2in"),
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */