2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_fwd_dpo.h>
19 #include <vnet/ethernet/ethernet.h>
22 #ifndef CLIB_MARCH_VARIANT
24 * The 'DB' of GBP FWD DPOs.
25 * There is one per-proto
/*
 * Per-protocol table interning one gbp_fwd DPO per dpo_proto_t.
 * NOTE(review): the brace initializer sets only element [0] to
 * INDEX_INVALID (remaining elements are zero); gbp_fwd_dpo_module_init
 * below re-initializes every slot to INDEX_INVALID, so this is benign.
 */
27 static index_t gbp_fwd_dpo_db[DPO_PROTO_NUM] = { INDEX_INVALID };
30 * DPO type registered for these GBP FWD
/* DPO type value handed back by dpo_register_new_type() at init time. */
32 static dpo_type_t gbp_fwd_dpo_type;
35 * @brief pool of all interface DPOs
/* Pool from which all gbp_fwd_dpo_t objects are allocated (non-static:
 * presumably referenced from another translation unit — confirm). */
37 gbp_fwd_dpo_t *gbp_fwd_dpo_pool;
/*
 * Allocate a fresh gbp_fwd_dpo_t from the global pool.
 * NOTE(review): only the pool_get() call is visible in this view; the
 * declaration of 'gfd', any zero-initialization, and the return of the
 * new object are on elided lines — confirm against the full file.
 */
39 static gbp_fwd_dpo_t *
40 gbp_fwd_dpo_alloc (void)
44 pool_get (gbp_fwd_dpo_pool, gfd);
/*
 * Resolve a generic dpo_id_t to its gbp_fwd_dpo_t pool entry.
 * Asserts (debug builds only) that the dpo is actually of the
 * gbp_fwd type before using its index.
 */
49 static inline gbp_fwd_dpo_t *
50 gbp_fwd_dpo_get_from_dpo (const dpo_id_t * dpo)
52 ASSERT (gbp_fwd_dpo_type == dpo->dpoi_type);
54 return (gbp_fwd_dpo_get (dpo->dpoi_index));
/*
 * Pool index of a gbp_fwd_dpo_t, computed by pointer arithmetic
 * against the pool base.  (Return-type line is elided in this view.)
 */
58 gbp_fwd_dpo_get_index (gbp_fwd_dpo_t * gfd)
60 return (gfd - gbp_fwd_dpo_pool);
/*
 * dpo_vft lock callback: take a reference on the gbp_fwd DPO.
 * NOTE(review): the gfd_locks increment itself is on an elided line;
 * only the lookup is visible here.
 */
64 gbp_fwd_dpo_lock (dpo_id_t * dpo)
68 gfd = gbp_fwd_dpo_get_from_dpo (dpo);
/*
 * dpo_vft unlock callback: drop a reference and, when the count hits
 * zero, un-intern the object from the per-proto DB and return it to
 * the pool.
 * NOTE(review): the gfd_locks decrement is on an elided line between
 * the lookup and the zero check — confirm against the full file.
 */
73 gbp_fwd_dpo_unlock (dpo_id_t * dpo)
77 gfd = gbp_fwd_dpo_get_from_dpo (dpo);
80 if (0 == gfd->gfd_locks)
82 gbp_fwd_dpo_db[gfd->gfd_proto] = INDEX_INVALID;
83 pool_put (gbp_fwd_dpo_pool, gfd);
/*
 * Return (via *dpo) the interned gbp_fwd DPO for 'dproto', creating it
 * on first use.  Subsequent calls for the same protocol reuse the DB
 * entry, so there is at most one object per protocol.
 * NOTE(review): dpo_set() presumably takes the reference/lock on the
 * object on behalf of the caller — confirm against dpo.h.
 */
88 gbp_fwd_dpo_add_or_lock (dpo_proto_t dproto, dpo_id_t * dpo)
92 if (INDEX_INVALID == gbp_fwd_dpo_db[dproto])
94 gfd = gbp_fwd_dpo_alloc ();
96 gfd->gfd_proto = dproto;
98 gbp_fwd_dpo_db[dproto] = gbp_fwd_dpo_get_index (gfd);
102 gfd = gbp_fwd_dpo_get (gbp_fwd_dpo_db[dproto]);
105 dpo_set (dpo, gbp_fwd_dpo_type, dproto, gbp_fwd_dpo_get_index (gfd));
/*
 * vlib format function for a gbp_fwd DPO: prints the object's protocol.
 * va_args: (index_t index, u32 indent) — indent is accepted but unused.
 */
109 format_gbp_fwd_dpo (u8 * s, va_list * ap)
111 index_t index = va_arg (*ap, index_t);
112 CLIB_UNUSED (u32 indent) = va_arg (*ap, u32);
113 gbp_fwd_dpo_t *gfd = gbp_fwd_dpo_get (index);
115 return (format (s, "gbp-fwd-dpo: %U", format_dpo_proto, gfd->gfd_proto));
/*
 * Virtual function table registered with the DPO infrastructure for
 * the gbp_fwd type: lock/unlock manage the refcount, format prints it.
 */
118 const static dpo_vft_t gbp_fwd_dpo_vft = {
119 .dv_lock = gbp_fwd_dpo_lock,
120 .dv_unlock = gbp_fwd_dpo_unlock,
121 .dv_format = format_gbp_fwd_dpo,
125 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
128 * this means that these graph nodes are ones from which a glean is the
129 * parent object in the DPO-graph.
/*
 * NOTE(review): the string elements of the two node-name arrays (the
 * graph-node names, presumably "ip4-gbp-fwd-dpo"/"ip6-gbp-fwd-dpo" to
 * match the registrations below) are on elided lines — confirm.
 */
131 const static char *const gbp_fwd_dpo_ip4_nodes[] = {
136 const static char *const gbp_fwd_dpo_ip6_nodes[] = {
/* Per-protocol dispatch table handed to dpo_register_new_type(). */
141 const static char *const *const gbp_fwd_dpo_nodes[DPO_PROTO_NUM] = {
142 [DPO_PROTO_IP4] = gbp_fwd_dpo_ip4_nodes,
143 [DPO_PROTO_IP6] = gbp_fwd_dpo_ip6_nodes,
/*
 * Accessor for the registered gbp_fwd DPO type value (valid only after
 * gbp_fwd_dpo_module_init has run).
 */
147 gbp_fwd_dpo_get_type (void)
149 return (gbp_fwd_dpo_type);
/*
 * One-time init: mark every per-proto DB slot empty, then register the
 * gbp_fwd DPO type with its vft and per-protocol graph nodes.
 * NOTE(review): the dproto declaration, the second argument of
 * dpo_register_new_type() (presumably gbp_fwd_dpo_nodes), and the
 * return statement are on elided lines — confirm against the full file.
 */
152 static clib_error_t *
153 gbp_fwd_dpo_module_init (vlib_main_t * vm)
157 FOR_EACH_DPO_PROTO (dproto)
159 gbp_fwd_dpo_db[dproto] = INDEX_INVALID;
162 gbp_fwd_dpo_type = dpo_register_new_type (&gbp_fwd_dpo_vft,
168 VLIB_INIT_FUNCTION (gbp_fwd_dpo_module_init);
169 #endif /* CLIB_MARCH_VARIANT */
/*
 * Per-packet trace record for the gbp-fwd-dpo nodes.
 * NOTE(review): the member declarations are elided here; usage below
 * shows at least 'sclass' and 'dpo_index' fields.
 */
171 typedef struct gbp_fwd_dpo_trace_t_
175 } gbp_fwd_dpo_trace_t;
/*
 * Shared IPv4/IPv6 worker for the gbp-fwd-dpo graph nodes.
 *
 * For each buffer: look up the per-sclass forwarding DPO via
 * gbp_epg_dpo_lookup(sclass, fproto), where sclass comes from
 * vnet_buffer2(b)->gbp.sclass (set by an upstream GBP node).  On a hit
 * the child DPO's index is stashed in ip.adj_index[VLIB_TX] for the
 * next node; on a miss the packet is sent to GBP_FWD_DROP.
 *
 * NOTE(review): several lines are elided in this view — the local
 * declarations (bi0, b0, next0, sclass0, n_left_to_next), the
 * from/to_next bookkeeping, and presumably the assignment of
 * next0 = GBP_FWD_FWD on the hit path — confirm against the full file.
 */
185 gbp_fwd_dpo_inline (vlib_main_t * vm,
186 vlib_node_runtime_t * node,
187 vlib_frame_t * from_frame, fib_protocol_t fproto)
189 u32 n_left_from, next_index, *from, *to_next;
191 from = vlib_frame_vector_args (from_frame);
192 n_left_from = from_frame->n_vectors;
194 next_index = node->cached_next_index;
/* Standard vlib dual-loop skeleton: refill the next-frame, then
 * process packets one at a time while both counters are non-zero. */
196 while (n_left_from > 0)
200 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
202 while (n_left_from > 0 && n_left_to_next > 0)
204 const dpo_id_t *next_dpo0;
216 b0 = vlib_get_buffer (vm, bi0);
218 sclass0 = vnet_buffer2 (b0)->gbp.sclass;
219 next_dpo0 = gbp_epg_dpo_lookup (sclass0, fproto);
221 if (PREDICT_TRUE (NULL != next_dpo0))
/* Hit: hand the resolved child DPO index to the next node. */
223 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = next_dpo0->dpoi_index;
/* Miss: no DPO registered for this sclass — drop. */
228 next0 = GBP_FWD_DROP;
231 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
233 gbp_fwd_dpo_trace_t *tr;
235 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
236 tr->sclass = sclass0;
/* Record ~0 as the dpo index when the lookup missed. */
237 tr->dpo_index = (NULL != next_dpo0 ?
238 next_dpo0->dpoi_index : ~0);
241 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
242 n_left_to_next, bi0, next0);
244 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
246 return from_frame->n_vectors;
/*
 * Trace formatter for the gbp-fwd-dpo nodes: prints the sclass and the
 * resolved DPO index recorded per packet.  (The trailing 'return s;'
 * is on an elided line in this view.)
 */
250 format_gbp_fwd_dpo_trace (u8 * s, va_list * args)
252 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
253 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
254 gbp_fwd_dpo_trace_t *t = va_arg (*args, gbp_fwd_dpo_trace_t *);
256 s = format (s, " sclass:%d dpo:%d", t->sclass, t->dpo_index);
/* IPv4 entry point: thin wrapper dispatching to the shared inline. */
261 VLIB_NODE_FN (ip4_gbp_fwd_dpo_node) (vlib_main_t * vm,
262 vlib_node_runtime_t * node,
263 vlib_frame_t * from_frame)
265 return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP4));
/* IPv6 entry point: thin wrapper dispatching to the shared inline. */
268 VLIB_NODE_FN (ip6_gbp_fwd_dpo_node) (vlib_main_t * vm,
269 vlib_node_runtime_t * node,
270 vlib_frame_t * from_frame)
272 return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP6));
/*
 * Graph-node registration for the IPv4 path.  Next nodes: drop on
 * lookup miss, ip4-dvr-dpo on the forward path.
 * NOTE(review): the '.next_nodes = {' opener line is elided here.
 */
276 VLIB_REGISTER_NODE (ip4_gbp_fwd_dpo_node) = {
277 .name = "ip4-gbp-fwd-dpo",
278 .vector_size = sizeof (u32),
279 .format_trace = format_gbp_fwd_dpo_trace,
280 .n_next_nodes = GBP_FWD_N_NEXT,
283 [GBP_FWD_DROP] = "ip4-drop",
284 [GBP_FWD_FWD] = "ip4-dvr-dpo",
/*
 * Graph-node registration for the IPv6 path, mirroring the IPv4 one.
 * NOTE(review): the '.next_nodes = {' opener line is elided here.
 */
287 VLIB_REGISTER_NODE (ip6_gbp_fwd_dpo_node) = {
288 .name = "ip6-gbp-fwd-dpo",
289 .vector_size = sizeof (u32),
290 .format_trace = format_gbp_fwd_dpo_trace,
291 .n_next_nodes = GBP_FWD_N_NEXT,
294 [GBP_FWD_DROP] = "ip6-drop",
295 [GBP_FWD_FWD] = "ip6-dvr-dpo",
301 * fd.io coding-style-patch-verification: ON
304 * eval: (c-set-style "gnu")