2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
 * A Data-Path Object is an object that represents actions that are
 * applied to packets as they are switched through VPP.
 *
 * The DPO is a base class that is specialised by other objects to provide
 * concrete actions.
 *
 * The VLIB graph nodes are a graph of types; the DPO graph is a graph of
 * instances.
26 #include <vnet/dpo/dpo.h>
27 #include <vnet/ip/lookup.h>
28 #include <vnet/ip/format.h>
29 #include <vnet/adj/adj.h>
31 #include <vnet/dpo/load_balance.h>
32 #include <vnet/dpo/mpls_label_dpo.h>
33 #include <vnet/dpo/lookup_dpo.h>
34 #include <vnet/dpo/drop_dpo.h>
35 #include <vnet/dpo/receive_dpo.h>
36 #include <vnet/dpo/punt_dpo.h>
37 #include <vnet/dpo/classify_dpo.h>
38 #include <vnet/dpo/ip_null_dpo.h>
39 #include <vnet/dpo/replicate_dpo.h>
40 #include <vnet/dpo/interface_rx_dpo.h>
41 #include <vnet/dpo/interface_tx_dpo.h>
42 #include <vnet/dpo/mpls_disposition.h>
43 #include <vnet/dpo/l2_bridge_dpo.h>
46 * Array of char* names for the DPO types and protos
48 static const char* dpo_type_names[] = DPO_TYPES;
49 static const char* dpo_proto_names[] = DPO_PROTOS;
52 * @brief Vector of virtual function tables for the DPO types
54 * This is a vector so we can dynamically register new DPO types in plugins.
56 static dpo_vft_t *dpo_vfts;
/**
 * @brief vector of graph node names associated with each DPO type and protocol.
 *
 *   dpo_nodes[child_type][child_proto][node_X] = node_name;
 * i.e.
 *   dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
 *   dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
 *
 * This is a vector so we can dynamically register new DPO types in plugins.
 */
static const char* const * const ** dpo_nodes;
71 * @brief Vector of edge indicies from parent DPO nodes to child
73 * dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
75 * This array is derived at init time from the dpo_nodes above. Note that
76 * the third dimension in dpo_nodes is lost, hence, the edge index from each
77 * node MUST be the same.
78 * Including both the child and parent protocol is required to support the
79 * case where it changes as the grapth is traversed, most notablly when an
80 * MPLS label is popped.
82 * Note that this array is child type specific, not child instance specific.
84 static u32 ****dpo_edges;
87 * @brief The DPO type value that can be assigend to the next dynamic
90 static dpo_type_t dpo_dynamic = DPO_LAST;
93 vnet_link_to_dpo_proto (vnet_link_t linkt)
98 return (DPO_PROTO_IP6);
100 return (DPO_PROTO_IP4);
102 return (DPO_PROTO_MPLS);
103 case VNET_LINK_ETHERNET:
104 return (DPO_PROTO_ETHERNET);
106 return (DPO_PROTO_NSH);
115 dpo_proto_to_link (dpo_proto_t dp)
120 return (VNET_LINK_IP6);
122 return (VNET_LINK_IP4);
124 return (VNET_LINK_MPLS);
125 case DPO_PROTO_ETHERNET:
126 return (VNET_LINK_ETHERNET);
128 return (VNET_LINK_NSH);
134 format_dpo_type (u8 * s, va_list * args)
136 dpo_type_t type = va_arg (*args, int);
138 s = format(s, "%s", dpo_type_names[type]);
144 format_dpo_id (u8 * s, va_list * args)
146 dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
147 u32 indent = va_arg (*args, u32);
149 s = format(s, "[@%d]: ", dpo->dpoi_next_node);
151 if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
153 return (format(s, "%U",
154 dpo_vfts[dpo->dpoi_type].dv_format,
159 switch (dpo->dpoi_type)
162 s = format(s, "unset");
165 s = format(s, "unknown");
172 format_dpo_proto (u8 * s, va_list * args)
174 dpo_proto_t proto = va_arg (*args, int);
176 return (format(s, "%s", dpo_proto_names[proto]));
180 dpo_set (dpo_id_t *dpo,
187 dpo->dpoi_type = type;
188 dpo->dpoi_proto = proto,
189 dpo->dpoi_index = index;
191 if (DPO_ADJACENCY == type)
194 * set the adj subtype
198 adj = adj_get(index);
200 switch (adj->lookup_next_index)
202 case IP_LOOKUP_NEXT_ARP:
203 dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
205 case IP_LOOKUP_NEXT_MIDCHAIN:
206 dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
208 case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
209 dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
211 case IP_LOOKUP_NEXT_MCAST:
212 dpo->dpoi_type = DPO_ADJACENCY_MCAST;
214 case IP_LOOKUP_NEXT_GLEAN:
215 dpo->dpoi_type = DPO_ADJACENCY_GLEAN;
226 dpo_reset (dpo_id_t *dpo)
228 dpo_id_t tmp = DPO_INVALID;
231 * use the atomic copy operation.
238 * Compare two Data-path objects
240 * like memcmp, return 0 is matching, !0 otherwise.
243 dpo_cmp (const dpo_id_t *dpo1,
244 const dpo_id_t *dpo2)
248 res = dpo1->dpoi_type - dpo2->dpoi_type;
250 if (0 != res) return (res);
252 return (dpo1->dpoi_index - dpo2->dpoi_index);
256 dpo_copy (dpo_id_t *dst,
262 * the destination is written in a single u64 write - hence atomically w.r.t
263 * any packets inflight.
265 *((u64*)dst) = *(u64*)src;
272 dpo_is_adj (const dpo_id_t *dpo)
274 return ((dpo->dpoi_type == DPO_ADJACENCY) ||
275 (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
276 (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN) ||
277 (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
281 dpo_default_get_next_node (const dpo_id_t *dpo)
283 u32 *node_indices = NULL;
284 const char *node_name;
287 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
288 while (NULL != node_name)
292 node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
293 ASSERT(NULL != node);
294 vec_add1(node_indices, node->index);
297 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
300 return (node_indices);
304 dpo_register (dpo_type_t type,
305 const dpo_vft_t *vft,
306 const char * const * const * nodes)
308 vec_validate(dpo_vfts, type);
309 dpo_vfts[type] = *vft;
310 if (NULL == dpo_vfts[type].dv_get_next_node)
312 dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
315 vec_validate(dpo_nodes, type);
316 dpo_nodes[type] = nodes;
320 dpo_register_new_type (const dpo_vft_t *vft,
321 const char * const * const * nodes)
323 dpo_type_t type = dpo_dynamic++;
325 dpo_register(type, vft, nodes);
331 dpo_lock (dpo_id_t *dpo)
333 if (!dpo_id_is_valid(dpo))
336 dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
340 dpo_unlock (dpo_id_t *dpo)
342 if (!dpo_id_is_valid(dpo))
345 dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
350 dpo_get_next_node (dpo_type_t child_type,
351 dpo_proto_t child_proto,
352 const dpo_id_t *parent_dpo)
354 dpo_proto_t parent_proto;
355 dpo_type_t parent_type;
357 parent_type = parent_dpo->dpoi_type;
358 parent_proto = parent_dpo->dpoi_proto;
360 vec_validate(dpo_edges, child_type);
361 vec_validate(dpo_edges[child_type], child_proto);
362 vec_validate(dpo_edges[child_type][child_proto], parent_type);
363 vec_validate_init_empty(
364 dpo_edges[child_type][child_proto][parent_type],
368 * if the edge index has not yet been created for this node to node transistion
370 if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
372 vlib_node_t *child_node;
377 vm = vlib_get_main();
379 ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
380 ASSERT(NULL != dpo_nodes[child_type]);
381 ASSERT(NULL != dpo_nodes[child_type][child_proto]);
384 parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);
386 vlib_worker_thread_barrier_sync(vm);
389 * create a graph arc from each of the child's registered node types,
390 * to each of the parent's.
392 while (NULL != dpo_nodes[child_type][child_proto][cc])
395 vlib_get_node_by_name(vm,
396 (u8*) dpo_nodes[child_type][child_proto][cc]);
398 vec_foreach(pi, parent_indices)
400 edge = vlib_node_add_next(vm, child_node->index, *pi);
402 if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
404 dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
408 ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
414 vlib_worker_thread_barrier_release(vm);
415 vec_free(parent_indices);
418 return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
422 * @brief Stack one DPO object on another, and thus establish a child parent
423 * relationship. The VLIB graph arc used is taken from the parent and child types
427 dpo_stack_i (u32 edge,
429 const dpo_id_t *parent)
432 * in order to get an atomic update of the parent we create a temporary,
433 * from a copy of the child, and add the next_node. then we copy to the parent
435 dpo_id_t tmp = DPO_INVALID;
436 dpo_copy(&tmp, parent);
439 * get the edge index for the parent to child VLIB graph transisition
441 tmp.dpoi_next_node = edge;
444 * this update is atomic.
452 * @brief Stack one DPO object on another, and thus establish a child-parent
453 * relationship. The VLIB graph arc used is taken from the parent and child types
457 dpo_stack (dpo_type_t child_type,
458 dpo_proto_t child_proto,
460 const dpo_id_t *parent)
462 dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
466 * @brief Stack one DPO object on another, and thus establish a child parent
467 * relationship. A new VLIB graph arc is created from the child node passed
468 * to the nodes registered by the parent. The VLIB infra will ensure this arc
469 * is added only once.
472 dpo_stack_from_node (u32 child_node_index,
474 const dpo_id_t *parent)
476 dpo_type_t parent_type;
482 parent_type = parent->dpoi_type;
483 vm = vlib_get_main();
485 ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
486 parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
487 ASSERT(parent_indices);
490 * This loop is purposefully written with the worker thread lock in the
491 * inner loop because;
492 * 1) the likelihood that the edge does not exist is smaller
493 * 2) the likelihood there is more than one node is even smaller
494 * so we are optimising for not need to take the lock
496 vec_foreach(pi, parent_indices)
498 edge = vlib_node_get_next(vm, child_node_index, *pi);
502 vlib_worker_thread_barrier_sync(vm);
504 edge = vlib_node_add_next(vm, child_node_index, *pi);
506 vlib_worker_thread_barrier_release(vm);
509 dpo_stack_i(edge, dpo, parent);
512 static clib_error_t *
513 dpo_module_init (vlib_main_t * vm)
515 drop_dpo_module_init();
516 punt_dpo_module_init();
517 receive_dpo_module_init();
518 load_balance_module_init();
519 mpls_label_dpo_module_init();
520 classify_dpo_module_init();
521 lookup_dpo_module_init();
522 ip_null_dpo_module_init();
523 replicate_module_init();
524 interface_rx_dpo_module_init();
525 interface_tx_dpo_module_init();
526 mpls_disp_dpo_module_init();
527 l2_bridge_dpo_module_init();
532 VLIB_INIT_FUNCTION(dpo_module_init);
534 static clib_error_t *
535 dpo_memory_show (vlib_main_t * vm,
536 unformat_input_t * input,
537 vlib_cli_command_t * cmd)
541 vlib_cli_output (vm, "DPO memory");
542 vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
543 "Name","Size", "in-use", "allocated");
545 vec_foreach(vft, dpo_vfts)
547 if (NULL != vft->dv_mem_show)
556 * The '<em>sh dpo memory </em>' command displays the memory usage for each
557 * data-plane object type.
560 * @cliexstart{show dpo memory}
562 * Name Size in-use /allocated totals
563 * load-balance 64 12 / 12 768/768
564 * Adjacency 256 1 / 1 256/256
565 * Receive 24 5 / 5 120/120
566 * Lookup 12 0 / 0 0/0
567 * Classify 12 0 / 0 0/0
568 * MPLS label 24 0 / 0 0/0
571 VLIB_CLI_COMMAND (show_fib_memory, static) = {
572 .path = "show dpo memory",
573 .function = dpo_memory_show,
574 .short_help = "show dpo memory",