2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * A Data-Path Object is an object that represents actions that are
18 * applied to packets as they are switched through VPP.
20 * The DPO is a base class that is specialised by other objects to provide
23 * The VLIB graph nodes are graph of types, the DPO graph is a graph of instances.
26 #include <vnet/dpo/dpo.h>
27 #include <vnet/ip/lookup.h>
28 #include <vnet/ip/format.h>
29 #include <vnet/adj/adj.h>
31 #include <vnet/dpo/load_balance.h>
32 #include <vnet/dpo/mpls_label_dpo.h>
33 #include <vnet/dpo/lookup_dpo.h>
34 #include <vnet/dpo/drop_dpo.h>
35 #include <vnet/dpo/receive_dpo.h>
36 #include <vnet/dpo/punt_dpo.h>
37 #include <vnet/dpo/classify_dpo.h>
38 #include <vnet/dpo/ip_null_dpo.h>
39 #include <vnet/dpo/replicate_dpo.h>
40 #include <vnet/dpo/interface_rx_dpo.h>
41 #include <vnet/dpo/interface_tx_dpo.h>
42 #include <vnet/dpo/mpls_disposition.h>
43 #include <vnet/dpo/dvr_dpo.h>
44 #include <vnet/dpo/l3_proxy_dpo.h>
47 * Array of char* names for the DPO types and protos
49 static const char* dpo_type_names[] = DPO_TYPES;
50 static const char* dpo_proto_names[] = DPO_PROTOS;
53 * @brief Vector of virtual function tables for the DPO types
55 * This is a vector so we can dynamically register new DPO types in plugins.
57 static dpo_vft_t *dpo_vfts;
60 * @brief vector of graph node names associated with each DPO type and protocol.
62 * dpo_nodes[child_type][child_proto][node_X] = node_name;
64 * dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][0] = "ip4-lookup"
65 * dpo_node[DPO_LOAD_BALANCE][DPO_PROTO_IP4][1] = "ip4-load-balance"
67 * This is a vector so we can dynamically register new DPO types in plugins.
69 static const char* const * const ** dpo_nodes;
72 * @brief Vector of edge indices from parent DPO nodes to child
74 * dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge_index
76 * This array is derived at init time from the dpo_nodes above. Note that
77 * the third dimension in dpo_nodes is lost, hence, the edge index from each
78 * node MUST be the same.
79 * Including both the child and parent protocol is required to support the
80 * case where it changes as the graph is traversed, most notably when an
81 * MPLS label is popped.
83 * Note that this array is child type specific, not child instance specific.
85 static u32 ****dpo_edges;
88 * @brief The DPO type value that can be assigned to the next dynamic
91 static dpo_type_t dpo_dynamic = DPO_LAST;
/*
 * Map a VNET link type to the corresponding DPO protocol.
 * One-to-one mapping for IP4/IP6/MPLS/Ethernet/NSH.
 * NOTE(review): the function's return-type line, switch header and any
 * default case are elided from this listing — confirm against the full file.
 */
94 vnet_link_to_dpo_proto (vnet_link_t linkt)
99 return (DPO_PROTO_IP6);
101 return (DPO_PROTO_IP4);
103 return (DPO_PROTO_MPLS);
104 case VNET_LINK_ETHERNET:
105 return (DPO_PROTO_ETHERNET);
107 return (DPO_PROTO_NSH);
/*
 * Inverse of vnet_link_to_dpo_proto: map a DPO protocol back to the
 * VNET link type it is carried over.
 * NOTE(review): switch scaffolding and default case are elided here.
 */
116 dpo_proto_to_link (dpo_proto_t dp)
121 return (VNET_LINK_IP6);
123 return (VNET_LINK_IP4);
126 return (VNET_LINK_MPLS);
127 case DPO_PROTO_ETHERNET:
128 return (VNET_LINK_ETHERNET);
130 return (VNET_LINK_NSH);
/*
 * vlib format function: render a dpo_type_t as its registered name
 * from the dpo_type_names[] table.
 */
136 format_dpo_type (u8 * s, va_list * args)
/* fetched as int: enum arguments are promoted to int through varargs */
138 dpo_type_t type = va_arg (*args, int);
140 s = format(s, "%s", dpo_type_names[type]);
/*
 * vlib format function: render a dpo_id_t. Prints the next-node edge
 * index, then delegates to the type's registered dv_format virtual
 * function when one exists; otherwise falls back to a generic label.
 */
146 format_dpo_id (u8 * s, va_list * args)
148 dpo_id_t *dpo = va_arg (*args, dpo_id_t*);
149 u32 indent = va_arg (*args, u32);
151 s = format(s, "[@%d]: ", dpo->dpoi_next_node);
/* prefer the type-specific formatter registered via dpo_register() */
153 if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
155 return (format(s, "%U",
156 dpo_vfts[dpo->dpoi_type].dv_format,
/* no dv_format registered: fall back on the type value itself */
161 switch (dpo->dpoi_type)
164 s = format(s, "unset");
167 s = format(s, "unknown");
/*
 * vlib format function: render a dpo_proto_t as its registered name
 * from the dpo_proto_names[] table.
 */
174 format_dpo_proto (u8 * s, va_list * args)
/* fetched as int: enum arguments are promoted to int through varargs */
176 dpo_proto_t proto = va_arg (*args, int);
178 return (format(s, "%s", dpo_proto_names[proto]));
/*
 * Set a DPO's type, protocol and instance index.
 * For DPO_ADJACENCY the type is further specialised into the adjacency
 * sub-type (incomplete/midchain/mcast/glean) by inspecting the
 * adjacency's ip_lookup_next_t.
 */
182 dpo_set (dpo_id_t *dpo,
189 dpo->dpoi_type = type;
/* NOTE(review): trailing comma (operator) rather than semicolon —
 * behaviourally identical here, but easy to misread */
190 dpo->dpoi_proto = proto,
191 dpo->dpoi_index = index;
193 if (DPO_ADJACENCY == type)
196 * set the adj subtype
200 adj = adj_get(index);
202 switch (adj->lookup_next_index)
204 case IP_LOOKUP_NEXT_ARP:
205 dpo->dpoi_type = DPO_ADJACENCY_INCOMPLETE;
207 case IP_LOOKUP_NEXT_MIDCHAIN:
208 dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
210 case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
211 dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
213 case IP_LOOKUP_NEXT_MCAST:
214 dpo->dpoi_type = DPO_ADJACENCY_MCAST;
216 case IP_LOOKUP_NEXT_GLEAN:
217 dpo->dpoi_type = DPO_ADJACENCY_GLEAN;
/*
 * Reset a DPO to the invalid/unset state, releasing the reference it
 * held on its old target via the atomic copy path.
 */
228 dpo_reset (dpo_id_t *dpo)
230 dpo_id_t tmp = DPO_INVALID;
233 * use the atomic copy operation.
240 * Compare two Data-path objects
242 * like memcmp, return 0 if matching, !0 otherwise.
245 dpo_cmp (const dpo_id_t *dpo1,
246 const dpo_id_t *dpo2)
/* order by type first, then by instance index; proto is not compared */
250 res = dpo1->dpoi_type - dpo2->dpoi_type;
252 if (0 != res) return (res);
254 return (dpo1->dpoi_index - dpo2->dpoi_index);
/*
 * Copy a DPO, atomically w.r.t. the data-plane.
 * NOTE(review): relies on dpo_id_t being exactly 8 bytes so the single
 * u64 store is one aligned write — presumably asserted elsewhere; the
 * cast also sidesteps strict-aliasing, which is deliberate here.
 */
258 dpo_copy (dpo_id_t *dst,
264 * the destination is written in a single u64 write - hence atomically w.r.t
265 * any packets inflight.
267 *((u64*)dst) = *(u64*)src;
/*
 * Return non-zero if the DPO is one of the unicast adjacency sub-types
 * (complete, incomplete, midchain or glean).
 */
274 dpo_is_adj (const dpo_id_t *dpo)
276 return ((dpo->dpoi_type == DPO_ADJACENCY) ||
277 (dpo->dpoi_type == DPO_ADJACENCY_INCOMPLETE) ||
278 (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN) ||
279 (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
/*
 * Default dv_get_next_node implementation: walk the NULL-terminated
 * list of node names registered in dpo_nodes[type][proto] and return a
 * vector of the corresponding vlib node indices. Caller frees the vector.
 */
283 dpo_default_get_next_node (const dpo_id_t *dpo)
285 u32 *node_indices = NULL;
286 const char *node_name;
289 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
290 while (NULL != node_name)
/* every registered name must resolve to a real graph node */
294 node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
295 ASSERT(NULL != node);
296 vec_add1(node_indices, node->index);
299 node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
302 return (node_indices);
/*
 * Register a DPO type: install its virtual function table and its
 * per-protocol graph node name lists. If the type supplies no
 * dv_get_next_node, the default (dpo_nodes-driven) lookup is used.
 */
306 dpo_register (dpo_type_t type,
307 const dpo_vft_t *vft,
308 const char * const * const * nodes)
310 vec_validate(dpo_vfts, type);
311 dpo_vfts[type] = *vft;
312 if (NULL == dpo_vfts[type].dv_get_next_node)
314 dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
317 vec_validate(dpo_nodes, type);
318 dpo_nodes[type] = nodes;
/*
 * Allocate the next dynamic DPO type value (used by plugins) and
 * register the supplied VFT and node lists under it.
 */
322 dpo_register_new_type (const dpo_vft_t *vft,
323 const char * const * const * nodes)
325 dpo_type_t type = dpo_dynamic++;
327 dpo_register(type, vft, nodes);
/*
 * Take a reference on the object the DPO points at, via the type's
 * dv_lock virtual function. No-op for an invalid DPO.
 */
333 dpo_lock (dpo_id_t *dpo)
335 if (!dpo_id_is_valid(dpo))
338 dpo_vfts[dpo->dpoi_type].dv_lock(dpo);
/*
 * Release a reference on the object the DPO points at, via the type's
 * dv_unlock virtual function. No-op for an invalid DPO.
 */
342 dpo_unlock (dpo_id_t *dpo)
344 if (!dpo_id_is_valid(dpo))
347 dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
/*
 * Return the uRPF (unicast reverse-path-forwarding) interface for the
 * DPO, if its type registered a dv_get_urpf virtual function.
 * The not-found return value is elided from this listing.
 */
351 dpo_get_urpf(const dpo_id_t *dpo)
353 if (dpo_id_is_valid(dpo) &&
354 (NULL != dpo_vfts[dpo->dpoi_type].dv_get_urpf))
356 return (dpo_vfts[dpo->dpoi_type].dv_get_urpf(dpo));
/*
 * Return (creating on first use) the VLIB edge index from the child
 * (type, proto) nodes to the parent DPO's nodes. Results are memoised
 * in the 4-D dpo_edges cache; the slow path adds graph arcs under the
 * worker-thread barrier since it mutates the vlib graph.
 */
363 dpo_get_next_node (dpo_type_t child_type,
364 dpo_proto_t child_proto,
365 const dpo_id_t *parent_dpo)
367 dpo_proto_t parent_proto;
368 dpo_type_t parent_type;
370 parent_type = parent_dpo->dpoi_type;
371 parent_proto = parent_dpo->dpoi_proto;
/* grow the 4-D cache on demand; unset slots are initialised to ~0 */
373 vec_validate(dpo_edges, child_type);
374 vec_validate(dpo_edges[child_type], child_proto);
375 vec_validate(dpo_edges[child_type][child_proto], parent_type);
376 vec_validate_init_empty(
377 dpo_edges[child_type][child_proto][parent_type],
381 * if the edge index has not yet been created for this node to node transition
383 if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
385 vlib_node_t *child_node;
390 vm = vlib_get_main();
392 ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
393 ASSERT(NULL != dpo_nodes[child_type]);
394 ASSERT(NULL != dpo_nodes[child_type][child_proto]);
397 parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);
/* graph mutation: stop the workers before adding arcs */
399 vlib_worker_thread_barrier_sync(vm);
402 * create a graph arc from each of the child's registered node types,
403 * to each of the parent's.
405 while (NULL != dpo_nodes[child_type][child_proto][cc])
408 vlib_get_node_by_name(vm,
409 (u8*) dpo_nodes[child_type][child_proto][cc]);
411 vec_foreach(pi, parent_indices)
413 edge = vlib_node_add_next(vm, child_node->index, *pi);
415 if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
417 dpo_edges[child_type][child_proto][parent_type][parent_proto] = edge;
/* every child node must get the SAME edge index (3rd dpo_nodes
 * dimension is collapsed in the cache — see dpo_edges comment) */
421 ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
427 vlib_worker_thread_barrier_release(vm);
428 vec_free(parent_indices);
431 return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
435 * @brief Stack one DPO object on another, and thus establish a child parent
436 * relationship. The VLIB graph arc used is taken from the parent and child types
440 dpo_stack_i (u32 edge,
442 const dpo_id_t *parent)
445 * in order to get an atomic update of the parent we create a temporary,
446 * from a copy of the child, and add the next_node. then we copy to the parent
448 dpo_id_t tmp = DPO_INVALID;
449 dpo_copy(&tmp, parent);
452 * get the edge index for the parent to child VLIB graph transition
454 tmp.dpoi_next_node = edge;
457 * this update is atomic.
465 * @brief Stack one DPO object on another, and thus establish a child-parent
466 * relationship. The VLIB graph arc used is taken from the parent and child types
470 dpo_stack (dpo_type_t child_type,
471 dpo_proto_t child_proto,
473 const dpo_id_t *parent)
/* resolve (or lazily create) the child->parent edge, then splice */
475 dpo_stack_i(dpo_get_next_node(child_type, child_proto, parent), dpo, parent);
479 * @brief Stack one DPO object on another, and thus establish a child parent
480 * relationship. A new VLIB graph arc is created from the child node passed
481 * to the nodes registered by the parent. The VLIB infra will ensure this arc
482 * is added only once.
485 dpo_stack_from_node (u32 child_node_index,
487 const dpo_id_t *parent)
489 dpo_type_t parent_type;
495 parent_type = parent->dpoi_type;
496 vm = vlib_get_main();
498 ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
499 parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
500 ASSERT(parent_indices);
503 * This loop is purposefully written with the worker thread lock in the
504 * inner loop because;
505 * 1) the likelihood that the edge does not exist is smaller
506 * 2) the likelihood there is more than one node is even smaller
507 * so we are optimising for not needing to take the lock
509 vec_foreach(pi, parent_indices)
/* cheap lookup first; only take the barrier if the arc is missing */
511 edge = vlib_node_get_next(vm, child_node_index, *pi);
515 vlib_worker_thread_barrier_sync(vm);
517 edge = vlib_node_add_next(vm, child_node_index, *pi);
519 vlib_worker_thread_barrier_release(vm);
522 dpo_stack_i(edge, dpo, parent);
/*
 * VLIB init function: initialise every concrete DPO type module so
 * their types/VFTs are registered before FIB construction begins.
 */
525 static clib_error_t *
526 dpo_module_init (vlib_main_t * vm)
528 drop_dpo_module_init();
529 punt_dpo_module_init();
530 receive_dpo_module_init();
531 load_balance_module_init();
532 mpls_label_dpo_module_init();
533 classify_dpo_module_init();
534 lookup_dpo_module_init();
535 ip_null_dpo_module_init();
536 replicate_module_init();
537 interface_rx_dpo_module_init();
538 interface_tx_dpo_module_init();
539 mpls_disp_dpo_module_init();
540 dvr_dpo_module_init();
541 l3_proxy_dpo_module_init();
546 VLIB_INIT_FUNCTION(dpo_module_init);
/*
 * CLI handler for "show dpo memory": print a table header then invoke
 * each registered type's dv_mem_show virtual function, if present.
 */
548 static clib_error_t *
549 dpo_memory_show (vlib_main_t * vm,
550 unformat_input_t * input,
551 vlib_cli_command_t * cmd)
555 vlib_cli_output (vm, "DPO memory");
556 vlib_cli_output (vm, "%=30s %=5s %=8s/%=9s totals",
557 "Name","Size", "in-use", "allocated");
559 vec_foreach(vft, dpo_vfts)
/* types without a mem-show callback are simply skipped */
561 if (NULL != vft->dv_mem_show)
570 * The '<em>sh dpo memory </em>' command displays the memory usage for each
571 * data-plane object type.
574 * @cliexstart{show dpo memory}
576 * Name Size in-use /allocated totals
577 * load-balance 64 12 / 12 768/768
578 * Adjacency 256 1 / 1 256/256
579 * Receive 24 5 / 5 120/120
580 * Lookup 12 0 / 0 0/0
581 * Classify 12 0 / 0 0/0
582 * MPLS label 24 0 / 0 0/0
/* CLI registration for the handler above */
585 VLIB_CLI_COMMAND (show_fib_memory, static) = {
586 .path = "show dpo memory",
587 .function = dpo_memory_show,
588 .short_help = "show dpo memory",