X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdpo%2Fdpo.c;h=df78456bf60d098987b9ad8eefbe4f8f1b6e5f59;hb=8feeaff56;hp=aa7708385a5aac9734cefed9ce4b2b9fa53b61ea;hpb=da78f957e46c686434149d332a477d7ea055d76a;p=vpp.git

diff --git a/src/vnet/dpo/dpo.c b/src/vnet/dpo/dpo.c
index aa7708385a5..df78456bf60 100644
--- a/src/vnet/dpo/dpo.c
+++ b/src/vnet/dpo/dpo.c
@@ -18,7 +18,7 @@
  * applied to packets are they are switched through VPP.
  *
  * The DPO is a base class that is specialised by other objects to provide
- * concreate actions
+ * concrete actions
  *
  * The VLIB graph nodes are graph of types, the DPO graph is a graph of instances.
  */
@@ -37,8 +37,12 @@
 #include
 #include
 #include
-#include
+#include
+#include
 #include
+#include
+#include
+#include
 
 /**
  * Array of char* names for the DPO types and protos
@@ -74,7 +78,7 @@ static const char* const * const ** dpo_nodes;
  * the third dimension in dpo_nodes is lost, hence, the edge index from each
  * node MUST be the same.
  * Including both the child and parent protocol is required to support the
- * case where it changes as the grapth is traversed, most notablly when an
+ * case where it changes as the graph is traversed, most notably when an
  * MPLS label is popped.
  *
  * Note that this array is child type specific, not child instance specific.
@@ -82,7 +86,7 @@ static const char* const * const ** dpo_nodes;
 static u32 ****dpo_edges;
 
 /**
- * @brief The DPO type value that can be assigend to the next dynamic
+ * @brief The DPO type value that can be assigned to the next dynamic
  * type registration.
  */
 static dpo_type_t dpo_dynamic = DPO_LAST;
@@ -119,6 +123,7 @@ dpo_proto_to_link (dpo_proto_t dp)
     case DPO_PROTO_IP4:
         return (VNET_LINK_IP4);
     case DPO_PROTO_MPLS:
+    case DPO_PROTO_BIER:
         return (VNET_LINK_MPLS);
     case DPO_PROTO_ETHERNET:
         return (VNET_LINK_ETHERNET);
@@ -148,20 +153,22 @@ format_dpo_id (u8 * s, va_list * args)
 
     if (NULL != dpo_vfts[dpo->dpoi_type].dv_format)
     {
-        return (format(s, "%U",
-                       dpo_vfts[dpo->dpoi_type].dv_format,
-                       dpo->dpoi_index,
-                       indent));
+        s = format(s, "%U",
+                   dpo_vfts[dpo->dpoi_type].dv_format,
+                   dpo->dpoi_index,
+                   indent);
     }
-
-    switch (dpo->dpoi_type)
+    else
     {
-    case DPO_FIRST:
-        s = format(s, "unset");
-        break;
-    default:
-        s = format(s, "unknown");
-        break;
+        switch (dpo->dpoi_type)
+        {
+        case DPO_FIRST:
+            s = format(s, "unset");
+            break;
+        default:
+            s = format(s, "unknown");
+            break;
+        }
     }
     return (s);
 }
@@ -275,6 +282,41 @@ dpo_is_adj (const dpo_id_t *dpo)
             (dpo->dpoi_type == DPO_ADJACENCY_GLEAN));
 }
 
+static u32 *
+dpo_default_get_next_node (const dpo_id_t *dpo)
+{
+    u32 *node_indices = NULL;
+    const char *node_name;
+    u32 ii = 0;
+
+    node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
+    while (NULL != node_name)
+    {
+        vlib_node_t *node;
+
+        node = vlib_get_node_by_name(vlib_get_main(), (u8*) node_name);
+        ASSERT(NULL != node);
+        vec_add1(node_indices, node->index);
+
+        ++ii;
+        node_name = dpo_nodes[dpo->dpoi_type][dpo->dpoi_proto][ii];
+    }
+
+    return (node_indices);
+}
+
+/**
+ * A default variant of the make interpose function that just returns
+ * the original
+ */
+static void
+dpo_default_mk_interpose (const dpo_id_t *original,
+                          const dpo_id_t *parent,
+                          dpo_id_t *clone)
+{
+    dpo_copy(clone, original);
+}
+
 void
 dpo_register (dpo_type_t type,
               const dpo_vft_t *vft,
@@ -282,6 +324,14 @@ dpo_register (dpo_type_t type,
 {
     vec_validate(dpo_vfts, type);
     dpo_vfts[type] = *vft;
+    if (NULL == dpo_vfts[type].dv_get_next_node)
+    {
+        dpo_vfts[type].dv_get_next_node = dpo_default_get_next_node;
+    }
+    if (NULL == dpo_vfts[type].dv_mk_interpose)
+    {
+        dpo_vfts[type].dv_mk_interpose = dpo_default_mk_interpose;
+    }
 
     vec_validate(dpo_nodes, type);
     dpo_nodes[type] = nodes;
@@ -298,6 +348,17 @@ dpo_register_new_type (const dpo_vft_t *vft,
     return (type);
 }
 
+void
+dpo_mk_interpose (const dpo_id_t *original,
+                  const dpo_id_t *parent,
+                  dpo_id_t *clone)
+{
+    if (!dpo_id_is_valid(original))
+        return;
+
+    dpo_vfts[original->dpoi_type].dv_mk_interpose(original, parent, clone);
+}
+
 void
 dpo_lock (dpo_id_t *dpo)
 {
@@ -316,6 +377,17 @@ dpo_unlock (dpo_id_t *dpo)
 
     dpo_vfts[dpo->dpoi_type].dv_unlock(dpo);
 }
+u32
+dpo_get_urpf(const dpo_id_t *dpo)
+{
+    if (dpo_id_is_valid(dpo) &&
+        (NULL != dpo_vfts[dpo->dpoi_type].dv_get_urpf))
+    {
+        return (dpo_vfts[dpo->dpoi_type].dv_get_urpf(dpo));
+    }
+
+    return (~0);
+}
 
 static u32
 dpo_get_next_node (dpo_type_t child_type,
@@ -336,28 +408,29 @@ dpo_get_next_node (dpo_type_t child_type,
                                  parent_proto, ~0);
 
     /*
-     * if the edge index has not yet been created for this node to node transistion
+     * if the edge index has not yet been created for this node to node transition
      */
     if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
     {
-        vlib_node_t *parent_node, *child_node;
+        vlib_node_t *child_node;
+        u32 *parent_indices;
         vlib_main_t *vm;
-        u32 edge ,pp, cc;
+        u32 edge, *pi, cc;
 
         vm = vlib_get_main();
-        vlib_worker_thread_barrier_sync(vm);
-
+        ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
         ASSERT(NULL != dpo_nodes[child_type]);
         ASSERT(NULL != dpo_nodes[child_type][child_proto]);
-        ASSERT(NULL != dpo_nodes[parent_type]);
-        ASSERT(NULL != dpo_nodes[parent_type][parent_proto]);
 
         cc = 0;
+        parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent_dpo);
+
+        vlib_worker_thread_barrier_sync(vm);
 
         /*
-         * create a graph arc from each of the parent's registered node types,
-         * to each of the childs.
+         * create a graph arc from each of the child's registered node types,
+         * to each of the parent's.
          */
         while (NULL != dpo_nodes[child_type][child_proto][cc])
         {
@@ -365,17 +438,9 @@ dpo_get_next_node (dpo_type_t child_type,
                 vlib_get_node_by_name(vm,
                                       (u8*) dpo_nodes[child_type][child_proto][cc]);
 
-            pp = 0;
-
-            while (NULL != dpo_nodes[parent_type][parent_proto][pp])
+            vec_foreach(pi, parent_indices)
             {
-                parent_node =
-                    vlib_get_node_by_name(vm,
-                                          (u8*) dpo_nodes[parent_type][parent_proto][pp]);
-
-                edge = vlib_node_add_next(vm,
-                                          child_node->index,
-                                          parent_node->index);
+                edge = vlib_node_add_next(vm, child_node->index, *pi);
 
                 if (~0 == dpo_edges[child_type][child_proto][parent_type][parent_proto])
                 {
@@ -385,17 +450,32 @@ dpo_get_next_node (dpo_type_t child_type,
                 {
                     ASSERT(dpo_edges[child_type][child_proto][parent_type][parent_proto] == edge);
                 }
-                pp++;
             }
             cc++;
         }
 
         vlib_worker_thread_barrier_release(vm);
+        vec_free(parent_indices);
     }
 
     return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
 }
 
+/**
+ * @brief return already stacked up next node index for a given
+ * child_type/child_proto and parent_type/parent_proto.
+ * The VLIB graph arc used is taken from the parent and child types
+ * passed.
+ */
+u32
+dpo_get_next_node_by_type_and_proto (dpo_type_t child_type,
+                                     dpo_proto_t child_proto,
+                                     dpo_type_t parent_type,
+                                     dpo_proto_t parent_proto)
+{
+    return (dpo_edges[child_type][child_proto][parent_type][parent_proto]);
+}
+
 /**
  * @brief Stack one DPO object on another, and thus establish a child parent
  * relationship. The VLIB graph arc used is taken from the parent and child types
@@ -414,7 +494,7 @@ dpo_stack_i (u32 edge,
     dpo_copy(&tmp, parent);
 
     /*
-     * get the edge index for the parent to child VLIB graph transisition
+     * get the edge index for the parent to child VLIB graph transition
      */
     tmp.dpoi_next_node = edge;
 
@@ -451,39 +531,43 @@ dpo_stack_from_node (u32 child_node_index,
                      dpo_id_t *dpo,
                      const dpo_id_t *parent)
 {
-    dpo_proto_t parent_proto;
-    vlib_node_t *parent_node;
     dpo_type_t parent_type;
+    u32 *parent_indices;
     vlib_main_t *vm;
-    u32 edge;
+    u32 edge, *pi;
 
+    edge = 0;
     parent_type = parent->dpoi_type;
-    parent_proto = parent->dpoi_proto;
-
     vm = vlib_get_main();
 
-    ASSERT(NULL != dpo_nodes[parent_type]);
-    ASSERT(NULL != dpo_nodes[parent_type][parent_proto]);
+    ASSERT(NULL != dpo_vfts[parent_type].dv_get_next_node);
+    parent_indices = dpo_vfts[parent_type].dv_get_next_node(parent);
+    ASSERT(parent_indices);
 
-    parent_node =
-        vlib_get_node_by_name(vm, (u8*) dpo_nodes[parent_type][parent_proto][0]);
-
-    edge = vlib_node_get_next(vm,
-                              child_node_index,
-                              parent_node->index);
-
-    if (~0 == edge)
+    /*
+     * This loop is purposefully written with the worker thread lock in the
+     * inner loop because:
+     * 1) the likelihood that the edge does not exist is smaller
+     * 2) the likelihood there is more than one node is even smaller
+     * so we are optimising for not needing to take the lock
+     */
+    vec_foreach(pi, parent_indices)
     {
-        vlib_worker_thread_barrier_sync(vm);
+        edge = vlib_node_get_next(vm, child_node_index, *pi);
 
-        edge = vlib_node_add_next(vm,
-                                  child_node_index,
-                                  parent_node->index);
+        if (~0 == edge)
+        {
+            vlib_worker_thread_barrier_sync(vm);
 
-        vlib_worker_thread_barrier_release(vm);
-    }
+            edge = vlib_node_add_next(vm, child_node_index, *pi);
+            vlib_worker_thread_barrier_release(vm);
+        }
+    }
 
     dpo_stack_i(edge, dpo, parent);
+
+    /* should free this local vector to avoid memory leak */
+    vec_free(parent_indices);
 }
 
 static clib_error_t *
@@ -497,9 +581,13 @@ dpo_module_init (vlib_main_t * vm)
     classify_dpo_module_init();
     lookup_dpo_module_init();
     ip_null_dpo_module_init();
+    ip6_ll_dpo_module_init();
     replicate_module_init();
-    interface_dpo_module_init();
+    interface_rx_dpo_module_init();
+    interface_tx_dpo_module_init();
     mpls_disp_dpo_module_init();
+    dvr_dpo_module_init();
+    l3_proxy_dpo_module_init();
 
     return (NULL);
 }
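
For context on the registration side of the API this patch touches, the following is a minimal sketch (not part of the patch) of how a DPO type could be registered so that it picks up the new defaults installed by dpo_register(): with dv_get_next_node and dv_mk_interpose left unset, the patched dpo_register() falls back to dpo_default_get_next_node and dpo_default_mk_interpose, and the per-protocol node-name table alone drives graph-arc construction. All example_* names and the choice of "ip4-lookup"/"ip6-lookup" as next nodes are hypothetical.

#include <vnet/dpo/dpo.h>

/* hypothetical lock/unlock callbacks; a real type would manage the
 * reference count of the instance identified by dpo->dpoi_index */
static void example_dpo_lock (dpo_id_t *dpo)   { /* take a reference */ }
static void example_dpo_unlock (dpo_id_t *dpo) { /* drop the reference */ }

/* dv_get_next_node and dv_mk_interpose are deliberately left NULL so
 * that dpo_register() installs the new defaults from this patch */
const static dpo_vft_t example_dpo_vft = {
    .dv_lock = example_dpo_lock,
    .dv_unlock = example_dpo_unlock,
};

/* the VLIB nodes this DPO type dispatches into, per payload protocol;
 * dpo_default_get_next_node() walks these NULL-terminated lists */
const static char * const example_ip4_nodes[] = { "ip4-lookup", NULL };
const static char * const example_ip6_nodes[] = { "ip6-lookup", NULL };
const static char * const * const example_nodes[DPO_PROTO_NUM] = {
    [DPO_PROTO_IP4] = example_ip4_nodes,
    [DPO_PROTO_IP6] = example_ip6_nodes,
};

static dpo_type_t example_dpo_type;

void
example_dpo_module_init (void)
{
    /* allocate a dynamic type value and register the vft/node table */
    example_dpo_type = dpo_register_new_type(&example_dpo_vft,
                                             example_nodes);
}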
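And a sketch of the consuming side, again with hypothetical example_* names and assuming the registration sketch above: dpo_stack() (or dpo_stack_from_node() for a graph node that is not itself a DPO) resolves the parent's next-node set through dv_get_next_node, creates any missing VLIB arcs under the worker barrier, and records the edge in dpoi_next_node; the child's data-plane code then dispatches on that edge and hands dpoi_index to the parent via the buffer metadata.

/* hypothetical usage sketch; not part of the patch */
static void
example_restack (dpo_id_t *child_dpo, const dpo_id_t *new_parent)
{
    /* resolve (or create) the child-to-parent VLIB arc and store it in
     * child_dpo->dpoi_next_node */
    dpo_stack(example_dpo_type, DPO_PROTO_IP4, child_dpo, new_parent);
}

static u32
example_dispatch (vlib_buffer_t *b0, const dpo_id_t *parent_dpo)
{
    /* tell the parent which of its instances to use for this packet */
    vnet_buffer(b0)->ip.adj_index[VLIB_TX] = parent_dpo->dpoi_index;

    /* and send the packet along the pre-computed graph arc */
    return (parent_dpo->dpoi_next_node);
}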