X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fipsec%2Fcrypto_node.c;h=5fbaaf3316a91f8f9cba644c326eb8138e1cf66e;hb=040950a59d53e8802ad31430d67df105939cce4c;hp=edebaf6f83899c7f703835e474a144284c5479c5;hpb=db93cd971320301eb21403caabada7a3ec6a4cce;p=vpp.git

diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/src/plugins/dpdk/ipsec/crypto_node.c
index edebaf6f838..5fbaaf3316a 100644
--- a/src/plugins/dpdk/ipsec/crypto_node.c
+++ b/src/plugins/dpdk/ipsec/crypto_node.c
@@ -44,7 +44,7 @@ static char *dpdk_crypto_input_error_strings[] = {
 #undef _
 };
 
-vlib_node_registration_t dpdk_crypto_input_node;
+extern vlib_node_registration_t dpdk_crypto_input_node;
 
 typedef struct
 {
@@ -63,7 +63,7 @@ static u8 *
 format_cryptodev_status (u8 * s, va_list * args)
 {
   u32 status = va_arg (*args, u32);
-  i8 *str = 0;
+  char *str = 0;
 
   switch (status)
     {
@@ -102,90 +102,87 @@ dpdk_crypto_dequeue (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   next_index = node->cached_next_index;
 
-  do
-    {
-      ops = cwm->ops;
-      n_ops = rte_cryptodev_dequeue_burst (res->dev_id,
-                                           res->qp_id + outbound,
-                                           ops, VLIB_FRAME_SIZE);
-      res->inflights[outbound] -= n_ops;
-      ASSERT (res->inflights >= 0);
-
-      n_deq = n_ops;
-      total_n_deq += n_ops;
-
-      while (n_ops > 0)
-        {
-          u32 n_left_to_next;
-
-          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-          while (n_ops > 0 && n_left_to_next > 0)
-            {
-              u32 bi0, next0;
-              vlib_buffer_t *b0 = 0;
-              struct rte_crypto_op *op;
-
-              op = ops[0];
-              ops += 1;
-              n_ops -= 1;
-              n_left_to_next -= 1;
-
-              dpdk_op_priv_t *priv = crypto_op_get_priv (op);
-              next0 = priv->next;
-
-              if (PREDICT_FALSE (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
-                {
-                  next0 = DPDK_CRYPTO_INPUT_NEXT_DROP;
-                  vlib_node_increment_counter (vm,
-                                               dpdk_crypto_input_node.index,
-                                               DPDK_CRYPTO_INPUT_ERROR_STATUS,
-                                               1);
-                }
-
-              /* XXX store bi0 and next0 in op private? */
-
-              b0 = vlib_buffer_from_rte_mbuf (op->sym[0].m_src);
-              bi0 = vlib_get_buffer_index (vm, b0);
-
-              to_next[0] = bi0;
-              to_next += 1;
-
-              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-                {
-                  vlib_trace_next_frame (vm, node, next0);
-                  dpdk_crypto_input_trace_t *tr =
-                    vlib_add_trace (vm, node, b0, sizeof (*tr));
-                  tr->status = op->status;
-                }
-
-              op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
-                                               n_left_to_next, bi0, next0);
-            }
-          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-        }
-
-      crypto_free_ops (numa, cwm->ops, n_deq);
-    }
-  while (n_deq == VLIB_FRAME_SIZE && res->inflights[outbound]);
+  {
+    ops = cwm->ops;
+    n_ops = rte_cryptodev_dequeue_burst (res->dev_id,
+                                         res->qp_id + outbound,
+                                         ops, VLIB_FRAME_SIZE);
+    res->inflights[outbound] -= n_ops;
+    ASSERT (res->inflights >= 0);
+
+    n_deq = n_ops;
+    total_n_deq += n_ops;
+
+    while (n_ops > 0)
+      {
+        u32 n_left_to_next;
+
+        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+        while (n_ops > 0 && n_left_to_next > 0)
+          {
+            u32 bi0, next0;
+            vlib_buffer_t *b0 = 0;
+            struct rte_crypto_op *op;
+
+            op = ops[0];
+            ops += 1;
+            n_ops -= 1;
+            n_left_to_next -= 1;
+
+            dpdk_op_priv_t *priv = crypto_op_get_priv (op);
+            next0 = priv->next;
+
+            if (PREDICT_FALSE (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
+              {
+                next0 = DPDK_CRYPTO_INPUT_NEXT_DROP;
+                vlib_node_increment_counter (vm,
+                                             dpdk_crypto_input_node.index,
+                                             DPDK_CRYPTO_INPUT_ERROR_STATUS,
+                                             1);
+              }
+
+            /* XXX store bi0 and next0 in op private? */
+
+            b0 = vlib_buffer_from_rte_mbuf (op->sym[0].m_src);
+            bi0 = vlib_get_buffer_index (vm, b0);
+
+            to_next[0] = bi0;
+            to_next += 1;
+
+            if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+              {
+                vlib_trace_next_frame (vm, node, next0);
+                dpdk_crypto_input_trace_t *tr =
+                  vlib_add_trace (vm, node, b0, sizeof (*tr));
+                tr->status = op->status;
+              }
+
+            op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+            vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                             n_left_to_next, bi0, next0);
+          }
+        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      }
+
+    crypto_free_ops (numa, cwm->ops, n_deq);
+  }
 
   vlib_node_increment_counter (vm, dpdk_crypto_input_node.index,
                                DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq);
 
   return total_n_deq;
 }
 
-static uword
-dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                      vlib_frame_t * frame)
+static_always_inline uword
+dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+                          vlib_frame_t * frame)
 {
   u32 thread_index = vlib_get_thread_index ();
   dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
   crypto_worker_main_t *cwm = &dcm->workers_main[thread_index];
   crypto_resource_t *res;
   u32 n_deq = 0;
-  u8 outbound;
   u16 *remove = NULL, *res_idx;
   word i;
@@ -194,13 +191,11 @@ dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
     {
       res = vec_elt_at_index (dcm->resource, res_idx[0]);
 
-      outbound = 0;
-      if (res->inflights[outbound])
-        n_deq += dpdk_crypto_dequeue (vm, node, res, outbound);
+      if (res->inflights[0])
+        n_deq += dpdk_crypto_dequeue (vm, node, res, 0);
 
-      outbound = 1;
-      if (res->inflights[outbound])
-        n_deq += dpdk_crypto_dequeue (vm, node, res, outbound);
+      if (res->inflights[1])
+        n_deq += dpdk_crypto_dequeue (vm, node, res, 1);
 
       if (unlikely(res->remove && !(res->inflights[0] || res->inflights[1])))
         vec_add1 (remove, res_idx[0]);
@@ -230,15 +225,19 @@ dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
       vec_free (remove);
     }
 
-  /* TODO Clear all sessions in device */
-
   return n_deq;
 }
 
+VLIB_NODE_FN (dpdk_crypto_input_node) (vlib_main_t * vm,
+                                       vlib_node_runtime_t * node,
+                                       vlib_frame_t * from_frame)
+{
+  return dpdk_crypto_input_inline (vm, node, from_frame);
+}
+
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (dpdk_crypto_input_node) =
 {
-  .function = dpdk_crypto_input_fn,
   .name = "dpdk-crypto-input",
   .format_trace = format_dpdk_crypto_input_trace,
   .type = VLIB_NODE_TYPE_INPUT,
@@ -255,7 +254,6 @@ VLIB_REGISTER_NODE (dpdk_crypto_input_node) =
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (dpdk_crypto_input_node, dpdk_crypto_input_fn)
 
 /*
  * fd.io coding-style-patch-verification: ON
 *
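
As I read the patch above, the node moves from an explicitly registered dpdk_crypto_input_fn (with VLIB_NODE_FUNCTION_MULTIARCH supplying per-CPU variants) to the VLIB_NODE_FN convention, where the macro itself generates the multi-architecture variants and attaches the selected one to the registration; the forward declaration of dpdk_crypto_input_node accordingly becomes extern so it only declares, rather than defines, the registration symbol. Below is a minimal, hypothetical sketch of that post-change pattern for an unrelated node; the names example_input_node, example_input_inline and "example-input" are placeholders and are not part of this patch.

#include <vlib/vlib.h>

/* Declared extern: the definition comes from VLIB_REGISTER_NODE below,
 * mirroring the extern change made to dpdk_crypto_input_node. */
extern vlib_node_registration_t example_input_node;

static_always_inline uword
example_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  /* Per-frame work would go here; the real dpdk-crypto-input node dequeues
   * completed crypto ops and enqueues the owning buffers to their next
   * nodes. */
  return 0;
}

/* VLIB_NODE_FN builds per-architecture variants of this function and wires
 * the chosen one into the node at load time, so the registration no longer
 * sets .function and VLIB_NODE_FUNCTION_MULTIARCH is not needed. */
VLIB_NODE_FN (example_input_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return example_input_inline (vm, node, frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (example_input_node) = {
  .name = "example-input",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */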