diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c
index 84a307d..43c2c87 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev.c
@@ -579,14 +579,14 @@ cryptodev_assign_resource (cryptodev_engine_thread_t * cet,
        return -EBUSY;
 
       vec_foreach_index (idx, cmt->cryptodev_inst)
-      {
-       cinst = cmt->cryptodev_inst + idx;
-       if (cinst->dev_id == cet->cryptodev_id &&
-           cinst->q_id == cet->cryptodev_q)
-         break;
-      }
+       {
+         cinst = cmt->cryptodev_inst + idx;
+         if (cinst->dev_id == cet->cryptodev_id &&
+             cinst->q_id == cet->cryptodev_q)
+           break;
+       }
       /* invalid existing worker resource assignment */
-      if (idx == vec_len (cmt->cryptodev_inst))
+      if (idx >= vec_len (cmt->cryptodev_inst))
        return -EINVAL;
       clib_spinlock_lock (&cmt->tlock);
       clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
@@ -666,6 +666,90 @@ VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
     .function = cryptodev_show_assignment_fn,
 };
 
+static clib_error_t *
+cryptodev_show_cache_rings_fn (vlib_main_t *vm, unformat_input_t *input,
+                              vlib_cli_command_t *cmd)
+{
+  cryptodev_main_t *cmt = &cryptodev_main;
+  u32 thread_index = 0;
+  u16 i;
+  vec_foreach_index (thread_index, cmt->per_thread_data)
+    {
+      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
+      cryptodev_cache_ring_t *ring = &cet->cache_ring;
+      u16 head = ring->head;
+      u16 tail = ring->tail;
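+      /* number of frames currently held in the ring: distance from tail
+       * to head, modulo the power-of-two ring size */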
+      u16 n_cached = (CRYPTODEV_CACHE_QUEUE_SIZE - tail + head) &
+                    CRYPTODEV_CACHE_QUEUE_MASK;
+
+      u16 enq_head = ring->enq_head;
+      u16 deq_tail = ring->deq_tail;
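+      /* frames submitted to the cryptodev but not yet dequeued (inflight):
+       * distance from deq_tail to enq_head, modulo the ring size */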
+      u16 n_frames_inflight =
+       (enq_head == deq_tail) ?
+         0 :
+         ((CRYPTODEV_CACHE_QUEUE_SIZE + enq_head - deq_tail) &
+          CRYPTODEV_CACHE_QUEUE_MASK);
+      /* even if some elements of the dequeued frame are still pending
+       * dequeue, we consider the frame as processed */
+      u16 n_frames_processed =
+       ((tail == deq_tail) && (ring->frames[deq_tail].f == 0)) ?
+         0 :
+         (((CRYPTODEV_CACHE_QUEUE_SIZE - tail + deq_tail) &
+           CRYPTODEV_CACHE_QUEUE_MASK) +
+          1);
+      /* even if some elements of the enqueued frame are still pending
+       * enqueue, we consider the frame as enqueued */
+      u16 n_frames_pending =
+       (head == enq_head) ?
+         0 :
+         (((CRYPTODEV_CACHE_QUEUE_SIZE - enq_head + head) &
+           CRYPTODEV_CACHE_QUEUE_MASK) -
+          1);
+
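+      /* elements of the frames currently being enqueued/dequeued that are
+       * still waiting to be enqueued/dequeued */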
+      u16 elts_to_enq =
+       (ring->frames[enq_head].n_elts - ring->frames[enq_head].enq_elts_head);
+      u16 elts_to_deq =
+       (ring->frames[deq_tail].n_elts - ring->frames[deq_tail].deq_elts_tail);
+
+      u32 elts_total = 0;
+
+      for (i = 0; i < CRYPTODEV_CACHE_QUEUE_SIZE; i++)
+       elts_total += ring->frames[i].n_elts;
+
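+      /* when worker threads are configured, skip the main thread's ring */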
+      if (vlib_num_workers () > 0 && thread_index == 0)
+       continue;
+
+      vlib_cli_output (vm, "\n\n");
+      vlib_cli_output (vm, "Frames cached in the ring: %u", n_cached);
+      vlib_cli_output (vm, "Frames cached but not processed: %u",
+                      n_frames_pending);
+      vlib_cli_output (vm, "Frames inflight: %u", n_frames_inflight);
+      vlib_cli_output (vm, "Frames processed: %u", n_frames_processed);
+      vlib_cli_output (vm, "Elements total: %u", elts_total);
+      vlib_cli_output (vm, "Elements inflight: %u", cet->inflight);
+      vlib_cli_output (vm, "Head index: %u", head);
+      vlib_cli_output (vm, "Tail index: %u", tail);
+      vlib_cli_output (vm, "Current frame index beeing enqueued: %u",
+                      enq_head);
+      vlib_cli_output (vm, "Current frame index being dequeued: %u", deq_tail);
+      vlib_cli_output (vm,
+                      "Elements in current frame to be enqueued: %u, waiting "
+                      "to be enqueued: %u",
+                      ring->frames[enq_head].n_elts, elts_to_enq);
+      vlib_cli_output (vm,
+                      "Elements in current frame to be dequeued: %u, waiting "
+                      "to be dequeued: %u",
+                      ring->frames[deq_tail].n_elts, elts_to_deq);
+      vlib_cli_output (vm, "\n\n");
+    }
+  return 0;
+}
+
+VLIB_CLI_COMMAND (show_cryptodev_sw_rings, static) = {
+  .path = "show cryptodev cache status",
+  .short_help = "show status of all cryptodev cache rings",
+  .function = cryptodev_show_cache_rings_fn,
+};
+
 static clib_error_t *
 cryptodev_set_assignment_fn (vlib_main_t * vm, unformat_input_t * input,
                             vlib_cli_command_t * cmd)
@@ -1235,7 +1319,7 @@ dpdk_cryptodev_init (vlib_main_t * vm)
   vec_free (unique_drivers);
 #endif
 
-  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
+  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, n_workers);
   clib_spinlock_init (&cmt->tlock);
 
   vec_validate_aligned(cmt->per_thread_data, tm->n_vlib_mains - 1,