- vlib_frame_queue_t *fq;
- vlib_frame_queue_elt_t *elt;
- vlib_thread_main_t *tm = &vlib_thread_main;
- vlib_frame_queue_main_t *fqm =
- vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
- u64 new_tail;
-
- fq = fqm->vlib_frame_queues[index];
- ASSERT (fq);
-
- new_tail = __sync_add_and_fetch (&fq->tail, 1);
-
- /* Wait until a ring slot is available */
- while (new_tail >= fq->head_hint + fq->nelts)
- vlib_worker_thread_barrier_check ();
-
- elt = fq->elts + (new_tail & (fq->nelts - 1));
-
- /* this would be very bad... */
- while (elt->valid)
- ;
-
- elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
- elt->last_n_vectors = elt->n_vectors = 0;
-
- return elt;
-}
-
-/**
- * Check whether a worker's handoff frame queue is congested.
- *
- * frame_queue_index selects a vlib_frame_queue_main_t from
- * tm->frame_queue_mains; index selects the per-worker queue within it.
- * queue_hi_thresh is the occupancy (in elements) at or above which the
- * queue is treated as congested.  handoff_queue_by_worker_index is a
- * per-worker cache: entries hold ~0 when unset, or the queue pointer once
- * that queue has been observed congested.
- *
- * Returns the queue pointer when congested (freshly detected or cached),
- * or NULL when the queue is below the threshold.
- */
-static inline vlib_frame_queue_t *
-is_vlib_frame_queue_congested (u32 frame_queue_index,
-			       u32 index,
-			       u32 queue_hi_thresh,
-			       vlib_frame_queue_t **
-			       handoff_queue_by_worker_index)
-{
-  vlib_frame_queue_t *fq;
-  vlib_thread_main_t *tm = &vlib_thread_main;
-  vlib_frame_queue_main_t *fqm =
-    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
-
-  /* Fast path: this worker's queue was already marked congested earlier;
-   * return the cached pointer without re-inspecting the live queue. */
-  fq = handoff_queue_by_worker_index[index];
-  if (fq != (vlib_frame_queue_t *) (~0))
-    return fq;
-
-  fq = fqm->vlib_frame_queues[index];
-  ASSERT (fq);
-
-  /* NOTE(review): head_hint appears to be a consumer-side position hint
-   * (possibly stale), so tail - head_hint is an approximate occupancy;
-   * confirm against the queue implementation. */
-  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
-    {
-      /* a valid entry in the array will indicate the queue has reached
-       * the specified threshold and is congested
-       */
-      handoff_queue_by_worker_index[index] = fq;
-      /* Count the congestion event on the queue for stats/telemetry. */
-      fq->enqueue_full_events++;
-      return fq;
-    }
-
-  return NULL;
-}
-
-static inline vlib_frame_queue_elt_t *
-vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
- u32 vlib_worker_index,
- vlib_frame_queue_elt_t **
- handoff_queue_elt_by_worker_index)
-{
- vlib_frame_queue_elt_t *elt;
-
- if (handoff_queue_elt_by_worker_index[vlib_worker_index])
- return handoff_queue_elt_by_worker_index[vlib_worker_index];
-
- elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);
-
- handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;
-
- return elt;