}
}
+/**
+ * Reschedule a TX event for @a tc's session on the owning worker.
+ *
+ * Allocates a new event element on the connection's worker queue and fills
+ * in the session index and SESSION_IO_EVT_TX so the session node will
+ * attempt another transmit pass for this session.
+ *
+ * Must be called from the thread that owns the connection (asserted below).
+ *
+ * NOTE(review): function name carries the historical "sesssion" triple-s
+ * typo; it is the exported symbol, so it must not be renamed here.
+ *
+ * @param tc	transport connection whose session needs a TX event
+ */
+void
+sesssion_reschedule_tx (transport_connection_t * tc)
+{
+ session_worker_t *wrk = session_main_get_worker (tc->thread_index);
+ session_evt_elt_t *elt;
+
+ ASSERT (tc->thread_index == vlib_get_thread_index ());
+
+ elt = session_evt_alloc_new (wrk);
+ elt->evt.session_index = tc->s_index;
+ elt->evt.event_type = SESSION_IO_EVT_TX;
+}
+
/* NOTE(review): this span looks like a diff-context collage — a `static void`
 * signature followed by `return 0; }`, which cannot belong to the same
 * function (returning a value from a void function is a constraint
 * violation). The real body of session_program_transport_ctrl_evt was most
 * likely elided between hunks; verify against the full file before touching
 * these lines. */
static void
session_program_transport_ctrl_evt (session_t * s, session_evt_type_t evt)
{
return 0;
}
+/**
+ * Give the owning app worker a chance to resize/tune a session fifo.
+ *
+ * No-op unless the session opted in via SESSION_F_CUSTOM_FIFO_TUNING.
+ * When the flag is set, the enqueue/dequeue event (@a act, @a len) is
+ * forwarded to app_worker_session_fifo_tuning, which may grow or shrink
+ * @a f. On debug builds the result is sanity-checked against a 4kB floor
+ * and the segment manager's configured max_fifo_size.
+ *
+ * @param s	session owning the fifo
+ * @param f	rx or tx fifo being tuned
+ * @param act	what just happened (SESSION_FT_ACTION_ENQUEUED / _DEQUEUED)
+ * @param len	bytes involved in the action (callers may pass 0 — see the
+ *		ENQUEUED call sites below)
+ */
+void
+session_fifo_tuning (session_t * s, svm_fifo_t * f,
+ session_ft_action_t act, u32 len)
+{
+ if (s->flags & SESSION_F_CUSTOM_FIFO_TUNING)
+ {
+ app_worker_t *app_wrk = app_worker_get (s->app_wrk_index);
+ app_worker_session_fifo_tuning (app_wrk, s, f, act, len);
+ /* Debug-only bounds check: tuning must keep the fifo within
+ * [4096, segment manager max]. CLIB_ASSERT_ENABLE compiles this
+ * out entirely in release builds. */
+ if (CLIB_ASSERT_ENABLE)
+ {
+ segment_manager_t *sm;
+ sm = segment_manager_get (f->segment_manager);
+ ASSERT (f->size >= 4096);
+ ASSERT (f->size <= sm->max_fifo_size);
+ }
+ }
+}
+
/*
* Enqueue data for delivery to session peer. Does not notify peer of enqueue
* event but on request can queue notification events for later delivery by
s->flags |= SESSION_F_RX_EVT;
vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
}
+
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED, 0);
}
return enqueued;
s->flags |= SESSION_F_RX_EVT;
vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
}
+
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED, 0);
}
return enqueued;
}
u32 rv;
rv = svm_fifo_dequeue_drop (s->tx_fifo, max_bytes);
+ session_fifo_tuning (s, s->tx_fifo, SESSION_FT_ACTION_DEQUEUED, rv);
if (svm_fifo_needs_deq_ntf (s->tx_fifo, max_bytes))
session_dequeue_notify (s);
continue;
}
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED,
+ 0 /* TODO/not needed */ );
+
if (PREDICT_FALSE (session_enqueue_notify_inline (s)))
errors++;
}