mq = session_main_get_vpp_event_queue (thread_index);
if (PREDICT_FALSE (svm_msg_q_lock (mq)))
return -1;
- if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
+ if (PREDICT_FALSE (svm_msg_q_is_full (mq)
+ || svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
{
svm_msg_q_unlock (mq);
return -2;
if (!(s->flags & SESSION_F_CUSTOM_TX))
{
s->flags |= SESSION_F_CUSTOM_TX;
- if (svm_fifo_set_event (s->tx_fifo))
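+      /* A descheduled connection has no tx event pending with the scheduler,
+       * so program one even if the fifo event was already set */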
+ if (svm_fifo_set_event (s->tx_fifo)
+ || transport_connection_is_descheduled (tc))
{
session_worker_t *wrk;
session_evt_elt_t *elt;
wrk = session_main_get_worker (tc->thread_index);
elt = session_evt_alloc_old (wrk);
elt->evt.session_index = tc->s_index;
elt->evt.event_type = SESSION_IO_EVT_TX;
+ tc->flags &= ~TRANSPORT_CONNECTION_F_DESCHED;
}
}
}
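+/* Program a tx io event for the connection on its owning worker's event
+ * list. Must be called from the connection's thread. */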
+void
+sesssion_reschedule_tx (transport_connection_t * tc)
+{
+ session_worker_t *wrk = session_main_get_worker (tc->thread_index);
+ session_evt_elt_t *elt;
+
+ ASSERT (tc->thread_index == vlib_get_thread_index ());
+
+ elt = session_evt_alloc_new (wrk);
+ elt->evt.session_index = tc->s_index;
+ elt->evt.event_type = SESSION_IO_EVT_TX;
+}
+
static void
session_program_transport_ctrl_evt (session_t * s, session_evt_type_t evt)
{
session_free_w_fifos (s);
}
-static session_t *
+session_t *
session_alloc_for_connection (transport_connection_t * tc)
{
session_t *s;
return 0;
}
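+/* Give the app a chance to tune fifo sizes on enqueue/dequeue, if it
+ * requested custom fifo tuning; debug images sanity check the result */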
+void
+session_fifo_tuning (session_t * s, svm_fifo_t * f,
+ session_ft_action_t act, u32 len)
+{
+ if (s->flags & SESSION_F_CUSTOM_FIFO_TUNING)
+ {
+ app_worker_t *app_wrk = app_worker_get (s->app_wrk_index);
+ app_worker_session_fifo_tuning (app_wrk, s, f, act, len);
+ if (CLIB_ASSERT_ENABLE)
+ {
+ segment_manager_t *sm;
+ sm = segment_manager_get (f->segment_manager);
+ ASSERT (f->size >= 4096);
+ ASSERT (f->size <= sm->max_fifo_size);
+ }
+ }
+}
+
/*
* Enqueue data for delivery to session peer. Does not notify peer of enqueue
* event but on request can queue notification events for later delivery by
s->flags |= SESSION_F_RX_EVT;
vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
}
+
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED, 0);
}
return enqueued;
s->flags |= SESSION_F_RX_EVT;
vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
}
+
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED, 0);
}
return enqueued;
}
u32 rv;
rv = svm_fifo_dequeue_drop (s->tx_fifo, max_bytes);
+ session_fifo_tuning (s, s->tx_fifo, SESSION_FT_ACTION_DEQUEUED, rv);
if (svm_fifo_needs_deq_ntf (s->tx_fifo, max_bytes))
session_dequeue_notify (s);
continue;
}
+ session_fifo_tuning (s, s->rx_fifo, SESSION_FT_ACTION_ENQUEUED,
+ 0 /* TODO/not needed */ );
+
if (PREDICT_FALSE (session_enqueue_notify_inline (s)))
errors++;
}
if (!app_wrk)
return -1;
s->session_state = SESSION_STATE_ACCEPTING;
- return app_worker_accept_notify (app_wrk, s);
+ if (app_worker_accept_notify (app_wrk, s))
+ {
+      /* On transport delete, no notifications should be sent unless the
+       * accept is retried and succeeds. */
+ s->session_state = SESSION_STATE_CREATED;
+ return -1;
+ }
+ return 0;
}
/**
s->thread_index);
/* Since we called cleanup, no delete notification will come. So, make
* sure the session is properly freed. */
- session_free_w_fifos (s);
+ segment_manager_dealloc_fifos (s->rx_fifo, s->tx_fifo);
+ session_free (s);
}
/**
{
u32 evt_q_length = 2048, evt_size = sizeof (session_event_t);
ssvm_private_t *eqs = &smm->evt_qs_segment;
- api_main_t *am = &api_main;
uword eqs_size = 64 << 20;
pid_t vpp_pid = getpid ();
void *oldheap;
if (smm->evt_qs_use_memfd_seg)
oldheap = ssvm_push_heap (eqs->sh);
else
- oldheap = svm_push_data_heap (am->vlib_rp);
+ oldheap = vl_msg_push_heap ();
for (i = 0; i < vec_len (smm->wrk); i++)
{
if (smm->evt_qs_use_memfd_seg)
ssvm_pop_heap (oldheap);
else
- svm_pop_heap (oldheap);
+ vl_msg_pop_heap (oldheap);
}
ssvm_private_t *
};
/* *INDENT-ON* */
-/**
- * Initialize session layer for given transport proto and ip version
- *
- * Allocates per session type (transport proto + ip version) data structures
- * and adds arc from session queue node to session type output node.
- */
void
session_register_transport (transport_proto_t transport_proto,
const transport_proto_vft_t * vft, u8 is_ip4,
session_tx_fns[vft->transport_options.tx_type];
}
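+/* Register a new transport proto type at runtime and grow per-worker
+ * state to accommodate it */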
+transport_proto_t
+session_add_transport_proto (void)
+{
+ session_main_t *smm = &session_main;
+ session_worker_t *wrk;
+ u32 thread;
+
+ smm->last_transport_proto_type += 1;
+
+ for (thread = 0; thread < vec_len (smm->wrk); thread++)
+ {
+ wrk = session_main_get_worker (thread);
+ vec_validate (wrk->session_to_enqueue, smm->last_transport_proto_type);
+ }
+
+ return smm->last_transport_proto_type;
+}
+
transport_connection_t *
session_get_transport (session_t * s)
{
if (num_threads < 1)
return clib_error_return (0, "n_thread_stacks not set");
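+  /* Transport protos registered at runtime (see session_add_transport_proto)
+   * get type ids past the last statically defined one */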
+ smm->last_transport_proto_type = TRANSPORT_PROTO_QUIC;
/* Allocate cache line aligned worker contexts */
vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES);
wrk->vm = vlib_mains[i];
wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;
+ vec_validate (wrk->session_to_enqueue, smm->last_transport_proto_type);
if (num_threads > 1)
clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);