a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
a->sep.transport_proto = mp->proto;
a->sep_ext.ckpair_index = mp->ckpair_index;
+ a->sep_ext.crypto_engine = mp->crypto_engine;
a->app_index = app->app_index;
a->wrk_map_index = mp->wrk_index;
a->sep.port = mp->port;
a->sep.transport_proto = mp->proto;
a->sep.peer.fib_index = mp->vrf;
+ clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
a->sep_ext.parent_handle = mp->parent_handle;
a->sep_ext.ckpair_index = mp->ckpair_index;
+ a->sep_ext.crypto_engine = mp->crypto_engine;
+ a->sep_ext.flags = mp->flags;
if (mp->hostname_len)
{
vec_validate (a->sep_ext.hostname, mp->hostname_len - 1);
session_parse_handle (mp->handle, &index, &thread_index);
s = session_get_if_valid (index, thread_index);
- /* Session was already closed or already cleaned up */
- if (!s || s->session_state != SESSION_STATE_TRANSPORT_CLOSING)
+ /* Session no longer exists or is not yet in a closing state */
+ if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
return;
app_wrk = app_worker_get (s->app_wrk_index);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);
- s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
+ s = format (s, "session index %d thread index %d",
t->session_index, t->server_thread_index);
return s;
}
for (i = 0; i < clib_min (n_trace, n_segs); i++)
{
- b = vlib_get_buffer (vm, to_next[i - n_segs]);
+ b = vlib_get_buffer (vm, to_next[i]);
vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ );
t = vlib_add_trace (vm, node, b, sizeof (*t));
t->session_index = s->session_index;
TRANSPORT_MAX_HDRS_LEN);
}
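+/* Unset the tx fifo event and, if the producer has enqueued more data than
+ * what was already read (or peeked), re-arm the event and reschedule the
+ * session at the head of the old events list. */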
+always_inline void
+session_tx_maybe_reschedule (session_worker_t * wrk,
+ session_tx_context_t * ctx,
+ session_evt_elt_t * elt, u8 is_peek)
+{
+ session_t *s = ctx->s;
+
+ svm_fifo_unset_event (s->tx_fifo);
+  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > (is_peek ? ctx->tx_offset : 0))
+ if (svm_fifo_set_event (s->tx_fifo))
+ session_evt_add_head_old (wrk, elt);
+}
+
always_inline int
session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
vlib_node_runtime_t * node,
}
ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc);
- ctx->snd_space = transport_connection_snd_space (ctx->tc,
- vm->clib_time.
- last_cpu_time,
- ctx->snd_mss);
-
- if (ctx->snd_space == 0 || ctx->snd_mss == 0)
+ if (PREDICT_FALSE (ctx->snd_mss == 0))
{
session_evt_add_old (wrk, elt);
return SESSION_TX_NO_DATA;
}
- /* Allow enqueuing of a new event */
- svm_fifo_unset_event (ctx->s->tx_fifo);
+ ctx->snd_space = transport_connection_snd_space (ctx->tc);
+
+ /* This flow queue is "empty" so it should be re-evaluated before
+ * the ones that have data to send. */
+ if (!ctx->snd_space)
+ {
+ session_evt_add_head_old (wrk, elt);
+ return SESSION_TX_NO_DATA;
+ }
+
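+  /* For paced connections, cap the send space to the pacer's current burst
+   * allowance and, when large enough, round it down to a multiple of
+   * snd_mss. Bursts under TRANSPORT_PACER_MIN_BURST are rescheduled. */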
+ if (transport_connection_is_tx_paced (ctx->tc))
+ {
+ u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
+ if (snd_space < TRANSPORT_PACER_MIN_BURST)
+ {
+ session_evt_add_head_old (wrk, elt);
+ return SESSION_TX_NO_DATA;
+ }
+ snd_space = clib_min (ctx->snd_space, snd_space);
+ ctx->snd_space = snd_space >= ctx->snd_mss ?
+ snd_space - snd_space % ctx->snd_mss : snd_space;
+ }
/* Check how much we can pull. */
session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);
if (PREDICT_FALSE (!ctx->max_len_to_snd))
{
- transport_connection_tx_pacer_reset_bucket (ctx->tc,
- vm->clib_time.
- last_cpu_time);
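+      /* Nothing to send this dispatch. Reset the pacer bucket and let
+       * session_tx_maybe_reschedule decide if the session must be retried */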
+ transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
+ session_tx_maybe_reschedule (wrk, ctx, elt, peek_data);
return SESSION_TX_NO_DATA;
}
{
if (n_bufs)
vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
- session_evt_add_old (wrk, elt);
+ session_evt_add_head_old (wrk, elt);
vlib_node_increment_counter (wrk->vm, node->node_index,
SESSION_QUEUE_ERROR_NO_BUFFER, 1);
return SESSION_TX_NO_BUFFERS;
SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
ctx->s->tx_fifo->has_event, wrk->last_vlib_time);
- /* If we couldn't dequeue all bytes mark as partially read */
ASSERT (ctx->left_to_snd == 0);
+
+  /* If we couldn't dequeue all bytes, reschedule as an old flow. Otherwise,
+   * check if the application enqueued more data and reschedule accordingly */
if (ctx->max_len_to_snd < ctx->max_dequeue)
- if (svm_fifo_set_event (ctx->s->tx_fifo))
- session_evt_add_old (wrk, elt);
+ session_evt_add_old (wrk, elt);
+ else
+ session_tx_maybe_reschedule (wrk, ctx, elt, peek_data);
if (!peek_data
&& ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
case SESSION_IO_EVT_TX:
s = session_event_get_session (e, thread_index);
if (PREDICT_FALSE (!s))
- {
- clib_warning ("session %u was freed!", e->session_index);
- break;
- }
+ break;
CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
wrk->ctx.s = s;
/* Spray packets in per session type frames, since they go to
u32 thread_index = vm->thread_index, n_to_dequeue;
session_worker_t *wrk = &smm->wrk[thread_index];
session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
+ clib_llist_index_t ei, next_ei, old_ti;
svm_msg_q_msg_t _msg, *msg = &_msg;
- clib_llist_index_t old_ti;
- int i, n_tx_packets = 0;
+ int i, n_tx_packets;
session_event_t *evt;
svm_msg_q_t *mq;
SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);
wrk->last_vlib_time = vlib_time_now (vm);
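+  /* Also cache the dispatch time converted to microseconds */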
+ wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;
/*
* Update transport time
*/
transport_update_time (wrk->last_vlib_time, thread_index);
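+  /* Buffers still pending from the previous dispatch count towards this
+   * node's per-dispatch packet budget */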
+ n_tx_packets = vec_len (wrk->pending_tx_buffers);
/*
* Dequeue and handle new events
*/
new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head);
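+  /* Remember the current tail of the old events list. Only events already
+   * queued at this point are serviced below; events rescheduled during this
+   * dispatch are left for the next one */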
+ old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
+ old_ti = clib_llist_prev_index (old_he, evt_list);
- /* *INDENT-OFF* */
- clib_llist_foreach_safe (wrk->event_elts, evt_list, new_he, elt, ({
- session_evt_type_t et;
-
- et = elt->evt.event_type;
- clib_llist_remove (wrk->event_elts, evt_list, elt);
-
- /* Postpone tx events if we can't handle them this dispatch cycle */
- if (n_tx_packets >= VLIB_FRAME_SIZE
- && (et == SESSION_IO_EVT_TX || et == SESSION_IO_EVT_TX_FLUSH))
- {
- clib_llist_add (wrk->event_elts, evt_list, elt, new_he);
- continue;
- }
-
- session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
- }));
- /* *INDENT-ON* */
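+  /* Dispatch new io events while the frame's packet budget allows. Events
+   * not handled now stay on the list and are picked up next dispatch */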
+ ei = clib_llist_next_index (new_he, evt_list);
+ while (ei != wrk->new_head && n_tx_packets < VLIB_FRAME_SIZE)
+ {
+ elt = pool_elt_at_index (wrk->event_elts, ei);
+ ei = clib_llist_next_index (elt, evt_list);
+ clib_llist_remove (wrk->event_elts, evt_list, elt);
+ session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
+ }
/*
- * Handle the old io events
+ * Handle the old io events, if we had any prior to processing the new ones
*/
- old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
- old_ti = clib_llist_prev_index (old_he, evt_list);
-
- while (n_tx_packets < VLIB_FRAME_SIZE
- && !clib_llist_is_empty (wrk->event_elts, evt_list, old_he))
+ if (old_ti != wrk->old_head)
{
- clib_llist_index_t ei;
+ old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
+ ei = clib_llist_next_index (old_he, evt_list);
- clib_llist_pop_first (wrk->event_elts, evt_list, elt, old_he);
- ei = clib_llist_entry_index (wrk->event_elts, elt);
- session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
+ while (n_tx_packets < VLIB_FRAME_SIZE)
+ {
+ elt = pool_elt_at_index (wrk->event_elts, ei);
+ next_ei = clib_llist_next_index (elt, evt_list);
+ clib_llist_remove (wrk->event_elts, evt_list, elt);
- old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
- if (ei == old_ti)
- break;
- };
+ session_event_dispatch_io (wrk, node, elt, thread_index,
+ &n_tx_packets);
+
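+	  /* Stop once the element that was the tail at the start of the
+	   * dispatch has been handled */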
+ if (ei == old_ti)
+ break;
+
+ ei = next_ei;
+	}
+ }
if (vec_len (wrk->pending_tx_buffers))
session_flush_pending_tx_buffers (wrk, node);
session_main_t *sm = &session_main;
if (!sm->wrk[0].vpp_event_queue)
return 0;
+ node = vlib_node_get_runtime (vm, session_queue_node.index);
return session_queue_node_fn (vm, node, frame);
}