static quic_main_t quic_main;
static void quic_update_timer (quic_ctx_t * ctx);
-static int quic_check_quic_session_connected (quic_ctx_t * ctx);
+static void quic_check_quic_session_connected (quic_ctx_t * ctx);
+static int quic_reset_connection (u64 udp_session_handle,
+ quic_rx_packet_ctx_t * pctx);
+static void quic_proto_on_close (u32 ctx_index, u32 thread_index);
+
static quicly_stream_open_t on_stream_open;
static quicly_closed_by_peer_t on_closed_by_peer;
static quicly_now_t quicly_vpp_now_cb;
quicly_ctx->transport_params.max_data = QUIC_INT_MAX;
quicly_ctx->transport_params.max_streams_uni = (uint64_t) 1 << 60;
quicly_ctx->transport_params.max_streams_bidi = (uint64_t) 1 << 60;
+ quicly_ctx->transport_params.idle_timeout = qm->connection_timeout;
/* max_enq is FIFO_SIZE - 1 */
max_enq = app->sm_properties.rx_fifo_size - 1;
return VNET_API_ERROR_MISSING_CERT_KEY;
}
-
/* Helper functions */
static u32
{
QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
u32 thread_index = ctx->c_thread_index;
- ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
+ QUIC_ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
if (CLIB_DEBUG)
clib_memset (ctx, 0xfb, sizeof (*ctx));
pool_put (quic_main.ctx_pool[thread_index], ctx);
return (ctx->flags & QUIC_F_IS_LISTENER);
}
+/* A quic ctx is a connection iff it is neither a listener nor a stream */
+static inline int
+quic_ctx_is_conn (quic_ctx_t * ctx)
+{
+ return !(quic_ctx_is_listener (ctx) || quic_ctx_is_stream (ctx));
+}
+
static session_t *
get_stream_session_from_stream (quicly_stream_t * stream)
{
sctx = quic_ctx_get (stream_session->connection_index,
stream_session->thread_index);
- ASSERT (quic_ctx_is_stream (sctx));
+ QUIC_ASSERT (quic_ctx_is_stream (sctx));
stream = sctx->stream;
stream_data = (quic_stream_data_t *) stream->data;
f = stream_session->rx_fifo;
max_deq = svm_fifo_max_dequeue (f);
- ASSERT (stream_data->app_rx_data_len >= max_deq);
+ QUIC_ASSERT (stream_data->app_rx_data_len >= max_deq);
quicly_stream_sync_recvbuf (stream, stream_data->app_rx_data_len - max_deq);
QUIC_DBG (3, "Acking %u bytes", stream_data->app_rx_data_len - max_deq);
stream_data->app_rx_data_len = max_deq;
QUIC_DBG (2, "Deleting connection %u", ctx->c_c_index);
- ASSERT (!quic_ctx_is_stream (ctx));
+ QUIC_ASSERT (!quic_ctx_is_stream (ctx));
quic_stop_ctx_timer (ctx);
/* Delete the connection from the connection map */
conn = ctx->conn;
+ ctx->conn = NULL;
quic_make_connection_key (&kv, quicly_get_master_id (conn));
QUIC_DBG (2, "Deleting conn with id %lu %lu from map", kv.key[0],
kv.key[1]);
if (ctx->conn)
quicly_free (ctx->conn);
- ctx->conn = NULL;
session_transport_delete_notify (&ctx->connection);
}
/* App already confirmed close, we can delete the connection */
quic_connection_delete (ctx);
break;
- case QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED:
- QUIC_DBG (0, "BUG");
- break;
+ case QUIC_CONN_STATE_OPENED:
+ case QUIC_CONN_STATE_HANDSHAKE:
case QUIC_CONN_STATE_ACTIVE_CLOSING:
quic_connection_delete (ctx);
break;
/* Read dest address from quicly-provided sockaddr */
if (hdr.is_ip4)
{
- ASSERT (packet->dest.sa.sa_family == AF_INET);
+ QUIC_ASSERT (packet->dest.sa.sa_family == AF_INET);
struct sockaddr_in *sa4 = (struct sockaddr_in *) &packet->dest.sa;
hdr.rmt_port = sa4->sin_port;
hdr.rmt_ip.ip4.as_u32 = sa4->sin_addr.s_addr;
}
else
{
- ASSERT (packet->dest.sa.sa_family == AF_INET6);
+ QUIC_ASSERT (packet->dest.sa.sa_family == AF_INET6);
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &packet->dest.sa;
hdr.rmt_port = sa6->sin6_port;
clib_memcpy (&hdr.rmt_ip.ip6, &sa6->sin6_addr, 16);
if (quic_ctx_is_stream (ctx))
ctx = quic_ctx_get (ctx->quic_connection_ctx_id, ctx->c_thread_index);
- ASSERT (!quic_ctx_is_stream (ctx));
+ QUIC_ASSERT (!quic_ctx_is_stream (ctx));
udp_session = session_get_from_handle_if_valid (ctx->udp_session_handle);
if (!udp_session)
stream_session->session_state = SESSION_STATE_CLOSED;
session_transport_delete_notify (&sctx->connection);
+ quic_increment_counter (QUIC_ERROR_CLOSED_STREAM, 1);
quic_ctx_free (sctx);
clib_mem_free (stream->data);
}
size_t len)
{
QUIC_DBG (3, "received data: %lu bytes, offset %lu", len, off);
- u32 max_enq;
+ u32 max_enq, rlen, rv;
quic_ctx_t *sctx;
session_t *stream_session;
app_worker_t *app_wrk;
svm_fifo_t *f;
quic_stream_data_t *stream_data;
- int rlen, rv;
stream_data = (quic_stream_data_t *) stream->data;
sctx = quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
stream_session->app_wrk_index,
stream_session->thread_index, f, len, rlen, off, max_enq);
stream_data->app_rx_data_len += rlen;
- ASSERT (rlen >= len);
+ QUIC_ASSERT (rlen >= len);
app_wrk = app_worker_get_if_valid (stream_session->app_wrk_index);
if (PREDICT_TRUE (app_wrk != 0))
{
rlen = svm_fifo_enqueue_with_offset (f,
off - stream_data->app_rx_data_len,
len, (u8 *) src);
- ASSERT (rlen == 0);
+ QUIC_ASSERT (rlen == 0);
}
return 0;
}
+/* Presumably quicly's egress "shift" callback: the first @delta bytes of
+ * the stream's send buffer are no longer needed. Drop them from the
+ * session tx fifo, shrink app_tx_data_len (bytes currently exposed to
+ * quicly), and resync quicly's sendbuf bookkeeping. */
void
quic_fifo_egress_shift (quicly_stream_t * stream, size_t delta)
{
+ quic_stream_data_t *stream_data;
session_t *stream_session;
svm_fifo_t *f;
- int rv;
+ u32 rv;
+ stream_data = (quic_stream_data_t *) stream->data;
stream_session = get_stream_session_from_stream (stream);
f = stream_session->tx_fifo;
+ QUIC_ASSERT (stream_data->app_tx_data_len >= delta);
+ stream_data->app_tx_data_len -= delta;
rv = svm_fifo_dequeue_drop (f, delta);
- ASSERT (rv == delta);
+ QUIC_ASSERT (rv == delta);
+
+ rv = quicly_stream_sync_sendbuf (stream, 0);
+ QUIC_ASSERT (!rv);
}
int
quic_fifo_egress_emit (quicly_stream_t * stream, size_t off, void *dst,
size_t * len, int *wrote_all)
{
+ u32 deq_max, first_deq, max_rd_chunk, rem_offset;
+ quic_stream_data_t *stream_data;
session_t *stream_session;
svm_fifo_t *f;
- u32 deq_max, first_deq, max_rd_chunk, rem_offset;
+ stream_data = (quic_stream_data_t *) stream->data;
stream_session = get_stream_session_from_stream (stream);
f = stream_session->tx_fifo;
QUIC_DBG (3, "Emitting %u, offset %u", *len, off);
deq_max = svm_fifo_max_dequeue_cons (f);
- ASSERT (off <= deq_max);
+ QUIC_ASSERT (off <= deq_max);
if (off + *len < deq_max)
{
*wrote_all = 0;
{
*wrote_all = 1;
*len = deq_max - off;
- QUIC_DBG (3, "Wrote ALL, %u", *len);
}
+ QUIC_ASSERT (*len > 0);
+
+ if (off + *len > stream_data->app_tx_data_len)
+ stream_data->app_tx_data_len = off + *len;
/* TODO, use something like : return svm_fifo_peek (f, off, *len, dst); */
max_rd_chunk = svm_fifo_max_read_chunk (f);
static int
quic_on_stream_open (quicly_stream_open_t * self, quicly_stream_t * stream)
{
+ /* The return code of this function is handled in one of two places:
+ * - in quicly_receive: any error other than QUICLY_ERROR_PACKET_IGNORED
+ * closes the connection
+ * - in quicly_open_stream: the code is returned directly to the caller
+ */
+
session_t *stream_session, *quic_session;
quic_stream_data_t *stream_data;
app_worker_t *app_wrk;
stream_data->ctx_id = sctx_id;
stream_data->thread_index = sctx->c_thread_index;
stream_data->app_rx_data_len = 0;
+ stream_data->app_tx_data_len = 0;
sctx->c_s_index = stream_session->session_index;
stream_session->session_state = SESSION_STATE_CREATED;
if ((rv = app_worker_init_connected (app_wrk, stream_session)))
{
QUIC_ERR ("failed to allocate fifos");
- session_free (stream_session);
quicly_reset_stream (stream, QUIC_APP_ALLOCATION_ERROR);
return 0; /* Frame is still valid */
}
if ((rv = app_worker_accept_notify (app_wrk, stream_session)))
{
QUIC_ERR ("failed to notify accept worker app");
- session_free_w_fifos (stream_session);
quicly_reset_stream (stream, QUIC_APP_ACCEPT_NOTIFY_ERROR);
return 0; /* Frame is still valid */
}
QUIC_DBG (2, "Stream open failed with %d", rv);
return -1;
}
+ quic_increment_counter (QUIC_ERROR_OPENED_STREAM, 1);
+
sctx->stream = stream;
QUIC_DBG (2, "Opened stream %d, creating session", stream->stream_id);
session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, qctx->udp_is_ip4);
sctx->c_s_index = stream_session->session_index;
+ stream_data = (quic_stream_data_t *) stream->data;
+ stream_data->ctx_id = sctx->c_c_index;
+ stream_data->thread_index = sctx->c_thread_index;
+ stream_data->app_rx_data_len = 0;
+ stream_data->app_tx_data_len = 0;
+ stream_session->session_state = SESSION_STATE_READY;
+ /* For now we only reset streams. Cleanup will be triggered by timers */
if (app_worker_init_connected (app_wrk, stream_session))
{
QUIC_ERR ("failed to app_worker_init_connected");
- quicly_reset_stream (stream, QUIC_APP_ALLOCATION_ERROR);
- session_free_w_fifos (stream_session);
- quic_ctx_free (sctx);
+ quicly_reset_stream (stream, QUIC_APP_CONNECT_NOTIFY_ERROR);
return app_worker_connect_notify (app_wrk, NULL, opaque);
}
SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
- stream_session->session_state = SESSION_STATE_READY;
if (app_worker_connect_notify (app_wrk, stream_session, opaque))
{
QUIC_ERR ("failed to notify app");
+ quic_increment_counter (QUIC_ERROR_CLOSED_STREAM, 1);
quicly_reset_stream (stream, QUIC_APP_CONNECT_NOTIFY_ERROR);
- session_free_w_fifos (stream_session);
- quic_ctx_free (sctx);
return -1;
}
- stream_data = (quic_stream_data_t *) stream->data;
- stream_data->ctx_id = sctx->c_c_index;
- stream_data->thread_index = sctx->c_thread_index;
- stream_data->app_rx_data_len = 0;
+
return 0;
}
switch (ctx->conn_state)
{
+ case QUIC_CONN_STATE_OPENED:
+ case QUIC_CONN_STATE_HANDSHAKE:
case QUIC_CONN_STATE_READY:
ctx->conn_state = QUIC_CONN_STATE_ACTIVE_CLOSING;
quicly_conn_t *conn = ctx->conn;
/* Start connection closing. Keep sending packets until quicly_send
returns QUICLY_ERROR_FREE_CONNECTION */
+
+ quic_increment_counter (QUIC_ERROR_CLOSED_CONNECTION, 1);
quicly_close (conn, QUIC_APP_ERROR_CLOSE_NOTIFY, "Closed by peer");
/* This also causes all streams to be closed (and the cb called) */
quic_send_packets (ctx);
case QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED:
quic_connection_delete (ctx);
break;
+ case QUIC_CONN_STATE_ACTIVE_CLOSING:
+ break;
default:
- QUIC_DBG (0, "BUG");
+ QUIC_ERR ("Trying to close conn in state %d", ctx->conn_state);
break;
}
}
QUIC_DBG (2, "Called quic_stop_listen");
quic_ctx_t *lctx;
lctx = quic_ctx_get (lctx_index, 0);
- ASSERT (quic_ctx_is_listener (lctx));
+ QUIC_ASSERT (quic_ctx_is_listener (lctx));
vnet_unlisten_args_t a = {
.handle = lctx->udp_session_handle,
.app_index = quic_main.app_index,
}
}
-static int
+static void
quic_on_quic_session_connected (quic_ctx_t * ctx)
{
session_t *quic_session;
u32 thread_index = ctx->c_thread_index;
int rv;
- app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_id);
- if (!app_wrk)
- {
- quic_disconnect_transport (ctx);
- return 0;
- }
-
quic_session = session_alloc (thread_index);
QUIC_DBG (2, "Allocated quic session 0x%lx", session_handle (quic_session));
quic_session->session_type =
session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
+ /* If quic session connected fails, immediately close connection */
+ app_wrk = app_worker_get (ctx->parent_app_wrk_id);
if (app_worker_init_connected (app_wrk, quic_session))
{
QUIC_ERR ("failed to app_worker_init_connected");
quic_proto_on_close (ctx_id, thread_index);
- return app_worker_connect_notify (app_wrk, NULL, ctx->client_opaque);
+ app_worker_connect_notify (app_wrk, NULL, ctx->client_opaque);
+ return;
}
quic_session->session_state = SESSION_STATE_CONNECTING;
{
QUIC_ERR ("failed to notify app %d", rv);
quic_proto_on_close (ctx_id, thread_index);
- return -1;
+ return;
}
/* If the app opens a stream in its callback it may invalidate ctx */
*/
quic_session = session_get (ctx->c_s_index, thread_index);
quic_session->session_state = SESSION_STATE_LISTENING;
-
- return 0;
}
-static int
+static void
quic_check_quic_session_connected (quic_ctx_t * ctx)
{
/* Called when we need to trigger quic session connected
/* Conn may be set to null if the connection is terminated */
if (!ctx->conn || ctx->conn_state != QUIC_CONN_STATE_HANDSHAKE)
- return 0;
+ return;
if (!quicly_connection_is_ready (ctx->conn))
- return 0;
+ return;
ctx->conn_state = QUIC_CONN_STATE_READY;
if (!quicly_is_client (ctx->conn))
- return 0;
- return quic_on_quic_session_connected (ctx);
+ return;
+ quic_on_quic_session_connected (ctx);
}
static void
QUIC_DBG (2, "Transferring conn %u to thread %u", ctx_index, dest_thread);
temp_ctx = clib_mem_alloc (sizeof (quic_ctx_t));
- ASSERT (temp_ctx);
+ QUIC_ASSERT (temp_ctx != NULL);
ctx = quic_ctx_get (ctx_index, thread_index);
clib_memcpy (temp_ctx, ctx, sizeof (quic_ctx_t));
quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
ret = quicly_connect (&ctx->conn, quicly_ctx, (char *) ctx->srv_hostname,
- sa, NULL, &quic_main.next_cid, ptls_iovec_init (NULL,
- 0),
- &quic_main.hs_properties, NULL);
- ++quic_main.next_cid.master_id;
+ sa, NULL, &quic_main.wrk_ctx[thread_index].next_cid,
+ ptls_iovec_init (NULL, 0), &quic_main.hs_properties,
+ NULL);
+ ++quic_main.wrk_ctx[thread_index].next_cid.master_id;
/* Save context handle in quicly connection */
quic_store_conn_ctx (ctx->conn, ctx);
assert (ret == 0);
u32 new_thread = session_thread_from_handle (new_sh);
quic_ctx_t *ctx;
- QUIC_ERR ("Session %x migrated to %lx", s->session_index, new_sh);
- ASSERT (vlib_get_thread_index () == s->thread_index);
+ QUIC_DBG (2, "Session %x migrated to %lx", s->session_index, new_sh);
+ QUIC_ASSERT (vlib_get_thread_index () == s->thread_index);
ctx = quic_ctx_get (s->opaque, s->thread_index);
- ASSERT (ctx->udp_session_handle == session_handle (s));
+ QUIC_ASSERT (ctx->udp_session_handle == session_handle (s));
ctx->udp_session_handle = new_sh;
#if QUIC_DEBUG >= 1
quic_custom_tx_callback (void *s, u32 max_burst_size)
{
session_t *stream_session = (session_t *) s;
+ quic_stream_data_t *stream_data;
quicly_stream_t *stream;
quic_ctx_t *ctx;
+ u32 max_deq;
int rv;
if (PREDICT_FALSE
QUIC_DBG (3, "Stream TX event");
quic_ack_rx_data (stream_session);
- if (!svm_fifo_max_dequeue (stream_session->tx_fifo))
- return 0;
-
stream = ctx->stream;
if (!quicly_sendstate_is_open (&stream->sendstate))
{
return -1;
}
- if ((rv = quicly_stream_sync_sendbuf (stream, 1)) != 0)
- return rv;
+ stream_data = (quic_stream_data_t *) stream->data;
+ max_deq = svm_fifo_max_dequeue (stream_session->tx_fifo);
+ QUIC_ASSERT (max_deq >= stream_data->app_tx_data_len);
+ if (max_deq == stream_data->app_tx_data_len)
+ {
+ QUIC_DBG (3, "TX but no data %d / %d", max_deq,
+ stream_data->app_tx_data_len);
+ return 0;
+ }
+ stream_data->app_tx_data_len = max_deq;
+ rv = quicly_stream_sync_sendbuf (stream, 1);
+ QUIC_ASSERT (!rv);
tx_end:
quic_send_packets (ctx);
return 0;
}
-
/*
* Returns 0 if a matching connection is found and is on the right thread.
* Otherwise returns -1.
static inline int
quic_find_packet_ctx (quic_rx_packet_ctx_t * pctx, u32 caller_thread_index)
{
- quic_ctx_t *ctx_;
- quicly_conn_t *conn_;
clib_bihash_kv_16_8_t kv;
clib_bihash_16_8_t *h;
+ quic_ctx_t *ctx;
+ u32 index, thread_id;
h = &quic_main.connection_hash;
quic_make_connection_key (&kv, &pctx->packet.cid.dest.plaintext);
QUIC_DBG (3, "Searching conn with id %lu %lu", kv.key[0], kv.key[1]);
- if (clib_bihash_search_16_8 (h, &kv, &kv) == 0)
+ if (clib_bihash_search_16_8 (h, &kv, &kv))
{
- u32 index = kv.value & UINT32_MAX;
- u32 thread_id = kv.value >> 32;
- /* Check if this connection belongs to this thread, otherwise
- * ask for it to be moved */
- if (thread_id != caller_thread_index)
- {
- QUIC_DBG (2, "Connection is on wrong thread");
- /* Cannot make full check with quicly_is_destination... */
- pctx->ctx_index = index;
- pctx->thread_index = thread_id;
- return QUIC_PACKET_TYPE_MIGRATE;
- }
- ctx_ = quic_ctx_get (index, vlib_get_thread_index ());
- conn_ = ctx_->conn;
- if (conn_
- && quicly_is_destination (conn_, NULL, &pctx->sa, &pctx->packet))
- {
- QUIC_DBG (3, "Connection found");
- pctx->ctx_index = index;
- pctx->thread_index = thread_id;
- return QUIC_PACKET_TYPE_RECEIVE;
- }
+ QUIC_DBG (3, "connection not found");
+ return QUIC_PACKET_TYPE_NONE;
+ }
+
+ index = kv.value & UINT32_MAX;
+ thread_id = kv.value >> 32;
+ /* Check if this connection belongs to this thread, otherwise
+ * ask for it to be moved */
+ if (thread_id != caller_thread_index)
+ {
+ QUIC_DBG (2, "Connection is on wrong thread");
+ /* Cannot make full check with quicly_is_destination... */
+ pctx->ctx_index = index;
+ pctx->thread_index = thread_id;
+ return QUIC_PACKET_TYPE_MIGRATE;
+ }
+ ctx = quic_ctx_get (index, vlib_get_thread_index ());
+ if (!ctx->conn)
+ {
+ QUIC_ERR ("ctx has no conn");
+ return QUIC_PACKET_TYPE_NONE;
}
- QUIC_DBG (3, "connection not found");
- return QUIC_PACKET_TYPE_NONE;
+ if (!quicly_is_destination (ctx->conn, NULL, &pctx->sa, &pctx->packet))
+ return QUIC_PACKET_TYPE_NONE;
+
+ QUIC_DBG (3, "Connection found");
+ pctx->ctx_index = index;
+ pctx->thread_index = thread_id;
+ return QUIC_PACKET_TYPE_RECEIVE;
}
static int
if (ctx->c_s_index != QUIC_SESSION_INVALID)
{
QUIC_DBG (2, "already accepted ctx 0x%x", ctx_index);
- return -1;
+ return 0;
}
quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
if ((rv = quicly_accept (&conn, quicly_ctx, NULL, &pctx->sa,
- &pctx->packet, NULL, &quic_main.next_cid, NULL)))
+ &pctx->packet, NULL,
+ &quic_main.wrk_ctx[thread_index].next_cid, NULL)))
{
/* Invalid packet, pass */
assert (conn == NULL);
}
assert (conn != NULL);
- ++quic_main.next_cid.master_id;
+ ++quic_main.wrk_ctx[thread_index].next_cid.master_id;
/* Save ctx handle in quicly connection */
quic_store_conn_ctx (conn, ctx);
ctx->conn = conn;
- ctx->conn_state = QUIC_CONN_STATE_HANDSHAKE;
quic_session = session_alloc (ctx->c_thread_index);
QUIC_DBG (2, "Allocated quic_session, 0x%lx ctx %u",
session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
quic_session->listener_handle = lctx->c_s_index;
- /* TODO: don't alloc fifos when we don't transfer data on this session
- * but we still need fifos for the events? */
+ /* Register connection in connections map */
+ quic_make_connection_key (&kv, quicly_get_master_id (conn));
+ kv.value = ((u64) thread_index) << 32 | (u64) ctx_index;
+ clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
+ QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
+
+ /* If notify fails, reset connection immediately */
if ((rv = app_worker_init_accepted (quic_session)))
{
QUIC_ERR ("failed to allocate fifos");
- session_free (quic_session);
+ quic_proto_on_close (ctx_index, thread_index);
return rv;
}
+
app_wrk = app_worker_get (quic_session->app_wrk_index);
if ((rv = app_worker_accept_notify (app_wrk, quic_session)))
{
QUIC_ERR ("failed to notify accept worker app");
+ quic_proto_on_close (ctx_index, thread_index);
return rv;
}
- /* Register connection in connections map */
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- kv.value = ((u64) thread_index) << 32 | (u64) ctx_index;
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
- QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
+ ctx->conn_state = QUIC_CONN_STATE_READY;
+ pctx->ctx_index = ctx_index;
+ pctx->thread_index = thread_index;
- return quic_send_packets (ctx);
+ return 0;
}
static int
ret = svm_fifo_peek (f, fifo_offset,
SESSION_CONN_HDR_LEN, (u8 *) & pctx->ph);
- ASSERT (ret == SESSION_CONN_HDR_LEN);
- ASSERT (pctx->ph.data_offset == 0);
+ QUIC_ASSERT (ret == SESSION_CONN_HDR_LEN);
+ QUIC_ASSERT (pctx->ph.data_offset == 0);
full_len = pctx->ph.data_length + SESSION_CONN_HDR_LEN;
if (full_len > cur_deq)
{
ctx = quic_ctx_get (packets_ctx[i].ctx_index, thread_index);
rv = quicly_receive (ctx->conn, NULL, &packets_ctx[i].sa,
&packets_ctx[i].packet);
- if (rv)
+ if (rv && rv != QUICLY_ERROR_PACKET_IGNORED)
{
- QUIC_DBG (1, "quicly_receive return error %U",
+ QUIC_ERR ("quicly_receive return error %U",
quic_format_err, rv);
}
break;
}
for (i = 0; i < max_packets; i++)
{
- if (packets_ctx[i].ptype != QUIC_PACKET_TYPE_RECEIVE)
- continue;
- ctx = quic_ctx_get (packets_ctx[i].ctx_index,
- packets_ctx[i].thread_index);
- quic_check_quic_session_connected (ctx);
- ctx = quic_ctx_get (packets_ctx[i].ctx_index,
- packets_ctx[i].thread_index);
+ switch (packets_ctx[i].ptype)
+ {
+ case QUIC_PACKET_TYPE_RECEIVE:
+ ctx = quic_ctx_get (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ quic_check_quic_session_connected (ctx);
+ ctx = quic_ctx_get (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ break;
+ case QUIC_PACKET_TYPE_ACCEPT:
+ ctx = quic_ctx_get (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ break;
+ default:
+ continue;
+ }
quic_send_packets (ctx);
}
/* Timer wheels, one per thread. */
for (i = 0; i < num_threads; i++)
{
+ qm->wrk_ctx[i].next_cid.thread_id = i;
tw = &qm->wrk_ctx[i].timer_wheel;
tw_timer_wheel_init_1t_3w_1024sl_ov (tw, quic_expired_timers_dispatch,
1e-3 /* timer period 1ms */ , ~0);
return 0;
}
-static u8 *
-quic_format_ctx_stat (u8 * s, va_list * args)
+/* Sum the quic_input_node error counter @event_code across all vlib
+ * mains (main + workers), subtracting the "clear counters" baseline
+ * when one has been recorded. */
+static inline u64
+quic_get_counter_value (u32 event_code)
{
- quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
- quicly_stats_t quicly_stats;
+ vlib_node_t *n;
+ vlib_main_t *vm;
+ vlib_error_main_t *em;
- quicly_get_stats (ctx->conn, &quicly_stats);
-
- s = format (s, "\n\rQUIC conn stats \n\r");
+ u32 code, i;
+ u64 c, sum = 0;
+ int index = 0;
- s =
- format (s, "RTT: min:%d, smoothed:%d, variance:%d, latest:%d \n\r",
- quicly_stats.rtt.minimum, quicly_stats.rtt.smoothed,
- quicly_stats.rtt.variance, quicly_stats.rtt.latest);
- s = format (s, "Packet loss:%d \n\r", quicly_stats.num_packets.lost);
+ vm = vlib_get_main ();
+ em = &vm->error_main;
+ n = vlib_get_node (vm, quic_input_node.index);
+ code = event_code;
+ /* *INDENT-OFF* */
+ foreach_vlib_main(({
+ em = &this_vlib_main->error_main;
+ i = n->error_heap_index + code;
+ c = em->counters[i];
- return s;
+ /* counters_last_clear is only grown on "clear", so bounds-check */
+ if (i < vec_len (em->counters_last_clear))
+ c -= em->counters_last_clear[i];
+ sum += c;
+ index++;
+ }));
+ /* *INDENT-ON* */
+ return sum;
}
-static clib_error_t *
-quic_plugin_showstats_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+/* CLI helper: walk every per-thread ctx pool, aggregate quicly stats
+ * over all live connections, and print connection/stream/packet
+ * counters plus per-connection mean RTT figures. */
+static void
+quic_show_aggregated_stats (vlib_main_t * vm)
{
+ u32 num_workers = vlib_num_workers ();
quic_main_t *qm = &quic_main;
quic_ctx_t *ctx = NULL;
- u32 num_workers = vlib_num_workers ();
+ quicly_stats_t st, agg_stats;
+ u32 i, nconn = 0, nstream = 0;
- for (int i = 0; i < num_workers + 1; i++)
+ clib_memset (&agg_stats, 0, sizeof (agg_stats));
+ for (i = 0; i < num_workers + 1; i++)
{
/* *INDENT-OFF* */
pool_foreach (ctx, qm->ctx_pool[i],
({
- if(!(ctx->flags & QUIC_F_IS_LISTENER) && !(ctx->flags & QUIC_F_IS_STREAM))
- vlib_cli_output (vm, "%U", quic_format_ctx_stat, ctx);
+ if (quic_ctx_is_conn (ctx) && ctx->conn)
+ {
+ quicly_get_stats (ctx->conn, &st);
+ agg_stats.rtt.smoothed += st.rtt.smoothed;
+ agg_stats.rtt.minimum += st.rtt.minimum;
+ agg_stats.rtt.variance += st.rtt.variance;
+ agg_stats.num_packets.received += st.num_packets.received;
+ agg_stats.num_packets.sent += st.num_packets.sent;
+ agg_stats.num_packets.lost += st.num_packets.lost;
+ agg_stats.num_packets.ack_received += st.num_packets.ack_received;
+ agg_stats.num_bytes.received += st.num_bytes.received;
+ agg_stats.num_bytes.sent += st.num_bytes.sent;
+ nconn++;
+ }
+ else if (quic_ctx_is_stream (ctx))
+ nstream++;
}));
/* *INDENT-ON* */
}
- return 0;
+ vlib_cli_output (vm, "-------- Connections --------");
+ vlib_cli_output (vm, "Current: %u", nconn);
+ vlib_cli_output (vm, "Opened: %d",
+ quic_get_counter_value (QUIC_ERROR_OPENED_CONNECTION));
+ vlib_cli_output (vm, "Closed: %d",
+ quic_get_counter_value (QUIC_ERROR_CLOSED_CONNECTION));
+ vlib_cli_output (vm, "---------- Streams ----------");
+ vlib_cli_output (vm, "Current: %u", nstream);
+ vlib_cli_output (vm, "Opened: %d",
+ quic_get_counter_value (QUIC_ERROR_OPENED_STREAM));
+ vlib_cli_output (vm, "Closed: %d",
+ quic_get_counter_value (QUIC_ERROR_CLOSED_STREAM));
+ vlib_cli_output (vm, "---------- Packets ----------");
+ vlib_cli_output (vm, "RX Total: %d",
+ quic_get_counter_value (QUIC_ERROR_RX_PACKETS));
+ vlib_cli_output (vm, "RX 0RTT: %d",
+ quic_get_counter_value (QUIC_ERROR_ZERO_RTT_RX_PACKETS));
+ vlib_cli_output (vm, "RX 1RTT: %d",
+ quic_get_counter_value (QUIC_ERROR_ONE_RTT_RX_PACKETS));
+ vlib_cli_output (vm, "TX Total: %d",
+ quic_get_counter_value (QUIC_ERROR_TX_PACKETS));
+ vlib_cli_output (vm, "----------- Stats -----------");
+ /* %f consumes an f64 from the va_list; the summed rtt fields are
+ * integers, so cast before dividing or the output is garbage */
+ vlib_cli_output (vm, "Min RTT %f",
+ nconn > 0 ? (f64) agg_stats.rtt.minimum / nconn : 0.0);
+ vlib_cli_output (vm, "Smoothed RTT %f",
+ nconn > 0 ? (f64) agg_stats.rtt.smoothed / nconn : 0.0);
+ vlib_cli_output (vm, "Variance on RTT %f",
+ nconn > 0 ? (f64) agg_stats.rtt.variance / nconn : 0.0);
+ vlib_cli_output (vm, "Packets Received %lu",
+ agg_stats.num_packets.received);
+ vlib_cli_output (vm, "Packets Sent %lu", agg_stats.num_packets.sent);
+ vlib_cli_output (vm, "Packets Lost %lu", agg_stats.num_packets.lost);
+ vlib_cli_output (vm, "Packets Acks %lu",
+ agg_stats.num_packets.ack_received);
+ vlib_cli_output (vm, "RX bytes %lu", agg_stats.num_bytes.received);
+ vlib_cli_output (vm, "TX bytes %lu", agg_stats.num_bytes.sent);
+}
+
+/* format callback: print a quicly connection id as C<master>_<thread> */
+static u8 *
+quic_format_quicly_conn_id (u8 * s, va_list * args)
+{
+ quicly_cid_plaintext_t *mid = va_arg (*args, quicly_cid_plaintext_t *);
+ s = format (s, "C%x_%x", mid->master_id, mid->thread_id);
+ return s;
+}
+
+/* format callback: print a quicly stream id as "<conn-id> S<stream-id>" */
+static u8 *
+quic_format_quicly_stream_id (u8 * s, va_list * args)
+{
+ quicly_stream_t *stream = va_arg (*args, quicly_stream_t *);
+ s =
+ format (s, "%U S%lx", quic_format_quicly_conn_id,
+ quicly_get_master_id (stream->conn), stream->stream_id);
+ return s;
+}
+
+/* format callback: one-line summary of a listener ctx */
+static u8 *
+quic_format_listener_ctx (u8 * s, va_list * args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ s = format (s, "[#%d][%x][Listener]", ctx->c_thread_index, ctx->c_c_index);
+ return s;
+}
+
+/* format callback: one-line summary of a connection ctx; includes quicly
+ * RTT and packet statistics when a quicly conn is attached */
+static u8 *
+quic_format_connection_ctx (u8 * s, va_list * args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ quicly_stats_t quicly_stats;
+
+ s = format (s, "[#%d][%x]", ctx->c_thread_index, ctx->c_c_index);
+
+ /* conn may be NULL while the ctx is being torn down */
+ if (!ctx->conn)
+ {
+ s = format (s, "- no conn -\n");
+ return s;
+ }
+ s = format (s, "[%U]",
+ quic_format_quicly_conn_id, quicly_get_master_id (ctx->conn));
+ quicly_get_stats (ctx->conn, &quicly_stats);
+
+ s = format (s, "[RTT >%3d, ~%3d, V%3d, last %3d]",
+ quicly_stats.rtt.minimum, quicly_stats.rtt.smoothed,
+ quicly_stats.rtt.variance, quicly_stats.rtt.latest);
+ s = format (s, " TX:%d RX:%d loss:%d ack:%d",
+ quicly_stats.num_packets.sent,
+ quicly_stats.num_packets.received,
+ quicly_stats.num_packets.lost,
+ quicly_stats.num_packets.ack_received);
+ return s;
+}
+
+/* format callback: one-line summary of a stream ctx, with the rx/tx
+ * fifo fill levels of the attached stream session (if any) */
+static u8 *
+quic_format_stream_ctx (u8 * s, va_list * args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ session_t *stream_session;
+ quicly_stream_t *stream = ctx->stream;
+ u32 txs, rxs;
+
+ s = format (s, "[#%d][%x]", ctx->c_thread_index, ctx->c_c_index);
+ s = format (s, "[%U]", quic_format_quicly_stream_id, stream);
+
+ stream_session = session_get_if_valid (ctx->c_s_index, ctx->c_thread_index);
+ if (!stream_session)
+ {
+ s = format (s, "- no session -\n");
+ return s;
+ }
+ txs = svm_fifo_max_dequeue (stream_session->tx_fifo);
+ rxs = svm_fifo_max_dequeue (stream_session->rx_fifo);
+ s = format (s, "[rx %d tx %d]\n", rxs, txs);
+ return s;
}
+/* CLI: "show quic [listener|conn|stream]" — with no argument print
+ * aggregated stats, otherwise dump the matching ctxs of all threads */
static clib_error_t *
-quic_show_ctx_command_fn (vlib_main_t * vm, unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_show_connections_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 show_listeners = 0, show_conn = 0, show_stream = 0;
+ u32 num_workers = vlib_num_workers ();
quic_main_t *qm = &quic_main;
+ clib_error_t *error = 0;
quic_ctx_t *ctx = NULL;
- u32 num_workers = vlib_num_workers ();
+
+ session_cli_return_if_not_enabled ();
+
+ /* no sub-command: print the aggregated statistics instead */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ {
+ quic_show_aggregated_stats (vm);
+ return 0;
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "listener"))
+ show_listeners = 1;
+ else if (unformat (line_input, "conn"))
+ show_conn = 1;
+ else if (unformat (line_input, "stream"))
+ show_stream = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
for (int i = 0; i < num_workers + 1; i++)
{
/* *INDENT-OFF* */
pool_foreach (ctx, qm->ctx_pool[i],
({
- vlib_cli_output (vm, "%U", format_quic_ctx, ctx, 1);
+ if (quic_ctx_is_stream (ctx) && show_stream)
+ vlib_cli_output (vm, "%U", quic_format_stream_ctx, ctx);
+ else if (quic_ctx_is_listener (ctx) && show_listeners)
+ vlib_cli_output (vm, "%U", quic_format_listener_ctx, ctx);
+ else if (quic_ctx_is_conn (ctx) && show_conn)
+ vlib_cli_output (vm, "%U", quic_format_connection_ctx, ctx);
}));
/* *INDENT-ON* */
}
- return 0;
+
+done:
+ unformat_free (line_input);
+ return error;
}
/* *INDENT-OFF* */
.short_help = "quic set fifo-size N[K|M|G] (default 64K)",
.function = quic_plugin_set_fifo_size_command_fn,
};
-VLIB_CLI_COMMAND(quic_plugin_stats_command, static)=
-{
- .path = "show quic stats",
- .short_help = "show quic stats",
- .function = quic_plugin_showstats_command_fn,
-};
VLIB_CLI_COMMAND(quic_show_ctx_command, static)=
{
- .path = "show quic ctx",
- .short_help = "show quic ctx",
- .function = quic_show_ctx_command_fn,
+ .path = "show quic",
+ .short_help = "show quic",
+ .function = quic_show_connections_command_fn,
};
VLIB_PLUGIN_REGISTER () =
{
{
quic_main_t *qm = &quic_main;
uword tmp;
+ u32 i;
qm->udp_fifo_size = QUIC_DEFAULT_FIFO_SIZE;
qm->udp_fifo_prealloc = 0;
+ qm->connection_timeout = QUIC_DEFAULT_CONN_TIMEOUT;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "fifo-size %U", unformat_memory_size, &tmp))
{
if (tmp >= 0x100000000ULL)
{
- return clib_error_return
- (0, "fifo-size %llu (0x%llx) too large", tmp, tmp);
+ return clib_error_return (0,
+ "fifo-size %llu (0x%llx) too large",
+ tmp, tmp);
}
qm->udp_fifo_size = tmp;
}
- else
- if (unformat
- (input, "fifo-prealloc %u", &quic_main.udp_fifo_prealloc))
- ;
+ else if (unformat (input, "conn-timeout %u", &i))
+ qm->connection_timeout = i;
+ else if (unformat (input, "fifo-prealloc %u", &i))
+ qm->udp_fifo_prealloc = i;
else
return clib_error_return (0, "unknown input '%U'",
format_unformat_error, input);