static quic_main_t quic_main;
static void quic_update_timer (quic_ctx_t * ctx);
-static int quic_on_client_connected (quic_ctx_t * ctx);
+static int quic_check_quic_session_connected (quic_ctx_t * ctx);
+
+/* Helper functions */
static u32
quic_ctx_alloc (u32 thread_index)
pool_get (qm->ctx_pool[thread_index], ctx);
- memset (ctx, 0, sizeof (quic_ctx_t));
+ clib_memset (ctx, 0, sizeof (quic_ctx_t));
ctx->c_thread_index = thread_index;
+ ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
QUIC_DBG (3, "Allocated quic_ctx %u on thread %u",
ctx - qm->ctx_pool[thread_index], thread_index);
return ctx - qm->ctx_pool[thread_index];
static void
quic_ctx_free (quic_ctx_t * ctx)
{
- QUIC_DBG (2, "Free ctx %u", ctx->c_c_index);
+ QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
u32 thread_index = ctx->c_thread_index;
+ ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
if (CLIB_DEBUG)
- memset (ctx, 0xfb, sizeof (*ctx));
+ clib_memset (ctx, 0xfb, sizeof (*ctx));
pool_put (quic_main.ctx_pool[thread_index], ctx);
}
static quicly_context_t *
quic_get_quicly_ctx_from_ctx (quic_ctx_t * ctx)
{
- app_worker_t *app_wrk;
- application_t *app;
- app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_id);
- if (!app_wrk)
- return 0;
- app = application_get (app_wrk->app_index);
- return (quicly_context_t *) app->quicly_ctx;
+ return ctx->quicly_ctx;
}
static quicly_context_t *
quic_get_quicly_ctx_from_udp (u64 udp_session_handle)
{
- session_t *udp_session;
- application_t *app;
- udp_session = session_get_from_handle (udp_session_handle);
- app = application_get (udp_session->opaque);
- return (quicly_context_t *) app->quicly_ctx;
+ session_t *udp_session = session_get_from_handle (udp_session_handle);
+ quic_ctx_t *ctx =
+ quic_ctx_get (udp_session->opaque, udp_session->thread_index);
+ return ctx->quicly_ctx;
+}
+
+static inline void
+quic_set_udp_tx_evt (session_t * udp_session)
+{
+ int rv = 0;
+ if (svm_fifo_set_event (udp_session->tx_fifo))
+ rv = session_send_io_evt_to_thread (udp_session->tx_fifo,
+ SESSION_IO_EVT_TX);
+ if (PREDICT_FALSE (rv))
+ clib_warning ("Event enqueue errored %d", rv);
+}
+
+static inline void
+quic_stop_ctx_timer (quic_ctx_t * ctx)
+{
+ tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
+ if (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID)
+ return;
+ tw = &quic_main.wrk_ctx[ctx->c_thread_index].timer_wheel;
+ tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
+ ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
+ QUIC_DBG (4, "Stopping timer for ctx %u", ctx->c_c_index);
}
+/* QUIC protocol actions */
+
static void
quic_ack_rx_data (session_t * stream_session)
{
quicly_stream_t *stream;
quic_stream_data_t *stream_data;
- sctx =
- quic_ctx_get (stream_session->connection_index,
- stream_session->thread_index);
+ sctx = quic_ctx_get (stream_session->connection_index,
+ stream_session->thread_index);
ASSERT (quic_ctx_is_stream (sctx));
stream = sctx->stream;
stream_data = (quic_stream_data_t *) stream->data;
static void
quic_connection_delete (quic_ctx_t * ctx)
{
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
clib_bihash_kv_16_8_t kv;
quicly_conn_t *conn;
QUIC_DBG (2, "Deleting connection %u", ctx->c_c_index);
ASSERT (!quic_ctx_is_stream (ctx));
-
- /* Stop the timer */
- if (ctx->timer_handle != QUIC_TIMER_HANDLE_INVALID)
- {
- tw = &quic_main.wrk_ctx[ctx->c_thread_index].timer_wheel;
- tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
- }
+ quic_stop_ctx_timer (ctx);
/* Delete the connection from the connection map */
conn = ctx->conn;
if (ctx->conn)
quicly_free (ctx->conn);
ctx->conn = NULL;
-
session_transport_delete_notify (&ctx->connection);
- quic_ctx_free (ctx);
}
void
vlib_node_increment_counter (vm, quic_input_node.index, evt, val);
}
-struct st_quic_event_log_t
-{
- quicly_event_logger_t super;
-};
-
-void
-quic_event_log (quicly_event_logger_t * _self, quicly_event_type_t type,
- const quicly_event_attribute_t * attributes,
- size_t num_attributes)
-{
- if (type == QUICLY_EVENT_TYPE_PACKET_LOST)
- {
- QUIC_DBG (1, "QUIC packet loss");
- quic_increment_counter (QUIC_ERROR_PACKET_DROP, 1);
- }
-}
-
-quicly_event_logger_t *
-quic_new_event_logger ()
-{
- struct st_quic_event_log_t *self;
-
- if ((self = clib_mem_alloc (sizeof (*self))) == NULL)
- return NULL;
- /* *INDENT-OFF* */
- *self = (struct st_quic_event_log_t) {{quic_event_log}};
- /* *INDENT-ON* */
- return &self->super;
-}
-
-void
-quic_free_event_logger (quicly_event_logger_t * _self)
-{
- struct st_quicly_default_event_log_t *self = (void *) _self;
- clib_mem_free (self);
-}
-
/**
* Called when quicly return an error
* This function interacts tightly with quic_proto_on_close
break;
case QUIC_CONN_STATE_PASSIVE_CLOSING_APP_CLOSED:
/* App already confirmed close, we can delete the connection */
- session_transport_delete_notify (&ctx->connection);
quic_connection_delete (ctx);
break;
case QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED:
QUIC_DBG (0, "BUG");
break;
case QUIC_CONN_STATE_ACTIVE_CLOSING:
- session_transport_delete_notify (&ctx->connection);
quic_connection_delete (ctx);
break;
default:
- QUIC_DBG (0, "BUG");
+ QUIC_DBG (0, "BUG %d", ctx->conn_state);
break;
}
}
max_enqueue = svm_fifo_max_enqueue (f);
if (max_enqueue < SESSION_CONN_HDR_LEN + len)
{
- QUIC_DBG (1, "Too much data to send, max_enqueue %u, len %u",
+ QUIC_ERR ("Too much data to send, max_enqueue %u, len %u",
max_enqueue, len + SESSION_CONN_HDR_LEN);
return QUIC_ERROR_FULL_FIFO;
}
/* Read dest address from quicly-provided sockaddr */
if (hdr.is_ip4)
{
- ASSERT (packet->sa.sa_family == AF_INET);
- struct sockaddr_in *sa4 = (struct sockaddr_in *) &packet->sa;
+ ASSERT (packet->dest.sa.sa_family == AF_INET);
+ struct sockaddr_in *sa4 = (struct sockaddr_in *) &packet->dest.sa;
hdr.rmt_port = sa4->sin_port;
hdr.rmt_ip.ip4.as_u32 = sa4->sin_addr.s_addr;
}
else
{
- ASSERT (packet->sa.sa_family == AF_INET6);
- struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &packet->sa;
+ ASSERT (packet->dest.sa.sa_family == AF_INET6);
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &packet->dest.sa;
hdr.rmt_port = sa6->sin6_port;
clib_memcpy (&hdr.rmt_ip.ip6, &sa6->sin6_addr, 16);
}
ret = svm_fifo_enqueue (f, sizeof (hdr), (u8 *) & hdr);
if (ret != sizeof (hdr))
{
- QUIC_DBG (1, "Not enough space to enqueue header");
+ QUIC_ERR ("Not enough space to enqueue header");
return QUIC_ERROR_FULL_FIFO;
}
ret = svm_fifo_enqueue (f, len, packet->data.base);
if (ret != len)
{
- QUIC_DBG (1, "Not enough space to enqueue payload");
+ QUIC_ERR ("Not enough space to enqueue payload");
return QUIC_ERROR_FULL_FIFO;
}
while (num_packets > 0 && num_packets == max_packets);
stop_sending:
- if (svm_fifo_set_event (udp_session->tx_fifo))
- if ((err =
- session_send_io_evt_to_thread (udp_session->tx_fifo,
- SESSION_IO_EVT_TX)))
- clib_warning ("Event enqueue errored %d", err);
+ quic_set_udp_tx_evt (udp_session);
QUIC_DBG (3, "%u[TX] %u[RX]", svm_fifo_max_dequeue (udp_session->tx_fifo),
svm_fifo_max_dequeue (udp_session->rx_fifo));
return 1;
}
-/*****************************************************************************
- *
- * START QUICLY CALLBACKS
- * Called from QUIC lib
- *
- *****************************************************************************/
+/* Quicly callbacks */
static void
quic_on_stream_destroy (quicly_stream_t * stream, int err)
{
quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx =
- quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
- session_t *stream_session =
- session_get (sctx->c_s_index, sctx->c_thread_index);
+ quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
+ stream_data->thread_index);
+ session_t *stream_session = session_get (sctx->c_s_index,
+ sctx->c_thread_index);
QUIC_DBG (2, "DESTROYED_STREAM: session 0x%lx (%U)",
session_handle (stream_session), quic_format_err, err);
session_transport_delete_notify (&sctx->connection);
quic_ctx_free (sctx);
- free (stream->data);
+ clib_mem_free (stream->data);
}
static int
{
#if QUIC_DEBUG >= 2
quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx =
- quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
- session_t *stream_session =
- session_get (sctx->c_s_index, sctx->c_thread_index);
+ quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
+ stream_data->thread_index);
+ session_t *stream_session = session_get (sctx->c_s_index,
+ sctx->c_thread_index);
clib_warning ("(NOT IMPLEMENTD) STOP_SENDING: session 0x%lx (%U)",
session_handle (stream_session), quic_format_err, err);
#endif
quic_on_receive_reset (quicly_stream_t * stream, int err)
{
quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx =
- quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
+ quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
+ stream_data->thread_index);
#if QUIC_DEBUG >= 2
- session_t *stream_session =
- session_get (sctx->c_s_index, sctx->c_thread_index);
+ session_t *stream_session = session_get (sctx->c_s_index,
+ sctx->c_thread_index);
clib_warning ("RESET_STREAM: session 0x%lx (%U)",
session_handle (stream_session), quic_format_err, err);
#endif
app_worker_t *app_wrk;
svm_fifo_t *f;
quic_stream_data_t *stream_data;
- int rlen;
+ int rlen, rv;
stream_data = (quic_stream_data_t *) stream->data;
sctx = quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
max_enq = svm_fifo_max_enqueue_prod (f);
QUIC_DBG (3, "Enqueuing %u at off %u in %u space", len, off, max_enq);
- if (off - stream_data->app_rx_data_len + len > max_enq)
+ /* Handle duplicate packet/chunk from quicly */
+ if (off < stream_data->app_rx_data_len)
+ {
+ QUIC_DBG (3, "Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
+ "DUPLICATE PACKET (max_enq %u, len %u, "
+ "app_rx_data_len %u, off %u, ToBeNQ %u)",
+ stream_session->session_index,
+ stream_session->app_wrk_index,
+ stream_session->thread_index, f,
+ max_enq, len, stream_data->app_rx_data_len, off,
+ off - stream_data->app_rx_data_len + len);
+ return 0;
+ }
+ if (PREDICT_FALSE ((off - stream_data->app_rx_data_len + len) > max_enq))
{
- QUIC_DBG (1, "Error RX fifo is full");
+ QUIC_ERR ("Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
+ "RX FIFO IS FULL (max_enq %u, len %u, "
+ "app_rx_data_len %u, off %u, ToBeNQ %u)",
+ stream_session->session_index,
+ stream_session->app_wrk_index,
+ stream_session->thread_index, f,
+ max_enq, len, stream_data->app_rx_data_len, off,
+ off - stream_data->app_rx_data_len + len);
return 1;
}
if (off == stream_data->app_rx_data_len)
{
/* Streams live on the same thread so (f, stream_data) should stay consistent */
rlen = svm_fifo_enqueue (f, len, (u8 *) src);
+ QUIC_DBG (3, "Session [idx %u, app_wrk %u, ti %u, rx-fifo 0x%llx]: "
+ "Enqueuing %u (rlen %u) at off %u in %u space, ",
+ stream_session->session_index,
+ stream_session->app_wrk_index,
+ stream_session->thread_index, f, len, rlen, off, max_enq);
stream_data->app_rx_data_len += rlen;
ASSERT (rlen >= len);
app_wrk = app_worker_get_if_valid (stream_session->app_wrk_index);
if (PREDICT_TRUE (app_wrk != 0))
- app_worker_lock_and_send_event (app_wrk, stream_session,
- SESSION_IO_EVT_RX);
+ {
+ rv = app_worker_lock_and_send_event (app_wrk, stream_session,
+ SESSION_IO_EVT_RX);
+ if (rv)
+ QUIC_ERR ("Failed to ping app for RX");
+ }
quic_ack_rx_data (stream_session);
}
else
{
- rlen =
- svm_fifo_enqueue_with_offset (f, off - stream_data->app_rx_data_len,
- len, (u8 *) src);
+ rlen = svm_fifo_enqueue_with_offset (f,
+ off - stream_data->app_rx_data_len,
+ len, (u8 *) src);
ASSERT (rlen == 0);
}
return 0;
.on_receive_reset = quic_on_receive_reset
};
-static void
-quic_accept_stream (void *s)
+static int
+quic_on_stream_open (quicly_stream_open_t * self, quicly_stream_t * stream)
{
- quicly_stream_t *stream = (quicly_stream_t *) s;
session_t *stream_session, *quic_session;
quic_stream_data_t *stream_data;
app_worker_t *app_wrk;
u32 sctx_id;
int rv;
- sctx_id = quic_ctx_alloc (vlib_get_thread_index ());
+ QUIC_DBG (2, "on_stream_open called");
+ stream->data = clib_mem_alloc (sizeof (quic_stream_data_t));
+ stream->callbacks = &quic_stream_callbacks;
+ /* Notify accept on parent qsession, but only if this is not a locally
+ * initiated stream */
+ if (quicly_stream_is_self_initiated (stream))
+ return 0;
+ sctx_id = quic_ctx_alloc (vlib_get_thread_index ());
qctx = quic_get_conn_ctx (stream->conn);
/* Might need to signal that the connection is ready if the first thing the
* server does is open a stream */
- if (qctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
- {
- if (quicly_connection_is_ready (qctx->conn))
- {
- qctx->conn_state = QUIC_CONN_STATE_READY;
- if (quicly_is_client (qctx->conn))
- {
- quic_on_client_connected (qctx);
- /* ctx might be invalidated */
- qctx = quic_get_conn_ctx (stream->conn);
- }
- }
- }
+ quic_check_quic_session_connected (qctx);
+ /* ctx might be invalidated */
+ qctx = quic_get_conn_ctx (stream->conn);
stream_session = session_alloc (qctx->c_thread_index);
QUIC_DBG (2, "ACCEPTED stream_session 0x%lx ctx %u",
app_wrk = app_worker_get (stream_session->app_wrk_index);
if ((rv = app_worker_init_connected (app_wrk, stream_session)))
{
- QUIC_DBG (1, "failed to allocate fifos");
+ QUIC_ERR ("failed to allocate fifos");
session_free (stream_session);
quicly_reset_stream (stream, QUIC_APP_ALLOCATION_ERROR);
- return;
+ return 0; /* Frame is still valid */
}
svm_fifo_add_want_deq_ntf (stream_session->rx_fifo,
SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
if ((rv = app_worker_accept_notify (app_wrk, stream_session)))
{
- QUIC_DBG (1, "failed to notify accept worker app");
+ QUIC_ERR ("failed to notify accept worker app");
session_free_w_fifos (stream_session);
quicly_reset_stream (stream, QUIC_APP_ACCEPT_NOTIFY_ERROR);
- return;
+ return 0; /* Frame is still valid */
}
-}
-static int
-quic_on_stream_open (quicly_stream_open_t * self, quicly_stream_t * stream)
-{
- QUIC_DBG (2, "on_stream_open called");
- stream->data = malloc (sizeof (quic_stream_data_t));
- stream->callbacks = &quic_stream_callbacks;
- /* Notify accept on parent qsession, but only if this is not a locally
- * initiated stream */
- if (!quicly_stream_is_self_initiated (stream))
- quic_accept_stream (stream);
return 0;
}
session_transport_closing_notify (&ctx->connection);
}
-static quicly_stream_open_t on_stream_open = { &quic_on_stream_open };
-static quicly_closed_by_peer_t on_closed_by_peer =
- { &quic_on_closed_by_peer };
+static quicly_stream_open_t on_stream_open = { quic_on_stream_open };
+static quicly_closed_by_peer_t on_closed_by_peer = { quic_on_closed_by_peer };
-
-/*****************************************************************************
- *
- * END QUICLY CALLBACKS
- *
- *****************************************************************************/
-
-/*****************************************************************************
- *
- * BEGIN TIMERS HANDLING
- *
- *****************************************************************************/
+/* Timer handling */
static int64_t
quic_get_thread_time (u8 thread_index)
tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
int64_t next_timeout, next_interval;
session_t *quic_session;
+ int rv;
/* This timeout is in ms which is the unit of our timer */
next_timeout = quicly_get_first_timeout (ctx->conn);
{
quic_session = session_get (ctx->c_s_index, ctx->c_thread_index);
if (svm_fifo_set_event (quic_session->tx_fifo))
- session_send_io_evt_to_thread_custom (quic_session,
- quic_session->thread_index,
- SESSION_IO_EVT_BUILTIN_TX);
+ {
+ rv = session_send_io_evt_to_thread_custom (quic_session,
+ quic_session->thread_index,
+ SESSION_IO_EVT_BUILTIN_TX);
+ if (PREDICT_FALSE (rv))
+ QUIC_ERR ("Failed to enqueue builtin_tx %d", rv);
+ }
return;
}
}
QUIC_DBG (4, "timer for ctx %u already stopped", ctx->c_c_index);
return;
}
- ctx->timer_handle =
- tw_timer_start_1t_3w_1024sl_ov (tw, ctx->c_c_index, 0, next_interval);
+ ctx->timer_handle = tw_timer_start_1t_3w_1024sl_ov (tw, ctx->c_c_index,
+ 0, next_interval);
}
else
{
if (next_timeout == INT64_MAX)
{
- tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
- ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
- QUIC_DBG (4, "Stopping timer for ctx %u", ctx->c_c_index);
+ quic_stop_ctx_timer (ctx);
}
else
tw_timer_update_1t_3w_1024sl_ov (tw, ctx->timer_handle,
}
}
-/*****************************************************************************
- *
- * END TIMERS HANDLING
- *
- *****************************************************************************/
-
static int
quic_encrypt_ticket_cb (ptls_encrypt_ticket_t * _self, ptls_t * tls,
int is_encrypt, ptls_buffer_t * dst, ptls_iovec_t src)
{
/* replace the cached entry along with a newly generated session id */
- free (self->data.base);
- if ((self->data.base = malloc (src.len)) == NULL)
+ clib_mem_free (self->data.base);
+ if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
return PTLS_ERROR_NO_MEMORY;
ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
- memcpy (self->data.base, src.base, src.len);
+ clib_memcpy (self->data.base, src.base, src.len);
self->data.len = src.len;
/* store the session id in buffer */
if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
return ret;
- memcpy (dst->base + dst->off, self->id, sizeof (self->id));
+ clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
dst->off += sizeof (self->id);
}
/* check if session id is the one stored in cache */
if (src.len != sizeof (self->id))
return PTLS_ERROR_SESSION_NOT_FOUND;
- if (memcmp (self->id, src.base, sizeof (self->id)) != 0)
+ if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
return PTLS_ERROR_SESSION_NOT_FOUND;
/* return the cached value */
if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
return ret;
- memcpy (dst->base + dst->off, self->data.base, self->data.len);
+ clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
dst->off += self->data.len;
}
return 0;
}
-typedef struct quicly_ctx_data_
-{
- quicly_context_t quicly_ctx;
- char cid_key[17];
- ptls_context_t ptls_ctx;
-} quicly_ctx_data_t;
-
-static void
-quic_store_quicly_ctx (application_t * app, u8 is_client)
+static int
+quic_store_quicly_ctx (application_t * app, u32 ckpair_index,
+ u8 crypto_engine)
{
quic_main_t *qm = &quic_main;
quicly_context_t *quicly_ctx;
ptls_iovec_t key_vec;
+ app_cert_key_pair_t *ckpair;
+ u64 max_enq;
if (app->quicly_ctx)
- return;
+ return 0;
+
+ if (crypto_engine == CRYPTO_ENGINE_NONE)
+ {
+ QUIC_DBG (2, "No crypto engine specified, using %d", crypto_engine);
+ crypto_engine = qm->default_crypto_engine;
+ }
+ if (!clib_bitmap_get (qm->available_crypto_engines, crypto_engine))
+ {
+ QUIC_ERR ("Quic does not support crypto engine %d", crypto_engine);
+ return VNET_API_ERROR_MISSING_CERT_KEY;
+ }
quicly_ctx_data_t *quicly_ctx_data =
clib_mem_alloc (sizeof (quicly_ctx_data_t));
ptls_ctx->random_bytes = ptls_openssl_random_bytes;
ptls_ctx->get_time = &ptls_get_time;
ptls_ctx->key_exchanges = ptls_openssl_key_exchanges;
- ptls_ctx->cipher_suites = qm->quic_ciphers[qm->default_cipher];
+ ptls_ctx->cipher_suites = qm->quic_ciphers[crypto_engine];
ptls_ctx->certificates.list = NULL;
ptls_ctx->certificates.count = 0;
ptls_ctx->esni = NULL;
ptls_ctx->encrypt_ticket = &qm->session_cache.super;
app->quicly_ctx = (u64 *) quicly_ctx;
- memcpy (quicly_ctx, &quicly_spec_context, sizeof (quicly_context_t));
+ clib_memcpy (quicly_ctx, &quicly_spec_context, sizeof (quicly_context_t));
quicly_ctx->max_packet_size = QUIC_MAX_PACKET_SIZE;
quicly_ctx->tls = ptls_ctx;
quicly_ctx->now = &quicly_vpp_now_cb;
quicly_amend_ptls_context (quicly_ctx->tls);
- quicly_ctx->event_log.mask = UINT64_MAX; /* logs */
- quicly_ctx->event_log.cb = quic_new_event_logger ();
-
quicly_ctx->transport_params.max_data = QUIC_INT_MAX;
quicly_ctx->transport_params.max_streams_uni = (uint64_t) 1 << 60;
quicly_ctx->transport_params.max_streams_bidi = (uint64_t) 1 << 60;
- quicly_ctx->transport_params.max_stream_data.bidi_local = (QUIC_FIFO_SIZE - 1); /* max_enq is SIZE - 1 */
- quicly_ctx->transport_params.max_stream_data.bidi_remote = (QUIC_FIFO_SIZE - 1); /* max_enq is SIZE - 1 */
+
+ /* max_enq is FIFO_SIZE - 1 */
+ max_enq = app->sm_properties.rx_fifo_size - 1;
+ quicly_ctx->transport_params.max_stream_data.bidi_local = max_enq;
+ max_enq = app->sm_properties.tx_fifo_size - 1;
+ quicly_ctx->transport_params.max_stream_data.bidi_remote = max_enq;
quicly_ctx->transport_params.max_stream_data.uni = QUIC_INT_MAX;
quicly_ctx->tls->random_bytes (quicly_ctx_data->cid_key, 16);
quicly_ctx_data->cid_key[16] = 0;
- key_vec =
- ptls_iovec_init (quicly_ctx_data->cid_key,
- strlen (quicly_ctx_data->cid_key));
+ key_vec = ptls_iovec_init (quicly_ctx_data->cid_key,
+ strlen (quicly_ctx_data->cid_key));
quicly_ctx->cid_encryptor =
quicly_new_default_cid_encryptor (&ptls_openssl_bfecb,
+ &ptls_openssl_aes128ecb,
&ptls_openssl_sha256, key_vec);
- if (is_client)
- return;
- if (app->tls_key != NULL && app->tls_cert != NULL)
+
+ ckpair = app_cert_key_pair_get_if_valid (ckpair_index);
+ if (!ckpair || !ckpair->key || !ckpair->cert)
{
- if (load_bio_private_key (quicly_ctx->tls, (char *) app->tls_key))
- {
- QUIC_DBG (1, "failed to read private key from app configuration\n");
- }
- if (load_bio_certificate_chain (quicly_ctx->tls,
- (char *) app->tls_cert))
- {
- QUIC_DBG (1, "failed to load certificate\n");
- }
+ QUIC_ERR ("Wrong ckpair id %d\n", ckpair_index);
+ goto error;
+ }
+ if (load_bio_private_key (quicly_ctx->tls, (char *) ckpair->key))
+ {
+ QUIC_ERR ("failed to read private key from app configuration\n");
+ goto error;
+ }
+ if (load_bio_certificate_chain (quicly_ctx->tls, (char *) ckpair->cert))
+ {
+ QUIC_ERR ("failed to load certificate\n");
+ goto error;
}
+ return 0;
+
+error:
+ clib_mem_free (quicly_ctx_data);
+ return VNET_API_ERROR_MISSING_CERT_KEY;
}
-/*****************************************************************************
- *
- * BEGIN TRANSPORT PROTO FUNCTIONS
- *
- *****************************************************************************/
+/* Transport proto functions */
static int
-quic_connect_new_stream (session_t * quic_session, u32 opaque)
+quic_connect_stream (session_t * quic_session, u32 opaque)
{
uint64_t quic_session_handle;
session_t *stream_session;
if (session_type_transport_proto (quic_session->session_type) !=
TRANSPORT_PROTO_QUIC)
{
- QUIC_DBG (1, "received incompatible session");
+ QUIC_ERR ("received incompatible session");
return -1;
}
app_wrk = app_worker_get_if_valid (quic_session->app_wrk_index);
if (!app_wrk)
{
- QUIC_DBG (1, "Invalid app worker :(");
+ QUIC_ERR ("Invalid app worker :(");
return -1;
}
sctx_index = quic_ctx_alloc (quic_session->thread_index); /* Allocate before we get pointers */
sctx = quic_ctx_get (sctx_index, quic_session->thread_index);
- qctx =
- quic_ctx_get (quic_session->connection_index, quic_session->thread_index);
+ qctx = quic_ctx_get (quic_session->connection_index,
+ quic_session->thread_index);
if (quic_ctx_is_stream (qctx))
{
- QUIC_DBG (1, "session is a stream");
+ QUIC_ERR ("session is a stream");
quic_ctx_free (sctx);
return -1;
}
if (app_worker_init_connected (app_wrk, stream_session))
{
- QUIC_DBG (1, "failed to app_worker_init_connected");
+ QUIC_ERR ("failed to app_worker_init_connected");
quicly_reset_stream (stream, QUIC_APP_ALLOCATION_ERROR);
session_free_w_fifos (stream_session);
quic_ctx_free (sctx);
stream_session->session_state = SESSION_STATE_READY;
if (app_worker_connect_notify (app_wrk, stream_session, opaque))
{
- QUIC_DBG (1, "failed to notify app");
+ QUIC_ERR ("failed to notify app");
quicly_reset_stream (stream, QUIC_APP_CONNECT_NOTIFY_ERROR);
session_free_w_fifos (stream_session);
quic_ctx_free (sctx);
}
static int
-quic_connect_new_connection (session_endpoint_cfg_t * sep)
+quic_connect_connection (session_endpoint_cfg_t * sep)
{
- vnet_connect_args_t _cargs = { {}, }, *cargs = &_cargs;
+ vnet_connect_args_t _cargs, *cargs = &_cargs;
quic_main_t *qm = &quic_main;
quic_ctx_t *ctx;
app_worker_t *app_wrk;
u32 ctx_index;
int error;
+ clib_memset (cargs, 0, sizeof (*cargs));
ctx_index = quic_ctx_alloc (vlib_get_thread_index ());
ctx = quic_ctx_get (ctx_index, vlib_get_thread_index ());
ctx->parent_app_wrk_id = sep->app_wrk_index;
ctx->srv_hostname = format (0, "%v", sep->hostname);
else
/* needed by quic for crypto + determining client / server */
- ctx->srv_hostname =
- format (0, "%U", format_ip46_address, &sep->ip, sep->is_ip4);
+ ctx->srv_hostname = format (0, "%U", format_ip46_address,
+ &sep->ip, sep->is_ip4);
vec_terminate_c_string (ctx->srv_hostname);
clib_memcpy (&cargs->sep, sep, sizeof (session_endpoint_cfg_t));
ctx->parent_app_id = app_wrk->app_index;
cargs->sep_ext.ns_index = app->ns_index;
- quic_store_quicly_ctx (app, 1 /* is client */ );
+ if ((error =
+ quic_store_quicly_ctx (app, sep->ckpair_index, sep->crypto_engine)))
+ return error;
+ /* Also store it in ctx for convenience
+ * Waiting for crypto_ctx logic */
+ ctx->quicly_ctx = (quicly_context_t *) app->quicly_ctx;
if ((error = vnet_connect (cargs)))
return error;
quic_session = session_get_from_handle_if_valid (sep->parent_handle);
if (quic_session)
- return quic_connect_new_stream (quic_session, sep->opaque);
+ return quic_connect_stream (quic_session, sep->opaque);
else
- return quic_connect_new_connection (sep);
+ return quic_connect_connection (sep);
}
static void
if (!ctx)
return;
#if QUIC_DEBUG >= 2
- session_t *stream_session =
- session_get (ctx->c_s_index, ctx->c_thread_index);
+ session_t *stream_session = session_get (ctx->c_s_index,
+ ctx->c_thread_index);
clib_warning ("Closing session 0x%lx", session_handle (stream_session));
#endif
if (quic_ctx_is_stream (ctx))
app = application_get (app_wrk->app_index);
QUIC_DBG (2, "Called quic_start_listen for app %d", app_wrk->app_index);
- quic_store_quicly_ctx (app, 0 /* is_client */ );
+ if (quic_store_quicly_ctx (app, sep->ckpair_index, sep->crypto_engine))
+ return -1;
sep->transport_proto = TRANSPORT_PROTO_UDPC;
- memset (args, 0, sizeof (*args));
+ clib_memset (args, 0, sizeof (*args));
args->app_index = qm->app_index;
args->sep_ext = *sep;
args->sep_ext.ns_index = app->ns_index;
lctx = quic_ctx_get (lctx_index, 0);
lctx->flags |= QUIC_F_IS_LISTENER;
+ /* Also store it in ctx for convenience
+ * Waiting for crypto_ctx logic */
+ lctx->quicly_ctx = (quicly_context_t *) app->quicly_ctx;
clib_memcpy (&lctx->c_rmt_ip, &args->sep.peer.ip, sizeof (ip46_address_t));
clib_memcpy (&lctx->c_lcl_ip, &args->sep.ip, sizeof (ip46_address_t));
u32 qc_index = va_arg (*args, u32);
u32 thread_index = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
- s =
- format (s, "[#%d][Q] half-open app %u", thread_index, ctx->parent_app_id);
+ s = format (s, "[#%d][Q] half-open app %u", thread_index,
+ ctx->parent_app_id);
return s;
}
return s;
}
-/*****************************************************************************
- * END TRANSPORT PROTO FUNCTIONS
- *
- * START SESSION CALLBACKS
- * Called from UDP layer
- *****************************************************************************/
+/* Session layer callbacks */
static inline void
quic_build_sockaddr (struct sockaddr *sa, socklen_t * salen,
}
static int
-quic_on_client_connected (quic_ctx_t * ctx)
+quic_on_quic_session_connected (quic_ctx_t * ctx)
{
session_t *quic_session;
app_worker_t *app_wrk;
if (!app_wrk)
{
quic_disconnect_transport (ctx);
- return -1;
+ return 0;
}
quic_session = session_alloc (thread_index);
if (app_worker_init_connected (app_wrk, quic_session))
{
- QUIC_DBG (1, "failed to app_worker_init_connected");
+ QUIC_ERR ("failed to app_worker_init_connected");
quic_proto_on_close (ctx_id, thread_index);
return app_worker_connect_notify (app_wrk, NULL, ctx->client_opaque);
}
if ((rv = app_worker_connect_notify (app_wrk, quic_session,
ctx->client_opaque)))
{
- QUIC_DBG (1, "failed to notify app %d", rv);
+ QUIC_ERR ("failed to notify app %d", rv);
quic_proto_on_close (ctx_id, thread_index);
return -1;
}
return 0;
}
+static int
+quic_check_quic_session_connected (quic_ctx_t * ctx)
+{
+ /* Called when we need to trigger quic session connected
+ * we may call this function on the server side / at
+ * stream opening */
+
+ /* Conn may be set to null if the connection is terminated */
+ if (!ctx->conn || ctx->conn_state != QUIC_CONN_STATE_HANDSHAKE)
+ return 0;
+ if (!quicly_connection_is_ready (ctx->conn))
+ return 0;
+ ctx->conn_state = QUIC_CONN_STATE_READY;
+ if (!quicly_is_client (ctx->conn))
+ return 0;
+ return quic_on_quic_session_connected (ctx);
+}
+
static void
quic_receive_connection (void *arg)
{
new_ctx_id);
- memcpy (new_ctx, temp_ctx, sizeof (quic_ctx_t));
+ clib_memcpy (new_ctx, temp_ctx, sizeof (quic_ctx_t));
clib_mem_free (temp_ctx);
new_ctx->c_thread_index = thread_index;
/* Trigger write on this connection if necessary */
udp_session = session_get_from_handle (new_ctx->udp_session_handle);
+ udp_session->opaque = new_ctx_id;
+ udp_session->flags &= ~SESSION_F_IS_MIGRATING;
if (svm_fifo_max_dequeue (udp_session->tx_fifo))
- if (session_send_io_evt_to_thread (udp_session->tx_fifo,
- SESSION_IO_EVT_TX))
- QUIC_DBG (4, "Cannot send TX event");
+ quic_set_udp_tx_evt (udp_session);
}
static void
quic_transfer_connection (u32 ctx_index, u32 dest_thread)
{
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
quic_ctx_t *ctx, *temp_ctx;
u32 thread_index = vlib_get_thread_index ();
ASSERT (temp_ctx);
ctx = quic_ctx_get (ctx_index, thread_index);
- memcpy (temp_ctx, ctx, sizeof (quic_ctx_t));
+ clib_memcpy (temp_ctx, ctx, sizeof (quic_ctx_t));
- /* Remove from timer wheel and thread-local pool */
- if (ctx->timer_handle != QUIC_TIMER_HANDLE_INVALID)
- {
- tw = &quic_main.wrk_ctx[thread_index].timer_wheel;
- tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
- }
+ quic_stop_ctx_timer (ctx);
quic_ctx_free (ctx);
/* Send connection to destination thread */
}
static int
-quic_session_connected_callback (u32 quic_app_index, u32 ctx_index,
- session_t * udp_session, u8 is_fail)
+quic_udp_session_connected_callback (u32 quic_app_index, u32 ctx_index,
+ session_t * udp_session, u8 is_fail)
{
QUIC_DBG (2, "QSession is now connected (id %u)",
udp_session->session_index);
is_fail, thread_index, (ctx) ? ctx_index : ~0);
ctx->udp_session_handle = session_handle (udp_session);
- udp_session->opaque = ctx->parent_app_id;
+ udp_session->opaque = ctx_index;
/* Init QUIC lib connection
* Generate required sockaddr & salen */
quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
ret = quicly_connect (&ctx->conn, quicly_ctx, (char *) ctx->srv_hostname,
- sa, salen, &quic_main.next_cid,
+ sa, NULL, &quic_main.next_cid, ptls_iovec_init (NULL,
+ 0),
&quic_main.hs_properties, NULL);
++quic_main.next_cid.master_id;
/* Save context handle in quicly connection */
}
static void
-quic_session_disconnect_callback (session_t * s)
+quic_udp_session_disconnect_callback (session_t * s)
{
clib_warning ("UDP session disconnected???");
}
static void
-quic_session_reset_callback (session_t * s)
+quic_udp_session_cleanup_callback (session_t * udp_session,
+ session_cleanup_ntf_t ntf)
+{
+ quic_ctx_t *ctx;
+
+ if (ntf != SESSION_CLEANUP_SESSION)
+ return;
+
+ ctx = quic_ctx_get (udp_session->opaque, udp_session->thread_index);
+ quic_stop_ctx_timer (ctx);
+ quic_ctx_free (ctx);
+}
+
+static void
+quic_udp_session_reset_callback (session_t * s)
{
clib_warning ("UDP session reset???");
}
static void
quic_udp_session_migrate_callback (session_t * s, session_handle_t new_sh)
{
  /* The UDP session under a QUIC connection moved to another thread:
   * re-point the ctx at the new handle and move the quicly connection
   * state over to the destination thread. */
  u32 new_thread = session_thread_from_handle (new_sh);
  quic_ctx_t *ctx;

  QUIC_ERR ("Session %x migrated to %lx", s->session_index, new_sh);
  /* Must run on the session's current owner thread */
  ASSERT (vlib_get_thread_index () == s->thread_index);
  /* s->opaque is the quic ctx index on this thread */
  ctx = quic_ctx_get (s->opaque, s->thread_index);
  ASSERT (ctx->udp_session_handle == session_handle (s));

  ctx->udp_session_handle = new_sh;
#if QUIC_DEBUG >= 1
  /* Poison the old session's opaque to catch stale uses in debug builds */
  s->opaque = 0xfeedface;
#endif
  quic_transfer_connection (ctx->c_c_index, new_thread);
}
int
-quic_session_accepted_callback (session_t * udp_session)
+quic_udp_session_accepted_callback (session_t * udp_session)
{
/* New UDP connection, try to accept it */
u32 ctx_index;
- u32 *pool_index;
quic_ctx_t *ctx, *lctx;
session_t *udp_listen_session;
u32 thread_index = vlib_get_thread_index ();
ctx->conn_state = QUIC_CONN_STATE_OPENED;
ctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
- udp_session->opaque = ctx->parent_app_id;
+ /* Also store it in ctx for convenience
+ * Waiting for crypto_ctx logic */
+ ctx->quicly_ctx = lctx->quicly_ctx;
- /* Put this ctx in the "opening" pool */
- pool_get (quic_main.wrk_ctx[ctx->c_thread_index].opening_ctx_pool,
- pool_index);
- *pool_index = ctx_index;
+ udp_session->opaque = ctx_index;
/* TODO timeout to delete these if they never connect */
return 0;
static int
quic_add_segment_callback (u32 client_index, u64 seg_handle)
{
  /* No-op for builtin */
  return 0;
}
static int
quic_del_segment_callback (u32 client_index, u64 seg_handle)
{
  /* No-op for builtin */
  return 0;
}
if (PREDICT_FALSE
(stream_session->session_state >= SESSION_STATE_TRANSPORT_CLOSING))
return 0;
- ctx =
- quic_ctx_get (stream_session->connection_index,
- stream_session->thread_index);
+ ctx = quic_ctx_get (stream_session->connection_index,
+ stream_session->thread_index);
if (PREDICT_FALSE (!quic_ctx_is_stream (ctx)))
{
goto tx_end; /* Most probably a reschedule */
stream = ctx->stream;
if (!quicly_sendstate_is_open (&stream->sendstate))
{
- QUIC_DBG (1, "Warning: tried to send on closed stream");
+ QUIC_ERR ("Warning: tried to send on closed stream");
return -1;
}
}
ctx_ = quic_ctx_get (index, vlib_get_thread_index ());
conn_ = ctx_->conn;
- if (conn_ && quicly_is_destination (conn_, sa, salen, packet))
+ if (conn_ && quicly_is_destination (conn_, NULL, sa, packet))
{
QUIC_DBG (3, "Connection found");
*ctx_index = index;
}
static int
-quic_create_quic_session (quic_ctx_t * ctx)
+quic_accept_connection (u32 ctx_index, struct sockaddr *sa,
+ socklen_t salen, quicly_decoded_packet_t packet)
{
+ u32 thread_index = vlib_get_thread_index ();
+ quicly_context_t *quicly_ctx;
session_t *quic_session;
+ clib_bihash_kv_16_8_t kv;
app_worker_t *app_wrk;
+ quicly_conn_t *conn;
+ quic_ctx_t *ctx;
quic_ctx_t *lctx;
int rv;
+ /* new connection, accept and create context if packet is valid
+ * TODO: check if socket is actually listening? */
+ ctx = quic_ctx_get (ctx_index, thread_index);
+ if (ctx->c_s_index != QUIC_SESSION_INVALID)
+ {
+ QUIC_DBG (2, "already accepted ctx 0x%x", ctx_index);
+ return -1;
+ }
+
+ quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
+ if ((rv = quicly_accept (&conn, quicly_ctx, NULL, sa,
+ &packet, NULL, &quic_main.next_cid, NULL)))
+ {
+ /* Invalid packet, pass */
+ assert (conn == NULL);
+ QUIC_ERR ("Accept failed with %U", quic_format_err, rv);
+ /* TODO: cleanup created quic ctx and UDP session */
+ return 0;
+ }
+ assert (conn != NULL);
+
+ ++quic_main.next_cid.master_id;
+ /* Save ctx handle in quicly connection */
+ quic_store_conn_ctx (conn, ctx);
+ ctx->conn = conn;
+ ctx->conn_state = QUIC_CONN_STATE_HANDSHAKE;
+
quic_session = session_alloc (ctx->c_thread_index);
QUIC_DBG (2, "Allocated quic_session, 0x%lx ctx %u",
session_handle (quic_session), ctx->c_c_index);
* but we still need fifos for the events? */
if ((rv = app_worker_init_accepted (quic_session)))
{
- QUIC_DBG (1, "failed to allocate fifos");
+ QUIC_ERR ("failed to allocate fifos");
session_free (quic_session);
return rv;
}
app_wrk = app_worker_get (quic_session->app_wrk_index);
if ((rv = app_worker_accept_notify (app_wrk, quic_session)))
{
- QUIC_DBG (1, "failed to notify accept worker app");
+ QUIC_ERR ("failed to notify accept worker app");
return rv;
}
- return 0;
-}
-
-static int
-quic_create_connection (u32 ctx_index, struct sockaddr *sa,
- socklen_t salen, quicly_decoded_packet_t packet)
-{
- clib_bihash_kv_16_8_t kv;
- quic_ctx_t *ctx;
- quicly_conn_t *conn;
- u32 thread_index = vlib_get_thread_index ();
- quicly_context_t *quicly_ctx;
- int rv;
-
- /* new connection, accept and create context if packet is valid
- * TODO: check if socket is actually listening? */
- ctx = quic_ctx_get (ctx_index, thread_index);
- quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
- if ((rv = quicly_accept (&conn, quicly_ctx, sa, salen,
- &packet, ptls_iovec_init (NULL, 0),
- &quic_main.next_cid, NULL)))
- {
- /* Invalid packet, pass */
- assert (conn == NULL);
- QUIC_DBG (1, "Accept failed with %d", rv);
- /* TODO: cleanup created quic ctx and UDP session */
- return 0;
- }
- assert (conn != NULL);
-
- ++quic_main.next_cid.master_id;
- /* Save ctx handle in quicly connection */
- quic_store_conn_ctx (conn, ctx);
- ctx->conn = conn;
- ctx->conn_state = QUIC_CONN_STATE_HANDSHAKE;
-
- quic_create_quic_session (ctx);
/* Register connection in connections map */
quic_make_connection_key (&kv, quicly_get_master_id (conn));
|| packet.cid.dest.plaintext.thread_id != 0)
return 0;
quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
- dgram = quicly_send_stateless_reset (quicly_ctx, sa, salen,
+ dgram = quicly_send_stateless_reset (quicly_ctx, sa, NULL,
&packet.cid.dest.plaintext);
if (dgram == NULL)
return 1;
udp_session = session_get_from_handle (udp_session_handle);
rv = quic_send_datagram (udp_session, dgram);
- if (svm_fifo_set_event (udp_session->tx_fifo))
- session_send_io_evt_to_thread (udp_session->tx_fifo, SESSION_IO_EVT_TX);
+ quic_set_udp_tx_evt (udp_session);
return rv;
}
-typedef struct quic_rx_packet_ctx_
-{
- quicly_decoded_packet_t packet;
- u8 data[QUIC_MAX_PACKET_SIZE];
- u32 ctx_index;
- u32 thread_index;
-} quic_rx_packet_ctx_t;
-
-static void
-check_quic_client_connected (struct quic_rx_packet_ctx_ *quic_rx_ctx)
-{
- /* ctx pointer may change if a new stream is opened */
- quic_ctx_t *ctx = quic_ctx_get (quic_rx_ctx->ctx_index,
- quic_rx_ctx->thread_index);
- /* Conn may be set to null if the connection is terminated */
- if (ctx->conn && ctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
- {
- if (quicly_connection_is_ready (ctx->conn))
- {
- ctx->conn_state = QUIC_CONN_STATE_READY;
- if (quicly_is_client (ctx->conn))
- {
- quic_on_client_connected (ctx);
- }
- }
- }
-
-}
-
static int
-quic_process_one_rx_packet (u64 udp_session_handle,
- quicly_context_t * quicly_ctx, svm_fifo_t * f,
+quic_process_one_rx_packet (u64 udp_session_handle, svm_fifo_t * f,
u32 * fifo_offset, u32 * max_packet, u32 packet_n,
quic_rx_packet_ctx_t * packet_ctx)
{
size_t plen;
struct sockaddr_in6 sa6;
struct sockaddr *sa = (struct sockaddr *) &sa6;
+ session_t *udp_session;
socklen_t salen;
u32 full_len, ret;
int err, rv = 0;
packet_ctx->thread_index = UINT32_MAX;
packet_ctx->ctx_index = UINT32_MAX;
u32 thread_index = vlib_get_thread_index ();
- u32 *opening_ctx_pool, *ctx_index_ptr;
u32 cur_deq = svm_fifo_max_dequeue (f) - *fifo_offset;
+ quicly_context_t *quicly_ctx;
if (cur_deq == 0)
{
if (cur_deq < SESSION_CONN_HDR_LEN)
{
- QUIC_DBG (1, "Not enough data for even a header in RX");
+ QUIC_ERR ("Not enough data for even a header in RX");
return 1;
}
ret = svm_fifo_peek (f, *fifo_offset, SESSION_CONN_HDR_LEN, (u8 *) & ph);
if (ret != SESSION_CONN_HDR_LEN)
{
- QUIC_DBG (1, "Not enough data for header in RX");
+ QUIC_ERR ("Not enough data for header in RX");
return 1;
}
ASSERT (ph.data_offset == 0);
full_len = ph.data_length + SESSION_CONN_HDR_LEN;
if (full_len > cur_deq)
{
- QUIC_DBG (1, "Not enough data in fifo RX");
+ QUIC_ERR ("Not enough data in fifo RX");
return 1;
}
/* Quicly can read len bytes from the fifo at offset:
* ph.data_offset + SESSION_CONN_HDR_LEN */
- ret =
- svm_fifo_peek (f, SESSION_CONN_HDR_LEN + *fifo_offset, ph.data_length,
- packet_ctx->data);
+ ret = svm_fifo_peek (f, SESSION_CONN_HDR_LEN + *fifo_offset,
+ ph.data_length, packet_ctx->data);
if (ret != ph.data_length)
{
- QUIC_DBG (1, "Not enough data peeked in RX");
+ QUIC_ERR ("Not enough data peeked in RX");
return 1;
}
rv = 0;
quic_build_sockaddr (sa, &salen, &ph.rmt_ip, ph.rmt_port, ph.is_ip4);
quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
- plen =
- quicly_decode_packet (quicly_ctx, &packet_ctx->packet, packet_ctx->data,
- ph.data_length);
+ plen = quicly_decode_packet (quicly_ctx, &packet_ctx->packet,
+ packet_ctx->data, ph.data_length);
if (plen == SIZE_MAX)
{
return 1;
}
- err =
- quic_find_packet_ctx (&packet_ctx->thread_index, &packet_ctx->ctx_index,
- sa, salen, &packet_ctx->packet, thread_index);
+ err = quic_find_packet_ctx (&packet_ctx->thread_index,
+ &packet_ctx->ctx_index, sa, salen,
+ &packet_ctx->packet, thread_index);
if (err == 0)
{
ctx = quic_ctx_get (packet_ctx->ctx_index, thread_index);
- rv = quicly_receive (ctx->conn, &packet_ctx->packet);
+ rv = quicly_receive (ctx->conn, NULL, sa, &packet_ctx->packet);
if (rv)
- QUIC_DBG (1, "quicly_receive return error %d", rv);
+ QUIC_ERR ("quicly_receive errored %U", quic_format_err, rv);
}
else if (packet_ctx->ctx_index != UINT32_MAX)
{
*max_packet = packet_n + 1;
return 0;
}
- else if ((packet_ctx->packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) ==
- QUICLY_PACKET_TYPE_INITIAL)
+ else if (QUICLY_PACKET_IS_LONG_HEADER (packet_ctx->packet.octets.base[0]))
{
- /* Try to find matching "opening" ctx */
- opening_ctx_pool = quic_main.wrk_ctx[thread_index].opening_ctx_pool;
-
- /* *INDENT-OFF* */
- pool_foreach (ctx_index_ptr, opening_ctx_pool,
- ({
- ctx = quic_ctx_get (*ctx_index_ptr, thread_index);
- if (ctx->udp_session_handle == udp_session_handle)
- {
- /* Right ctx found, create conn & remove from pool */
- quic_create_connection(*ctx_index_ptr, sa, salen, packet_ctx->packet);
- *max_packet = packet_n + 1;
- packet_ctx->thread_index = thread_index;
- packet_ctx->ctx_index = *ctx_index_ptr;
- pool_put (opening_ctx_pool, ctx_index_ptr);
- goto updateOffset;
- }
- }));
- /* *INDENT-ON* */
+ udp_session = session_get_from_handle (udp_session_handle);
+ if ((rv = quic_accept_connection (udp_session->opaque, sa,
+ salen, packet_ctx->packet)))
+ {
+ QUIC_ERR ("quic accept errored with %d", rv);
+ }
}
else
{
packet_ctx->packet);
}
-updateOffset:
*fifo_offset += SESSION_CONN_HDR_LEN + ph.data_length;
return 0;
}
static int
quic_udp_session_rx_callback (session_t * udp_session)
{
  /* Read data from UDP rx_fifo and pass it to the quicly conn. */
  quic_ctx_t *ctx = NULL;
  svm_fifo_t *f;
  u32 max_deq;
  u64 udp_session_handle = session_handle (udp_session);
  int rv = 0;
  u32 thread_index = vlib_get_thread_index ();
  quic_rx_packet_ctx_t packets_ctx[16];
  u32 i, fifo_offset, max_packets;

  /* Packets received while the session migrates will be handled on the
   * destination thread once migration completes */
  if (udp_session->flags & SESSION_F_IS_MIGRATING)
    {
      QUIC_DBG (3, "RX on migrating udp session");
      return 0;
    }

  while (1)
    {
      udp_session = session_get_from_handle (udp_session_handle);	/* session alloc might have happened */
      f = udp_session->rx_fifo;
      max_deq = svm_fifo_max_dequeue (f);
      if (max_deq == 0)
	return 0;

      fifo_offset = 0;
      /* NOTE: 16 must match the packets_ctx array size above; the per-packet
       * handler may lower max_packets to end the batch early */
      max_packets = 16;
      for (i = 0; i < max_packets; i++)
	quic_process_one_rx_packet (udp_session_handle, f, &fifo_offset,
				    &max_packets, i, &packets_ctx[i]);

      for (i = 0; i < max_packets; i++)
	{
	  /* Skip packets whose connection lives on (or moved to) another
	   * thread */
	  if (packets_ctx[i].thread_index != thread_index)
	    continue;
	  /* ctx pointer may have been invalidated by pool reallocs during
	   * packet processing: re-fetch before each use */
	  ctx = quic_ctx_get (packets_ctx[i].ctx_index,
			      packets_ctx[i].thread_index);
	  quic_check_quic_session_connected (ctx);
	  ctx = quic_ctx_get (packets_ctx[i].ctx_index,
			      packets_ctx[i].thread_index);
	  quic_send_packets (ctx);
	}
      /* Only drop what was actually consumed this batch */
      svm_fifo_dequeue_drop (f, fifo_offset);
    }
  return rv;
}
quic_common_get_transport_endpoint (ctx, tep, is_lcl);
}
-/*****************************************************************************
- * END TRANSPORT PROTO FUNCTIONS
-*****************************************************************************/
-
/* Callbacks invoked by the session layer for the underlying UDP sessions
 * that carry QUIC traffic (quic acts as a builtin app over UDP). */
/* *INDENT-OFF* */
static session_cb_vft_t quic_app_cb_vft = {
  .session_accept_callback = quic_udp_session_accepted_callback,
  .session_disconnect_callback = quic_udp_session_disconnect_callback,
  .session_connected_callback = quic_udp_session_connected_callback,
  .session_reset_callback = quic_udp_session_reset_callback,
  .session_migrate_callback = quic_udp_session_migrate_callback,
  .add_segment_callback = quic_add_segment_callback,
  .del_segment_callback = quic_del_segment_callback,
  .builtin_app_rx_callback = quic_udp_session_rx_callback,
  .session_cleanup_callback = quic_udp_session_cleanup_callback,
};
static const transport_proto_vft_t quic_proto = {
/* *INDENT-ON* */
static void
quic_register_cipher_suite (crypto_engine_type_t type,
			    ptls_cipher_suite_t ** ciphers)
{
  quic_main_t *qm = &quic_main;
  /* Grow the cipher table so 'type' is a valid index */
  vec_validate (qm->quic_ciphers, type);
  /* Mark this crypto engine as usable */
  clib_bitmap_set (qm->available_crypto_engines, type, 1);
  qm->quic_ciphers[type] = ciphers;
}
+static void
+quic_update_fifo_size ()
+{
+ quic_main_t *qm = &quic_main;
+ segment_manager_props_t *seg_mgr_props =
+ application_get_segment_manager_properties (qm->app_index);
+
+ if (!seg_mgr_props)
+ {
+ clib_warning
+ ("error while getting segment_manager_props_t, can't update fifo-size");
+ return;
+ }
+
+ seg_mgr_props->tx_fifo_size = qm->udp_fifo_size;
+ seg_mgr_props->rx_fifo_size = qm->udp_fifo_size;
+}
+
static clib_error_t *
quic_init (vlib_main_t * vm)
{
vnet_app_attach_args_t _a, *a = &_a;
u64 options[APP_OPTIONS_N_OPTIONS];
quic_main_t *qm = &quic_main;
- u32 fifo_size = QUIC_FIFO_SIZE;
u32 num_threads, i;
num_threads = 1 /* main thread */ + vtm->n_threads;
- memset (a, 0, sizeof (*a));
- memset (options, 0, sizeof (options));
+ clib_memset (a, 0, sizeof (*a));
+ clib_memset (options, 0, sizeof (options));
a->session_cb_vft = &quic_app_cb_vft;
a->api_client_index = APP_INVALID_INDEX;
a->name = format (0, "quic");
a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
- a->options[APP_OPTIONS_RX_FIFO_SIZE] = fifo_size;
- a->options[APP_OPTIONS_TX_FIFO_SIZE] = fifo_size;
+ a->options[APP_OPTIONS_RX_FIFO_SIZE] = qm->udp_fifo_size;
+ a->options[APP_OPTIONS_TX_FIFO_SIZE] = qm->udp_fifo_size;
+ a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = qm->udp_fifo_prealloc;
a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
a->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
a->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_IS_TRANSPORT_APP;
transport_register_protocol (TRANSPORT_PROTO_QUIC, &quic_proto,
FIB_PROTOCOL_IP6, ~0);
+ clib_bitmap_alloc (qm->available_crypto_engines,
+ app_crypto_engine_n_types ());
quic_register_cipher_suite (CRYPTO_ENGINE_VPP, quic_crypto_cipher_suites);
quic_register_cipher_suite (CRYPTO_ENGINE_PICOTLS,
ptls_openssl_cipher_suites);
- qm->default_cipher = CRYPTO_ENGINE_PICOTLS;
+ qm->default_crypto_engine = CRYPTO_ENGINE_PICOTLS;
vec_free (a->name);
return 0;
}
return clib_error_return (0, "unknown input '%U'",
format_unformat_error, input);
if (unformat (input, "vpp"))
- qm->default_cipher = CRYPTO_ENGINE_VPP;
+ qm->default_crypto_engine = CRYPTO_ENGINE_VPP;
else if (unformat (input, "picotls"))
- qm->default_cipher = CRYPTO_ENGINE_PICOTLS;
+ qm->default_crypto_engine = CRYPTO_ENGINE_PICOTLS;
else
return clib_error_return (0, "unknown input '%U'",
format_unformat_error, input);
return 0;
}
+u64 quic_fifosize = 0;
+static clib_error_t *
+quic_plugin_set_fifo_size_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ quic_main_t *qm = &quic_main;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ uword tmp;
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U", unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000ULL)
+ {
+ return clib_error_return
+ (0, "fifo-size %llu (0x%llx) too large", tmp, tmp);
+ }
+ qm->udp_fifo_size = tmp;
+ quic_update_fifo_size ();
+ }
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input);
+ }
+
+ return 0;
+}
+
static u8 *
quic_format_ctx_stat (u8 * s, va_list * args)
{
return 0;
}
+static clib_error_t *
+quic_show_ctx_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ quic_main_t *qm = &quic_main;
+ quic_ctx_t *ctx = NULL;
+ u32 num_workers = vlib_num_workers ();
+
+ for (int i = 0; i < num_workers + 1; i++)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach (ctx, qm->ctx_pool[i],
+ ({
+ vlib_cli_output (vm, "%U", format_quic_ctx, ctx, 1);
+ }));
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
/* CLI command and plugin registrations */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (quic_plugin_crypto_command, static) =
{
  .path = "quic set crypto api",
  .short_help = "quic set crypto api [picotls, vpp]",
  .function = quic_plugin_crypto_command_fn,
};
VLIB_CLI_COMMAND(quic_plugin_set_fifo_size_command, static)=
{
  .path = "quic set fifo-size",
  .short_help = "quic set fifo-size N[K|M|G] (default 64K)",
  .function = quic_plugin_set_fifo_size_command_fn,
};
VLIB_CLI_COMMAND(quic_plugin_stats_command, static)=
{
  .path = "show quic stats",
  .short_help = "show quic stats",
  .function = quic_plugin_showstats_command_fn,
};
VLIB_CLI_COMMAND(quic_show_ctx_command, static)=
{
  .path = "show quic ctx",
  .short_help = "show quic ctx",
  .function = quic_show_ctx_command_fn,
};
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Quic transport protocol",
  .default_disabled = 1,
};
/* *INDENT-ON* */
+
+static clib_error_t *
+quic_config_fn (vlib_main_t * vm, unformat_input_t * input)
+{
+ quic_main_t *qm = &quic_main;
+ uword tmp;
+
+ qm->udp_fifo_size = QUIC_DEFAULT_FIFO_SIZE;
+ qm->udp_fifo_prealloc = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "fifo-size %U", unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000ULL)
+ {
+ return clib_error_return
+ (0, "fifo-size %llu (0x%llx) too large", tmp, tmp);
+ }
+ qm->udp_fifo_size = tmp;
+ }
+ else
+ if (unformat
+ (input, "fifo-prealloc %u", &quic_main.udp_fifo_prealloc))
+ ;
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+VLIB_EARLY_CONFIG_FUNCTION (quic_config_fn, "quic");
static uword
quic_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,