memset (ctx, 0, sizeof (quic_ctx_t));
ctx->c_thread_index = thread_index;
- QUIC_DBG (1, "Allocated quic_ctx %u on thread %u",
+ QUIC_DBG (3, "Allocated quic_ctx %u on thread %u",
ctx - qm->ctx_pool[thread_index], thread_index);
return ctx - qm->ctx_pool[thread_index];
}
}
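+/* Retrieve the quicly context from the application that owns the given
+ * UDP session */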
static quicly_context_t *
-quic_get_quicly_ctx_from_udp (u32 udp_session_handle)
+quic_get_quicly_ctx_from_udp (u64 udp_session_handle)
{
session_t *udp_session;
application_t *app;
SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
- rv = app_worker_accept_notify (app_wrk, stream_session);
- if (rv)
+ if ((rv = app_worker_accept_notify (app_wrk, stream_session)))
{
QUIC_DBG (1, "failed to notify accept worker app");
session_free_w_fifos (stream_session);
quicly_ctx_data_t *quicly_ctx_data =
clib_mem_alloc (sizeof (quicly_ctx_data_t));
+ /* picotls depends on the context being zeroed */
+ clib_memset (quicly_ctx_data, 0, sizeof (*quicly_ctx_data));
quicly_ctx = &quicly_ctx_data->quicly_ctx;
ptls_context_t *ptls_ctx = &quicly_ctx_data->ptls_ctx;
ptls_ctx->random_bytes = ptls_openssl_random_bytes;
quicly_stream_t *stream = ctx->stream;
quicly_reset_stream (stream, QUIC_APP_ERROR_CLOSE_NOTIFY);
quic_send_packets (ctx);
+ return;
}
switch (ctx->conn_state)
app_worker_t *app_wrk;
u32 ctx_id = ctx->c_c_index;
u32 thread_index = ctx->c_thread_index;
+ int rv;
app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_id);
if (!app_wrk)
}
quic_session->session_state = SESSION_STATE_CONNECTING;
- if (app_worker_connect_notify (app_wrk, quic_session, ctx->client_opaque))
+ if ((rv = app_worker_connect_notify (app_wrk, quic_session,
+ ctx->client_opaque)))
{
- QUIC_DBG (1, "failed to notify app");
+ QUIC_DBG (1, "failed to notify app %d", rv);
quic_proto_on_close (ctx_id, thread_index);
return -1;
}
quic_ctx_t *temp_ctx, *new_ctx;
clib_bihash_kv_16_8_t kv;
quicly_conn_t *conn;
+ session_t *udp_session;
temp_ctx = arg;
new_ctx_id = quic_ctx_alloc (thread_index);
memcpy (new_ctx, temp_ctx, sizeof (quic_ctx_t));
- free (temp_ctx);
+ clib_mem_free (temp_ctx);
new_ctx->c_thread_index = thread_index;
new_ctx->c_c_index = new_ctx_id;
new_ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
quic_update_timer (new_ctx);
- /* Trigger read on this connection ? */
+ /* Trigger a write on this connection if the UDP tx fifo holds data
+ * (e.g. packets enqueued while the connection was being moved) */
+ udp_session = session_get_from_handle (new_ctx->udp_session_handle);
+ if (svm_fifo_max_dequeue (udp_session->tx_fifo))
+ if (session_send_io_evt_to_thread (udp_session->tx_fifo,
+ SESSION_IO_EVT_TX))
+ QUIC_DBG (4, "Cannot send TX event");
}
static void
{
tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
quic_ctx_t *ctx, *temp_ctx;
- clib_bihash_kv_16_8_t kv;
- quicly_conn_t *conn;
u32 thread_index = vlib_get_thread_index ();
QUIC_DBG (2, "Transferring conn %u to thread %u", ctx_index, dest_thread);
- temp_ctx = malloc (sizeof (quic_ctx_t));
+ temp_ctx = clib_mem_alloc (sizeof (quic_ctx_t));
ASSERT (temp_ctx);
ctx = quic_ctx_get (ctx_index, thread_index);
memcpy (temp_ctx, ctx, sizeof (quic_ctx_t));
- /* Remove from lookup hash, timer wheel and thread-local pool */
- conn = ctx->conn;
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 0 /* is_add */ );
+ /* Remove from timer wheel and thread-local pool */
if (ctx->timer_handle != QUIC_TIMER_HANDLE_INVALID)
{
tw = &quic_main.wrk_ctx[thread_index].timer_wheel;
(void *) temp_ctx);
}
-static void
-quic_transfer_connection_rpc (void *arg)
-{
- u64 arg_int = (u64) arg;
- u32 ctx_index, dest_thread;
-
- ctx_index = (u32) (arg_int >> 32);
- dest_thread = (u32) (arg_int & UINT32_MAX);
- quic_transfer_connection (ctx_index, dest_thread);
-}
-
-/*
- * This assumes that the connection is not yet associated to a session
- * So currently it only works on the client side when receiving the first packet
- * from the server
- */
-static void
-quic_move_connection_to_thread (u32 ctx_index, u32 owner_thread,
- u32 to_thread)
-{
- QUIC_DBG (2, "Requesting transfer of conn %u from thread %u", ctx_index,
- owner_thread);
- u64 arg = ((u64) ctx_index) << 32 | to_thread;
- session_send_rpc_evt_to_thread (owner_thread, quic_transfer_connection_rpc,
- (void *) arg);
-}
-
static int
quic_session_connected_callback (u32 quic_app_index, u32 ctx_index,
session_t * udp_session, u8 is_fail)
ctx->udp_session_handle = session_handle (udp_session);
udp_session->opaque = ctx->parent_app_id;
- udp_session->session_state = SESSION_STATE_READY;
/* Init QUIC lib connection
* Generate required sockaddr & salen */
QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
- quic_send_packets (ctx);
-
/* UDP stack quirk? preemptively transfer connection if that happens */
if (udp_session->thread_index != thread_index)
quic_transfer_connection (ctx_index, udp_session->thread_index);
+ else
+ quic_send_packets (ctx);
return ret;
}
clib_warning ("UDP session reset???");
}
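+/* The session layer has moved a UDP session to a new thread: find the
+ * quic_ctx using it and transfer the quicly connection along with it */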
+static void
+quic_session_migrate_callback (session_t * s, session_handle_t new_sh)
+{
+ /*
+ * TODO: we need a better way to get the connection from the session.
+ * This will become possible once we stop storing the app id in the UDP
+ * session opaque.
+ */
+ u32 thread_index = vlib_get_thread_index ();
+ u64 old_session_handle = session_handle (s);
+ u32 new_thread = session_thread_from_handle (new_sh);
+ quic_ctx_t *ctx;
+
+ QUIC_DBG (1, "Session %x migrated to %lx", s->session_index, new_sh);
+ /* *INDENT-OFF* */
+ pool_foreach (ctx, quic_main.ctx_pool[thread_index],
+ ({
+ if (ctx->udp_session_handle == old_session_handle)
+ {
+ /* Right ctx found, move associated conn */
+ QUIC_DBG (5, "Found right ctx: %x", ctx->c_c_index);
+ ctx->udp_session_handle = new_sh;
+ quic_transfer_connection (ctx->c_c_index, new_thread);
+ return;
+ }
+ }));
+ /* *INDENT-ON* */
+ QUIC_DBG (0, "BUG: Connection to migrate not found");
+}
+
int
quic_session_accepted_callback (session_t * udp_session)
{
return 0;
}
-
static int
quic_custom_app_rx_callback (transport_connection_t * tc)
{
/*
* Returns 0 if a matching connection is found and is on the right thread.
+ * Otherwise returns -1.
* If a connection is found, even on the wrong thread, ctx_thread and ctx_index
* will be set.
*/
if (clib_bihash_search_16_8 (h, &kv, &kv) == 0)
{
u32 index = kv.value & UINT32_MAX;
- u8 thread_id = kv.value >> 32;
+ u32 thread_id = kv.value >> 32;
/* Check if this connection belongs to this thread, otherwise
* ask for it to be moved */
if (thread_id != caller_thread_index)
return -1;
}
-static int
-quic_receive (quic_ctx_t * ctx, quicly_conn_t * conn,
- quicly_decoded_packet_t packet)
-{
- int rv;
- u32 ctx_id = ctx->c_c_index;
- u32 thread_index = ctx->c_thread_index;
- /* TODO : QUICLY_ERROR_PACKET_IGNORED sould be handled */
- rv = quicly_receive (conn, &packet);
- if (rv)
- {
- QUIC_DBG (2, "quicly_receive errored %U", quic_format_err, rv);
- return 0;
- }
- /* ctx pointer may change if a new stream is opened */
- ctx = quic_ctx_get (ctx_id, thread_index);
- /* Conn may be set to null if the connection is terminated */
- if (ctx->conn && ctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
- {
- if (quicly_connection_is_ready (conn))
- {
- ctx->conn_state = QUIC_CONN_STATE_READY;
- if (quicly_is_client (conn))
- {
- quic_on_client_connected (ctx);
- ctx = quic_ctx_get (ctx_id, thread_index);
- }
- }
- }
- return quic_send_packets (ctx);
-}
-
static int
quic_create_quic_session (quic_ctx_t * ctx)
{
return rv;
}
app_wrk = app_worker_get (quic_session->app_wrk_index);
- rv = app_worker_accept_notify (app_wrk, quic_session);
- if (rv)
+ if ((rv = app_worker_accept_notify (app_wrk, quic_session)))
{
QUIC_DBG (1, "failed to notify accept worker app");
return rv;
return rv;
}
-static int
-quic_app_rx_callback (session_t * udp_session)
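+/* Per-datagram context kept across the two RX passes: the raw bytes must
+ * outlive quicly_decode_packet, which keeps pointers into them */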
+typedef struct quic_rx_packet_ctx_
{
- /* Read data from UDP rx_fifo and pass it to the quicly conn. */
quicly_decoded_packet_t packet;
+ u8 data[QUIC_MAX_PACKET_SIZE];
+ u32 ctx_index;
+ u32 thread_index;
+} quic_rx_packet_ctx_t;
+
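+/* After feeding a packet to quicly, promote the connection to READY if the
+ * handshake completed, and run the client-side connected logic if needed */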
+static void
+check_quic_client_connected (struct quic_rx_packet_ctx_ *quic_rx_ctx)
+{
+ /* ctx pointer may change if a new stream is opened */
+ quic_ctx_t *ctx = quic_ctx_get (quic_rx_ctx->ctx_index,
+ quic_rx_ctx->thread_index);
+ /* Conn may be set to null if the connection is terminated */
+ if (ctx->conn && ctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
+ {
+ if (quicly_connection_is_ready (ctx->conn))
+ {
+ ctx->conn_state = QUIC_CONN_STATE_READY;
+ if (quicly_is_client (ctx->conn))
+ {
+ quic_on_client_connected (ctx);
+ }
+ }
+ }
+}
+
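+/*
+ * Parse one datagram from the fifo at *fifo_offset and feed it to quicly.
+ * Returns 1 when the fifo does not yet hold a complete datagram.
+ * Sets *max_packet to packet_n + 1 to end the batch early: fifo empty,
+ * connection owned by another thread, or connection just created.
+ */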
+static int
+quic_process_one_rx_packet (u64 udp_session_handle,
+ quicly_context_t * quicly_ctx, svm_fifo_t * f,
+ u32 * fifo_offset, u32 * max_packet, u32 packet_n,
+ quic_rx_packet_ctx_t * packet_ctx)
+{
session_dgram_hdr_t ph;
- application_t *app;
quic_ctx_t *ctx = NULL;
- svm_fifo_t *f;
size_t plen;
struct sockaddr_in6 sa6;
struct sockaddr *sa = (struct sockaddr *) &sa6;
socklen_t salen;
- u32 max_deq, full_len, ctx_index = UINT32_MAX, ctx_thread = UINT32_MAX, ret;
- u8 *data;
- int err;
+ u32 full_len, ret;
+ int err, rv = 0;
+ packet_ctx->thread_index = UINT32_MAX;
+ packet_ctx->ctx_index = UINT32_MAX;
+ u32 thread_index = vlib_get_thread_index ();
u32 *opening_ctx_pool, *ctx_index_ptr;
+ u32 cur_deq = svm_fifo_max_dequeue (f) - *fifo_offset;
+
+ if (cur_deq == 0)
+ {
+ *max_packet = packet_n + 1;
+ return 0;
+ }
+
+ if (cur_deq < SESSION_CONN_HDR_LEN)
+ {
+ QUIC_DBG (1, "Not enough data for even a header in RX");
+ return 1;
+ }
+ ret = svm_fifo_peek (f, *fifo_offset, SESSION_CONN_HDR_LEN, (u8 *) & ph);
+ if (ret != SESSION_CONN_HDR_LEN)
+ {
+ QUIC_DBG (1, "Not enough data for header in RX");
+ return 1;
+ }
+ ASSERT (ph.data_offset == 0);
+ full_len = ph.data_length + SESSION_CONN_HDR_LEN;
+ if (full_len > cur_deq)
+ {
+ QUIC_DBG (1, "Not enough data in fifo RX");
+ return 1;
+ }
+
+ /* Quicly can read ph.data_length bytes from the fifo at offset:
+ * *fifo_offset + SESSION_CONN_HDR_LEN */
+ ret =
+ svm_fifo_peek (f, SESSION_CONN_HDR_LEN + *fifo_offset, ph.data_length,
+ packet_ctx->data);
+ if (ret != ph.data_length)
+ {
+ QUIC_DBG (1, "Not enough data peeked in RX");
+ return 1;
+ }
+
+ rv = 0;
+ quic_build_sockaddr (sa, &salen, &ph.rmt_ip, ph.rmt_port, ph.is_ip4);
+ quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
+ plen =
+ quicly_decode_packet (quicly_ctx, &packet_ctx->packet, packet_ctx->data,
+ ph.data_length);
+
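+ /* quicly_decode_packet returns SIZE_MAX on a malformed packet: skip it */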
+ if (plen == SIZE_MAX)
+ {
+ *fifo_offset += SESSION_CONN_HDR_LEN + ph.data_length;
+ return 1;
+ }
+
+ err =
+ quic_find_packet_ctx (&packet_ctx->thread_index, &packet_ctx->ctx_index,
+ sa, salen, &packet_ctx->packet, thread_index);
+ if (err == 0)
+ {
+ ctx = quic_ctx_get (packet_ctx->ctx_index, thread_index);
+ rv = quicly_receive (ctx->conn, &packet_ctx->packet);
+ if (rv)
+ QUIC_DBG (1, "quicly_receive return error %d", rv);
+ }
+ else if (packet_ctx->ctx_index != UINT32_MAX)
+ {
+ /* Connection found but on another thread: stop the batch and leave
+ * the packet in the fifo */
+ *max_packet = packet_n + 1;
+ return 0;
+ }
+ else if ((packet_ctx->packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) ==
+ QUICLY_PACKET_TYPE_INITIAL)
+ {
+ /* Try to find matching "opening" ctx */
+ opening_ctx_pool = quic_main.wrk_ctx[thread_index].opening_ctx_pool;
+
+ /* *INDENT-OFF* */
+ pool_foreach (ctx_index_ptr, opening_ctx_pool,
+ ({
+ ctx = quic_ctx_get (*ctx_index_ptr, thread_index);
+ if (ctx->udp_session_handle == udp_session_handle)
+ {
+ /* Right ctx found, create conn & remove from pool */
+ quic_create_connection (*ctx_index_ptr, sa, salen, packet_ctx->packet);
+ *max_packet = packet_n + 1;
+ packet_ctx->thread_index = thread_index;
+ packet_ctx->ctx_index = *ctx_index_ptr;
+ /* pool_put last: the element must not be read once freed */
+ pool_put (opening_ctx_pool, ctx_index_ptr);
+ goto updateOffset;
+ }
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ quic_reset_connection (udp_session_handle, sa, salen,
+ packet_ctx->packet);
+ }
+
+updateOffset:
+ *fifo_offset += SESSION_CONN_HDR_LEN + ph.data_length;
+ return 0;
+}
+
+static int
+quic_app_rx_callback (session_t * udp_session)
+{
+ /* Read data from UDP rx_fifo and pass it to the quicly conn. */
+ application_t *app;
+ quic_ctx_t *ctx = NULL;
+ svm_fifo_t *f;
+ u32 max_deq;
u32 app_index = udp_session->opaque;
u64 udp_session_handle = session_handle (udp_session);
int rv = 0;
+ app = application_get_if_valid (app_index);
u32 thread_index = vlib_get_thread_index ();
- quicly_context_t *quicly_ctx;
+ quic_rx_packet_ctx_t packets_ctx[16];
- app = application_get_if_valid (app_index);
if (!app)
{
QUIC_DBG (1, "Got RX on detached app");
f = udp_session->rx_fifo;
max_deq = svm_fifo_max_dequeue (f);
if (max_deq == 0)
- return 0;
-
- if (max_deq < SESSION_CONN_HDR_LEN)
{
- QUIC_DBG (1, "Not enough data for even a header in RX");
- return 1;
- }
- ret = svm_fifo_peek (f, 0, SESSION_CONN_HDR_LEN, (u8 *) & ph);
- if (ret != SESSION_CONN_HDR_LEN)
- {
- QUIC_DBG (1, "Not enough data for header in RX");
- return 1;
- }
- ASSERT (ph.data_offset == 0);
- full_len = ph.data_length + SESSION_CONN_HDR_LEN;
- if (full_len > max_deq)
- {
- QUIC_DBG (1, "Not enough data in fifo RX");
- return 1;
+ return 0;
}
- /* Quicly can read len bytes from the fifo at offset:
- * ph.data_offset + SESSION_CONN_HDR_LEN */
- data = malloc (ph.data_length);
- ret = svm_fifo_peek (f, SESSION_CONN_HDR_LEN, ph.data_length, data);
- if (ret != ph.data_length)
+ u32 fifo_offset = 0;
+ u32 max_packets = 16;
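+ /* First pass: parse up to max_packets datagrams and feed each to quicly.
+ * The return value can be ignored here: each call first marks its
+ * packets_ctx entry as unmatched, so the second pass stays safe */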
+ for (int i = 0; i < max_packets; i++)
{
- QUIC_DBG (1, "Not enough data peeked in RX");
- free (data);
- return 1;
+ quic_process_one_rx_packet (udp_session_handle,
+ (quicly_context_t *) app->quicly_ctx, f,
+ &fifo_offset, &max_packets, i,
+ &packets_ctx[i]);
}
- rv = 0;
- quic_build_sockaddr (sa, &salen, &ph.rmt_ip, ph.rmt_port, ph.is_ip4);
-
- quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
- plen = quicly_decode_packet (quicly_ctx, &packet, data, ph.data_length);
- if (plen != SIZE_MAX)
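+ /* Second pass: flush quicly output for every packet matched on this
+ * thread */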
+ for (int i = 0; i < max_packets; i++)
{
-
- err = quic_find_packet_ctx (&ctx_thread, &ctx_index, sa, salen,
- &packet, thread_index);
- if (err == 0)
- {
- ctx = quic_ctx_get (ctx_index, thread_index);
- quic_receive (ctx, ctx->conn, packet);
- }
- else if (ctx_thread != UINT32_MAX)
- {
- /* Connection found but on wrong thread, ask move */
- quic_move_connection_to_thread (ctx_index, ctx_thread,
- thread_index);
- }
- else if ((packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) ==
- QUICLY_PACKET_TYPE_INITIAL)
- {
- /* Try to find matching "opening" ctx */
- opening_ctx_pool =
- quic_main.wrk_ctx[thread_index].opening_ctx_pool;
-
- /* *INDENT-OFF* */
- pool_foreach (ctx_index_ptr, opening_ctx_pool,
- ({
- ctx = quic_ctx_get (*ctx_index_ptr, thread_index);
- if (ctx->udp_session_handle == udp_session_handle)
- {
- /* Right ctx found, create conn & remove from pool */
- quic_create_connection (*ctx_index_ptr, sa, salen, packet);
- pool_put (opening_ctx_pool, ctx_index_ptr);
- goto ctx_search_done;
- }
- }));
- /* *INDENT-ON* */
-
- }
- else
- {
- quic_reset_connection (udp_session_handle, sa, salen, packet);
- }
+ if (packets_ctx[i].thread_index != thread_index)
+ continue;
+
+ check_quic_client_connected (&packets_ctx[i]);
+ ctx =
+ quic_ctx_get (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ quic_send_packets (ctx);
}
- ctx_search_done:
- svm_fifo_dequeue_drop (f, full_len);
- free (data);
+ svm_fifo_dequeue_drop (f, fifo_offset);
}
while (1);
return rv;
.session_disconnect_callback = quic_session_disconnect_callback,
.session_connected_callback = quic_session_connected_callback,
.session_reset_callback = quic_session_reset_callback,
+ .session_migrate_callback = quic_session_migrate_callback,
.add_segment_callback = quic_add_segment_callback,
.del_segment_callback = quic_del_segment_callback,
.builtin_app_rx_callback = quic_app_rx_callback,
{
.version = VPP_BUILD_VER,
.description = "Quic transport protocol",
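+ /* Disabled by default: the plugin must be explicitly enabled in the
+ startup configuration */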
+ .default_disabled = 1,
};
/* *INDENT-ON* */