X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fquic%2Fquic.c;h=866aaadb6a885923c44b445f47021cc1c7d3000f;hb=baf1c7ccc10134ee38d75532d7bef7d9f3fabfc9;hp=43ac87990da5bd4024db0eb92dddf9c519336ab7;hpb=46eb1950a13b7b01afcc83cb3d8ce59012dfee46;p=vpp.git

diff --git a/src/plugins/quic/quic.c b/src/plugins/quic/quic.c
index 43ac87990da..866aaadb6a8 100644
--- a/src/plugins/quic/quic.c
+++ b/src/plugins/quic/quic.c
@@ -141,7 +141,7 @@ quic_get_quicly_ctx_from_ctx (quic_ctx_t * ctx)
 }
 
 static quicly_context_t *
-quic_get_quicly_ctx_from_udp (u32 udp_session_handle)
+quic_get_quicly_ctx_from_udp (u64 udp_session_handle)
 {
   session_t *udp_session;
   application_t *app;
@@ -875,6 +875,7 @@ quic_store_quicly_ctx (application_t * app, u8 is_client)
 
   quicly_ctx_data_t *quicly_ctx_data =
     clib_mem_alloc (sizeof (quicly_ctx_data_t));
+  clib_memset (quicly_ctx_data, 0, sizeof (*quicly_ctx_data));  /* picotls depends on this */
   quicly_ctx = &quicly_ctx_data->quicly_ctx;
   ptls_context_t *ptls_ctx = &quicly_ctx_data->ptls_ctx;
   ptls_ctx->random_bytes = ptls_openssl_random_bytes;
@@ -1404,6 +1405,7 @@ quic_receive_connection (void *arg)
   quic_ctx_t *temp_ctx, *new_ctx;
   clib_bihash_kv_16_8_t kv;
   quicly_conn_t *conn;
+  session_t *udp_session;
 
   temp_ctx = arg;
   new_ctx_id = quic_ctx_alloc (thread_index);
@@ -1428,7 +1430,12 @@ quic_receive_connection (void *arg)
   new_ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
   quic_update_timer (new_ctx);
 
-  /* Trigger read on this connection ? */
+  /* Trigger write on this connection if necessary */
+  udp_session = session_get_from_handle (new_ctx->udp_session_handle);
+  if (svm_fifo_max_dequeue (udp_session->tx_fifo))
+    if (session_send_io_evt_to_thread (udp_session->tx_fifo,
+                                       SESSION_IO_EVT_TX))
+      QUIC_DBG (4, "Cannot send TX event");
 }
 
 static void
@@ -1459,60 +1466,6 @@ quic_transfer_connection (u32 ctx_index, u32 dest_thread)
                                   (void *) temp_ctx);
 }
 
-static void
-quic_transfer_connection_rpc (void *arg)
-{
-  u64 arg_int = (u64) arg;
-  u32 ctx_index, dest_thread;
-
-  ctx_index = (u32) (arg_int >> 32);
-  dest_thread = (u32) (arg_int & UINT32_MAX);
-  quic_transfer_connection (ctx_index, dest_thread);
-}
-
-/*
- * This assumes that the connection is not yet associated to a session
- * So currently it only works on the client side when receiving the first packet
- * from the server
- */
-static void
-quic_move_connection_to_thread (u32 ctx_index, u32 owner_thread,
-                                u32 to_thread,
-                                quicly_decoded_packet_t * packet)
-{
-  clib_bihash_kv_16_8_t kv;
-  clib_bihash_16_8_t *h;
-
-  if (owner_thread == UINT32_MAX)
-    {
-      QUIC_DBG (3, "Connection already moving to right thread");
-      return;
-    }
-
-  /* Mark connection as moving in the conn map */
-  h = &quic_main.connection_hash;
-  quic_make_connection_key (&kv, &packet->cid.dest.plaintext);
-  if (clib_bihash_search_16_8 (h, &kv, &kv) != 0)
-    {
-      QUIC_DBG (0, "Bug: conn to move not found");
-      return;
-    }
-  kv.value |= (u64) UINT32_MAX << 32;
-  if (clib_bihash_add_del_16_8
-      (&quic_main.connection_hash, &kv, /* is_add */ 1))
-    {
-      QUIC_DBG (0, "Bug: cannot update conn in lookup hash");
-      return;
-    }
-
-  /* Send rpc to owner thread to move conn */
-  QUIC_DBG (2, "Requesting transfer of conn %u from thread %u", ctx_index,
-            owner_thread);
-  u64 arg = ((u64) ctx_index) << 32 | to_thread;
-  session_send_rpc_evt_to_thread (owner_thread, quic_transfer_connection_rpc,
-                                  (void *) arg);
-}
-
 static int
 quic_session_connected_callback (u32 quic_app_index, u32 ctx_index,
                                  session_t * udp_session, u8 is_fail)
@@ -1577,11 +1530,11 @@ quic_session_connected_callback (u32 quic_app_index, u32 ctx_index,
   QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
   clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
 
-  quic_send_packets (ctx);
-
   /* UDP stack quirk? preemptively transfer connection if that happens */
   if (udp_session->thread_index != thread_index)
     quic_transfer_connection (ctx_index, udp_session->thread_index);
+  else
+    quic_send_packets (ctx);
 
   return ret;
 }
@@ -1598,6 +1551,36 @@ quic_session_reset_callback (session_t * s)
   clib_warning ("UDP session reset???");
 }
 
+static void
+quic_session_migrate_callback (session_t * s, session_handle_t new_sh)
+{
+  /*
+   * TODO we need better way to get the connection from the session
+   * This will become possible once we stop storing the app id in the UDP
+   * session opaque
+   */
+  u32 thread_index = vlib_get_thread_index ();
+  u64 old_session_handle = session_handle (s);
+  u32 new_thread = session_thread_from_handle (new_sh);
+  quic_ctx_t *ctx;
+
+  QUIC_DBG (1, "Session %x migrated to %lx", s->session_index, new_sh);
+  /* *INDENT-OFF* */
+  pool_foreach (ctx, quic_main.ctx_pool[thread_index],
+  ({
+    if (ctx->udp_session_handle == old_session_handle)
+      {
+        /* Right ctx found, move associated conn */
+        QUIC_DBG (5, "Found right ctx: %x", ctx->c_c_index);
+        ctx->udp_session_handle = new_sh;
+        quic_transfer_connection (ctx->c_c_index, new_thread);
+        return;
+      }
+  }));
+  /* *INDENT-ON* */
+  QUIC_DBG (0, "BUG: Connection to migrate not found");
+}
+
 int
 quic_session_accepted_callback (session_t * udp_session)
 {
@@ -1657,7 +1640,6 @@ quic_del_segment_callback (u32 client_index, u64 seg_handle)
   return 0;
 }
 
-
 static int
 quic_custom_app_rx_callback (transport_connection_t * tc)
 {
@@ -1763,38 +1745,6 @@ quic_find_packet_ctx (u32 * ctx_thread, u32 * ctx_index,
   return -1;
 }
 
-static int
-quic_receive (quic_ctx_t * ctx, quicly_conn_t * conn,
-              quicly_decoded_packet_t packet)
-{
-  int rv;
-  u32 ctx_id = ctx->c_c_index;
-  u32 thread_index = ctx->c_thread_index;
-  /* TODO : QUICLY_ERROR_PACKET_IGNORED sould be handled */
-  rv = quicly_receive (conn, &packet);
-  if (rv)
-    {
-      QUIC_DBG (2, "quicly_receive errored %U", quic_format_err, rv);
-      return 0;
-    }
-  /* ctx pointer may change if a new stream is opened */
-  ctx = quic_ctx_get (ctx_id, thread_index);
-  /* Conn may be set to null if the connection is terminated */
-  if (ctx->conn && ctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
-    {
-      if (quicly_connection_is_ready (conn))
-        {
-          ctx->conn_state = QUIC_CONN_STATE_READY;
-          if (quicly_is_client (conn))
-            {
-              quic_on_client_connected (ctx);
-              ctx = quic_ctx_get (ctx_id, thread_index);
-            }
-        }
-    }
-  return quic_send_packets (ctx);
-}
-
 static int
 quic_create_quic_session (quic_ctx_t * ctx)
 {
@@ -1908,30 +1858,169 @@ quic_reset_connection (u64 udp_session_handle,
   return rv;
 }
 
-static int
-quic_app_rx_callback (session_t * udp_session)
+typedef struct quic_rx_packet_ctx_
 {
-  /* Read data from UDP rx_fifo and pass it to the quicly conn. */
   quicly_decoded_packet_t packet;
+  u8 data[QUIC_MAX_PACKET_SIZE];
+  u32 ctx_index;
+  u32 thread_index;
+} quic_rx_packet_ctx_t;
+
+static void
+check_quic_client_connected (struct quic_rx_packet_ctx_ *quic_rx_ctx)
+{
+  /* ctx pointer may change if a new stream is opened */
+  quic_ctx_t *ctx = quic_ctx_get (quic_rx_ctx->ctx_index,
+                                  quic_rx_ctx->thread_index);
+  /* Conn may be set to null if the connection is terminated */
+  if (ctx->conn && ctx->conn_state == QUIC_CONN_STATE_HANDSHAKE)
+    {
+      if (quicly_connection_is_ready (ctx->conn))
+        {
+          ctx->conn_state = QUIC_CONN_STATE_READY;
+          if (quicly_is_client (ctx->conn))
+            {
+              quic_on_client_connected (ctx);
+            }
+        }
+    }
+
+}
+
+static int
+quic_process_one_rx_packet (u64 udp_session_handle,
+                            quicly_context_t * quicly_ctx, svm_fifo_t * f,
+                            u32 * fifo_offset, u32 * max_packet, u32 packet_n,
+                            quic_rx_packet_ctx_t * packet_ctx)
+{
   session_dgram_hdr_t ph;
-  application_t *app;
   quic_ctx_t *ctx = NULL;
-  svm_fifo_t *f;
   size_t plen;
   struct sockaddr_in6 sa6;
   struct sockaddr *sa = (struct sockaddr *) &sa6;
   socklen_t salen;
-  u32 max_deq, full_len, ctx_index = UINT32_MAX, ctx_thread = UINT32_MAX, ret;
-  u8 *data;
-  int err;
+  u32 full_len, ret;
+  int err, rv = 0;
+  packet_ctx->thread_index = UINT32_MAX;
+  packet_ctx->ctx_index = UINT32_MAX;
+  u32 thread_index = vlib_get_thread_index ();
   u32 *opening_ctx_pool, *ctx_index_ptr;
+  u32 cur_deq = svm_fifo_max_dequeue (f) - *fifo_offset;
+
+  if (cur_deq == 0)
+    {
+      *max_packet = packet_n + 1;
+      return 0;
+    }
+
+  if (cur_deq < SESSION_CONN_HDR_LEN)
+    {
+      QUIC_DBG (1, "Not enough data for even a header in RX");
+      return 1;
+    }
+  ret = svm_fifo_peek (f, *fifo_offset, SESSION_CONN_HDR_LEN, (u8 *) & ph);
+  if (ret != SESSION_CONN_HDR_LEN)
+    {
+      QUIC_DBG (1, "Not enough data for header in RX");
+      return 1;
+    }
+  ASSERT (ph.data_offset == 0);
+  full_len = ph.data_length + SESSION_CONN_HDR_LEN;
+  if (full_len > cur_deq)
+    {
+      QUIC_DBG (1, "Not enough data in fifo RX");
+      return 1;
+    }
+
+  /* Quicly can read len bytes from the fifo at offset:
+   * ph.data_offset + SESSION_CONN_HDR_LEN */
+  ret =
+    svm_fifo_peek (f, SESSION_CONN_HDR_LEN + *fifo_offset, ph.data_length,
+                   packet_ctx->data);
+  if (ret != ph.data_length)
+    {
+      QUIC_DBG (1, "Not enough data peeked in RX");
+      return 1;
+    }
+
+  rv = 0;
+  quic_build_sockaddr (sa, &salen, &ph.rmt_ip, ph.rmt_port, ph.is_ip4);
+  quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
+  plen =
+    quicly_decode_packet (quicly_ctx, &packet_ctx->packet, packet_ctx->data,
+                          ph.data_length);
+
+  if (plen == SIZE_MAX)
+    {
+      *fifo_offset += SESSION_CONN_HDR_LEN + ph.data_length;
+      return 1;
+    }
+
+  err =
+    quic_find_packet_ctx (&packet_ctx->thread_index, &packet_ctx->ctx_index,
+                          sa, salen, &packet_ctx->packet, thread_index);
+  if (err == 0)
+    {
+      ctx = quic_ctx_get (packet_ctx->ctx_index, thread_index);
+      rv = quicly_receive (ctx->conn, &packet_ctx->packet);
+      if (rv)
+        QUIC_DBG (1, "quicly_receive return error %d", rv);
+    }
+  else if (packet_ctx->ctx_index != UINT32_MAX)
+    {
+      /* Connection found but on wrong thread, ask move */
+      *max_packet = packet_n + 1;
+      return 0;
+    }
+  else if ((packet_ctx->packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) ==
+           QUICLY_PACKET_TYPE_INITIAL)
+    {
+      /* Try to find matching "opening" ctx */
+      opening_ctx_pool = quic_main.wrk_ctx[thread_index].opening_ctx_pool;
+
+      /* *INDENT-OFF* */
+      pool_foreach (ctx_index_ptr, opening_ctx_pool,
+      ({
+        ctx = quic_ctx_get (*ctx_index_ptr, thread_index);
+        if (ctx->udp_session_handle == udp_session_handle)
+          {
+            /* Right ctx found, create conn & remove from pool */
+            quic_create_connection(*ctx_index_ptr, sa, salen, packet_ctx->packet);
+            pool_put (opening_ctx_pool, ctx_index_ptr);
+            *max_packet = packet_n + 1;
+            packet_ctx->thread_index = thread_index;
+            packet_ctx->ctx_index = *ctx_index_ptr;
+            goto updateOffset;
+          }
+      }));
+      /* *INDENT-ON* */
+    }
+  else
+    {
+      quic_reset_connection (udp_session_handle, sa, salen,
+                             packet_ctx->packet);
+    }
+
+updateOffset:
+  *fifo_offset += SESSION_CONN_HDR_LEN + ph.data_length;
+  return 0;
+}
+
+static int
+quic_app_rx_callback (session_t * udp_session)
+{
+  /* Read data from UDP rx_fifo and pass it to the quicly conn. */
+  application_t *app;
+  quic_ctx_t *ctx = NULL;
+  svm_fifo_t *f;
+  u32 max_deq;
   u32 app_index = udp_session->opaque;
   u64 udp_session_handle = session_handle (udp_session);
   int rv = 0;
+  app = application_get_if_valid (app_index);
   u32 thread_index = vlib_get_thread_index ();
-  quicly_context_t *quicly_ctx;
+  quic_rx_packet_ctx_t packets_ctx[16];
 
-  app = application_get_if_valid (app_index);
   if (!app)
     {
      QUIC_DBG (1, "Got RX on detached app");
@@ -1945,89 +2034,32 @@ quic_app_rx_callback (session_t * udp_session)
       f = udp_session->rx_fifo;
       max_deq = svm_fifo_max_dequeue (f);
       if (max_deq == 0)
-        return 0;
-
-      if (max_deq < SESSION_CONN_HDR_LEN)
-        {
-          QUIC_DBG (1, "Not enough data for even a header in RX");
-          return 1;
-        }
-      ret = svm_fifo_peek (f, 0, SESSION_CONN_HDR_LEN, (u8 *) & ph);
-      if (ret != SESSION_CONN_HDR_LEN)
         {
-          QUIC_DBG (1, "Not enough data for header in RX");
-          return 1;
-        }
-      ASSERT (ph.data_offset == 0);
-      full_len = ph.data_length + SESSION_CONN_HDR_LEN;
-      if (full_len > max_deq)
-        {
-          QUIC_DBG (1, "Not enough data in fifo RX");
-          return 1;
+          return 0;
         }
 
-      /* Quicly can read len bytes from the fifo at offset:
-       * ph.data_offset + SESSION_CONN_HDR_LEN */
-      data = malloc (ph.data_length);
-      ret = svm_fifo_peek (f, SESSION_CONN_HDR_LEN, ph.data_length, data);
-      if (ret != ph.data_length)
+      u32 fifo_offset = 0;
+      u32 max_packets = 16;
+      for (int i = 0; i < max_packets; i++)
         {
-          QUIC_DBG (1, "Not enough data peeked in RX");
-          free (data);
-          return 1;
+          quic_process_one_rx_packet (udp_session_handle,
+                                      (quicly_context_t *) app->quicly_ctx, f,
+                                      &fifo_offset, &max_packets, i,
+                                      &packets_ctx[i]);
        }
 
-      rv = 0;
-      quic_build_sockaddr (sa, &salen, &ph.rmt_ip, ph.rmt_port, ph.is_ip4);
-
-      quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
-      plen = quicly_decode_packet (quicly_ctx, &packet, data, ph.data_length);
-      if (plen != SIZE_MAX)
+      for (int i = 0; i < max_packets; i++)
        {
-
-          err = quic_find_packet_ctx (&ctx_thread, &ctx_index, sa, salen,
-                                      &packet, thread_index);
-          if (err == 0)
-            {
-              ctx = quic_ctx_get (ctx_index, thread_index);
-              quic_receive (ctx, ctx->conn, packet);
-            }
-          else if (ctx_index != UINT32_MAX)
-            {
-              /* Connection found but on wrong thread, ask move */
-              quic_move_connection_to_thread (ctx_index, ctx_thread,
-                                              thread_index, &packet);
-            }
-          else if ((packet.octets.base[0] & QUICLY_PACKET_TYPE_BITMASK) ==
-                   QUICLY_PACKET_TYPE_INITIAL)
-            {
-              /* Try to find matching "opening" ctx */
-              opening_ctx_pool =
-                quic_main.wrk_ctx[thread_index].opening_ctx_pool;
-
-              /* *INDENT-OFF* */
-              pool_foreach (ctx_index_ptr, opening_ctx_pool,
-              ({
-                ctx = quic_ctx_get (*ctx_index_ptr, thread_index);
-                if (ctx->udp_session_handle == udp_session_handle)
-                  {
-                    /* Right ctx found, create conn & remove from pool */
-                    quic_create_connection (*ctx_index_ptr, sa, salen, packet);
-                    pool_put (opening_ctx_pool, ctx_index_ptr);
-                    goto ctx_search_done;
-                  }
-              }));
-              /* *INDENT-ON* */
-
-            }
-          else
-            {
-              quic_reset_connection (udp_session_handle, sa, salen, packet);
-            }
+          if (packets_ctx[i].thread_index != thread_index)
+            continue;
+
+          check_quic_client_connected (&packets_ctx[i]);
+          ctx =
+            quic_ctx_get (packets_ctx[i].ctx_index,
+                          packets_ctx[i].thread_index);
+          quic_send_packets (ctx);
        }
-      ctx_search_done:
-      svm_fifo_dequeue_drop (f, full_len);
-      free (data);
+      svm_fifo_dequeue_drop (f, fifo_offset);
     }
   while (1);
   return rv;
@@ -2081,6 +2113,7 @@ static session_cb_vft_t quic_app_cb_vft = {
   .session_disconnect_callback = quic_session_disconnect_callback,
   .session_connected_callback = quic_session_connected_callback,
   .session_reset_callback = quic_session_reset_callback,
+  .session_migrate_callback = quic_session_migrate_callback,
   .add_segment_callback = quic_add_segment_callback,
   .del_segment_callback = quic_del_segment_callback,
   .builtin_app_rx_callback = quic_app_rx_callback,
@@ -2217,6 +2250,7 @@ VLIB_PLUGIN_REGISTER () =
 {
   .version = VPP_BUILD_VER,
   .description = "Quic transport protocol",
+  .default_disabled = 1,
 };
 /* *INDENT-ON* */
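
For reference, the reworked RX path above replaces the old single peek/drop per dispatch with a batched walk of the UDP datagram fifo: each datagram header and payload is peeked at a running offset, handed to quicly, and the fifo is dropped once at the end of the batch. A minimal standalone sketch of that peek-at-offset / drop-once pattern follows. It only relies on the svm_fifo calls and session_dgram_hdr_t layout already used in the patch; the drain_datagrams () helper, the handle_packet () callback, the MAX_BATCH constant and the include list are illustrative assumptions, not part of the patch.

#include <vnet/session/session.h>   /* assumed: session_dgram_hdr_t, SESSION_CONN_HDR_LEN, svm_fifo_* */

#define MAX_BATCH 16                /* illustrative, mirrors packets_ctx[16] above */

/* Walk up to MAX_BATCH complete datagrams without consuming the fifo,
 * hand each payload to the caller-supplied handler, then drop everything
 * that was fully parsed in one call. Partially received datagrams stay in
 * the fifo for the next dispatch. */
static u32
drain_datagrams (svm_fifo_t * f,
                 void (*handle_packet) (u8 * data, u32 len, void *opaque),
                 void *opaque)
{
  session_dgram_hdr_t ph;
  u8 payload[QUIC_MAX_PACKET_SIZE]; /* same max packet size as used above */
  u32 offset = 0;

  for (int i = 0; i < MAX_BATCH; i++)
    {
      u32 cur_deq = svm_fifo_max_dequeue (f) - offset;
      if (cur_deq < SESSION_CONN_HDR_LEN)
        break;                      /* not even a header left */
      /* Peek the datagram header first: it carries the payload length */
      if (svm_fifo_peek (f, offset, SESSION_CONN_HDR_LEN, (u8 *) & ph)
          != SESSION_CONN_HDR_LEN)
        break;
      if (SESSION_CONN_HDR_LEN + ph.data_length > cur_deq)
        break;                      /* datagram not fully in the fifo yet */
      if (ph.data_length > sizeof (payload))
        break;                      /* oversized datagram, leave it alone */
      /* Peek the payload at the running offset, without dequeuing */
      if (svm_fifo_peek (f, offset + SESSION_CONN_HDR_LEN, ph.data_length,
                         payload) != ph.data_length)
        break;
      handle_packet (payload, ph.data_length, opaque);
      offset += SESSION_CONN_HDR_LEN + ph.data_length;
    }
  /* Single drop for everything consumed in this batch */
  if (offset)
    svm_fifo_dequeue_drop (f, offset);
  return offset;
}

Deferring the drop until the whole batch has been parsed is what lets the patch decode several packets per dispatch while still leaving incomplete datagrams in place, which is the same design choice quic_process_one_rx_packet () and quic_app_rx_callback () implement with the fifo_offset argument above.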