$DIR/http_unittest_plugin.so \
$DIR/unittest_plugin.so \
$DIR/quic_plugin.so \
+ $DIR/quic_quicly_plugin.so \
$DIR/http_static_plugin.so \
$DIR/ping_plugin.so \
$DIR/nsim_plugin.so \
plugin unittest_plugin.so { enable }
plugin quic_plugin.so { enable }
+ plugin quic_quicly_plugin.so { enable }
plugin af_packet_plugin.so { enable }
plugin hs_apps_plugin.so { enable }
plugin http_plugin.so { enable }
if [[ -n "$QUIC_ENABLE" ]]; then
QUIC_PLUGIN="plugin quic_plugin.so {enable}"
+ QUIC_QUICLY_PLUGIN="plugin quic_quicly_plugin.so {enable}"
fi
if [[ -n "$SRTP_ENABLE" ]]; then
plugin unittest_plugin.so {enable} \
plugin http_unittest_plugin.so {enable} \
$QUIC_PLUGIN \
+ $QUIC_QUICLY_PLUGIN \
$SRTP_PLUGIN \
$DPDK_PLUGIN_DISABLE \
}
-
-# Copyright (c) 2021 Cisco
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Cisco
if(NOT OPENSSL_FOUND)
message(WARNING "OpenSSL not found - quic plugin disabled")
return()
endif()
-unset(QUIC_LINK_LIBRARIES)
-set(EXPECTED_QUICLY_VERSION "0.1.5-vpp")
-
-vpp_find_path(QUICLY_INCLUDE_DIR NAMES quicly.h)
-vpp_find_path(PICOTLS_INCLUDE_DIR NAMES picotls.h)
-vpp_find_library(QUICLY_LIBRARY NAMES "libquicly.a")
-vpp_find_library(PICOTLS_CORE_LIBRARY NAMES "libpicotls-core.a")
-vpp_find_library(PICOTLS_OPENSSL_LIBRARY NAMES "libpicotls-openssl.a")
+add_vpp_plugin(quic
+ SOURCES
+ quic.c
-list(APPEND QUIC_LINK_LIBRARIES
- ${QUICLY_LIBRARY}
- ${PICOTLS_CORE_LIBRARY}
- ${PICOTLS_OPENSSL_LIBRARY}
+ LINK_LIBRARIES ${OPENSSL_LIBRARIES}
)
-
-if(QUICLY_INCLUDE_DIR AND QUIC_LINK_LIBRARIES)
- if(EXISTS "${QUICLY_INCLUDE_DIR}/quicly/version.h")
- file(STRINGS "${QUICLY_INCLUDE_DIR}/quicly/version.h" quicly_version_str REGEX "^#define[\t ]+LIBQUICLY_VERSION[\t ]+\".*\"")
- string(REGEX REPLACE "^#define[\t ]+LIBQUICLY_VERSION[\t ]+\"([^\"]*)\".*" "\\1" QUICLY_VERSION_STRING "${quicly_version_str}")
- unset(quicly_version_str)
- endif()
-
- if (${QUICLY_VERSION_STRING} MATCHES "${EXPECTED_QUICLY_VERSION}")
- include_directories (${QUICLY_INCLUDE_DIR})
-
- if(PICOTLS_INCLUDE_DIR)
- include_directories (${PICOTLS_INCLUDE_DIR})
- endif()
-
- add_vpp_plugin(quic
- SOURCES
- certs.c
- error.c
- quic.c
- quic_crypto.c
-
- LINK_LIBRARIES ${QUIC_LINK_LIBRARIES} ${OPENSSL_LIBRARIES}
- )
- message(STATUS "Found quicly ${EXPECTED_QUICLY_VERSION} in ${QUICLY_INCLUDE_DIR}")
- else()
- message(STATUS "-- quicly ${EXPECTED_QUICLY_VERSION} not found - QUIC plugin disabled")
- endif()
-else()
- message(WARNING "-- quicly headers not found - QUIC plugin disabled")
-endif()
---
name: QUIC Protocol
-maintainer: Aloys Augustin <aloaugus@cisco.com>
+maintainer: Dave Wallace <dwallacelf@gmail.com>
features:
- host stack integration via session layer
- - "based on the Quicly library: https://github.com/h2o/quicly"
+ - quic library api to allow selection of quic implementation
description: "IETF QUIC Protocol implementation"
state: experimental
properties: [API, CLI, STATS, MULTITHREAD]
+++ /dev/null
-/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __included_quic_certs_h__
-#define __included_quic_certs_h__
-
-
-#include <picotls/openssl.h>
-#include <picotls/pembase64.h>
-
-int ptls_compare_separator_line (const char *line, const char *begin_or_end,
- const char *label);
-
-int ptls_get_bio_pem_object (BIO * bio, const char *label,
- ptls_buffer_t * buf);
-
-int ptls_load_bio_pem_objects (BIO * bio, const char *label,
- ptls_iovec_t * list, size_t list_max,
- size_t * nb_objects);
-
-int ptls_load_bio_certificates (ptls_context_t * ctx, BIO * bio);
-
-int load_bio_certificate_chain (ptls_context_t * ctx, const char *cert_data);
-
-int load_bio_private_key (ptls_context_t * ctx, const char *pk_data);
-
-
-#endif /* __included_quic_certs_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+++ /dev/null
-/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __included_quic_error_h__
-#define __included_quic_error_h__
-
-#include <stdarg.h>
-
-#include <vppinfra/format.h>
-
-u8 *quic_format_err (u8 * s, va_list * args);
-
-#endif /* __included_quic_error_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
-/*
- * Copyright (c) 2021 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2025 Cisco Systems, Inc.
*/
+#include "quic.h"
#include <sys/socket.h>
#include <sys/syscall.h>
#include <vppinfra/lock.h>
#include <quic/quic.h>
-#include <quic/certs.h>
-#include <quic/error.h>
-
-#include <quicly/constants.h>
-#include <quicly/defaults.h>
-#include <picotls.h>
-
-#include <quic/quic_crypto.h>
-
-extern quicly_crypto_engine_t quic_crypto_engine;
+#include <quic/quic_timer.h>
+#include <quic/quic_inlines.h>
static char *quic_error_strings[] = {
#define quic_error(n,s) s,
#include <quic/quic_error.def>
#undef quic_error
};
-#define DEFAULT_MAX_PACKETS_PER_KEY 16777216
-
quic_main_t quic_main;
-static void quic_update_timer (quic_ctx_t * ctx);
-static void quic_check_quic_session_connected (quic_ctx_t * ctx);
-static int quic_reset_connection (u64 udp_session_handle,
- quic_rx_packet_ctx_t * pctx);
+quic_engine_vft_t *quic_engine_vfts;
+
static void quic_proto_on_close (u32 ctx_index,
clib_thread_index_t thread_index);
-static quicly_stream_open_t on_stream_open;
-static quicly_closed_by_remote_t on_closed_by_remote;
-static quicly_now_t quicly_vpp_now_cb;
-
-/* Crypto contexts */
-
-static inline void
-quic_crypto_context_make_key_from_ctx (clib_bihash_kv_24_8_t * kv,
- quic_ctx_t * ctx)
-{
- application_t *app = application_get (ctx->parent_app_id);
- kv->key[0] = ((u64) ctx->ckpair_index) << 32 | (u64) ctx->crypto_engine;
- kv->key[1] = app->sm_properties.rx_fifo_size - 1;
- kv->key[2] = app->sm_properties.tx_fifo_size - 1;
-}
-
-static inline void
-quic_crypto_context_make_key_from_crctx (clib_bihash_kv_24_8_t * kv,
- crypto_context_t * crctx)
+static_always_inline quic_engine_type_t
+quic_get_engine_type (quic_engine_type_t requested,
+ quic_engine_type_t preferred)
{
- quic_crypto_context_data_t *data =
- (quic_crypto_context_data_t *) crctx->data;
- kv->key[0] = ((u64) crctx->ckpair_index) << 32 | (u64) crctx->crypto_engine;
- kv->key[1] = data->quicly_ctx.transport_params.max_stream_data.bidi_local;
- kv->key[2] = data->quicly_ctx.transport_params.max_stream_data.bidi_remote;
-}
-
-static void
-quic_crypto_context_free_if_needed (crypto_context_t * crctx, u8 thread_index)
-{
- quic_main_t *qm = &quic_main;
- clib_bihash_kv_24_8_t kv;
- if (crctx->n_subscribers)
- return;
- quic_crypto_context_make_key_from_crctx (&kv, crctx);
- clib_bihash_add_del_24_8 (&qm->wrk_ctx[thread_index].crypto_context_hash,
- &kv, 0 /* is_add */ );
- clib_mem_free (crctx->data);
- pool_put (qm->wrk_ctx[thread_index].crypto_ctx_pool, crctx);
-}
-
-static int
-quic_app_cert_key_pair_delete_callback (app_cert_key_pair_t * ckpair)
-{
- quic_main_t *qm = &quic_main;
- crypto_context_t *crctx;
- clib_bihash_kv_24_8_t kv;
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
- int num_threads = 1 /* main thread */ + vtm->n_threads;
- int i;
+ quic_engine_type_t engine_type = QUIC_ENGINE_NONE;
- for (i = 0; i < num_threads; i++)
+ if ((requested != QUIC_ENGINE_NONE) &&
+ (vec_len (quic_engine_vfts) > requested) &&
+ (quic_engine_vfts[requested].engine_init))
{
- pool_foreach (crctx, qm->wrk_ctx[i].crypto_ctx_pool) {
- if (crctx->ckpair_index == ckpair->cert_key_index)
- {
- quic_crypto_context_make_key_from_crctx (&kv, crctx);
- clib_bihash_add_del_24_8 (&qm->wrk_ctx[i].crypto_context_hash, &kv, 0 /* is_add */ );
- }
- }
+ engine_type = requested;
}
- return 0;
+ else if ((preferred != QUIC_ENGINE_NONE) &&
+ (vec_len (quic_engine_vfts) > preferred) &&
+ (quic_engine_vfts[preferred].engine_init))
+ {
+ engine_type = preferred;
+ }
+ return engine_type;
}
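Note on the fallback logic above: the requested engine wins if its vft slot has been registered (engine_init set), otherwise the preferred engine is tried, otherwise QUIC_ENGINE_NONE is returned. A minimal illustrative use follows; the requested/preferred variables and the error handling are assumptions, not taken from this change.

/* Illustrative sketch only. Only quic_get_engine_type() and
 * QUIC_ENGINE_NONE appear in this change; the rest is assumed. */
quic_engine_type_t et = quic_get_engine_type (requested, preferred);
if (et == QUIC_ENGINE_NONE)
  /* neither engine plugin is loaded; QUIC cannot be enabled */
  return clib_error_return (0, "no QUIC engine plugin available");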
-static crypto_context_t *
-quic_crypto_context_alloc (u8 thread_index)
+__clib_export void
+quic_register_engine (const quic_engine_vft_t *vft,
+ quic_engine_type_t engine_type)
{
- quic_main_t *qm = &quic_main;
- crypto_context_t *crctx;
- u32 idx;
-
- pool_get (qm->wrk_ctx[thread_index].crypto_ctx_pool, crctx);
- clib_memset (crctx, 0, sizeof (*crctx));
- idx = (crctx - qm->wrk_ctx[thread_index].crypto_ctx_pool);
- crctx->ctx_index = ((u32) thread_index) << 24 | idx;
-
- return crctx;
+ vec_validate (quic_engine_vfts, engine_type);
+ quic_engine_vfts[engine_type] = *vft;
}
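For orientation (not part of the diff itself): an engine plugin such as quic_quicly is expected to call quic_register_engine() from its init path to populate its slot in quic_engine_vfts. The sketch below is a hypothetical illustration of that pattern; QUIC_ENGINE_QUICLY, quic_quicly_engine_init() and its signature are assumptions, while quic_register_engine(), quic_engine_vft_t and the engine_init member do appear in this change.

/* Hypothetical engine-registration sketch.
 * Assumed: QUIC_ENGINE_QUICLY, quic_quicly_engine_init() and its
 * signature, the remaining vft members.
 * From this change: quic_register_engine(), quic_engine_vft_t,
 * the engine_init member. */
#include <vlib/vlib.h>
#include <quic/quic.h>

static void
quic_quicly_engine_init (void) /* signature assumed */
{
  /* engine-specific setup: quicly/picotls contexts, timers, ... */
}

static clib_error_t *
quic_quicly_plugin_init (vlib_main_t *vm)
{
  static quic_engine_vft_t vft = {
    .engine_init = quic_quicly_engine_init,
    /* remaining callbacks (connect, send packets, ...) omitted */
  };

  quic_register_engine (&vft, QUIC_ENGINE_QUICLY);
  return 0;
}

VLIB_INIT_FUNCTION (quic_quicly_plugin_init);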
-static crypto_context_t *
-quic_crypto_context_get (u32 cr_index, clib_thread_index_t thread_index)
+static int
+quic_app_cert_key_pair_delete_callback (app_cert_key_pair_t *ckpair)
{
- quic_main_t *qm = &quic_main;
- ASSERT (cr_index >> 24 == thread_index);
- return pool_elt_at_index (qm->wrk_ctx[thread_index].crypto_ctx_pool,
- cr_index & 0x00ffffff);
+ return quic_eng_app_cert_key_pair_delete (ckpair);
}
static clib_error_t *
-quic_list_crypto_context_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_list_crypto_context_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
{
- quic_main_t *qm = &quic_main;
crypto_context_t *crctx;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
int i, num_threads = 1 /* main thread */ + vtm->n_threads;
for (i = 0; i < num_threads; i++)
{
- pool_foreach (crctx, qm->wrk_ctx[i].crypto_ctx_pool) {
- vlib_cli_output (vm, "[%d][Q]%U", i, format_crypto_context, crctx);
- }
+ pool_foreach (crctx, quic_wrk_ctx_get (&quic_main, i)->crypto_ctx_pool)
+ {
+ vlib_cli_output (vm, "[%d][Q]%U", i, format_crypto_context, crctx);
+ }
}
return 0;
}
static clib_error_t *
-quic_set_max_packets_per_key_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_set_max_packets_per_key_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
{
- quic_main_t *qm = &quic_main;
unformat_input_t _line_input, *line_input = &_line_input;
u64 tmp;
{
if (unformat (line_input, "%U", unformat_memory_size, &tmp))
{
- qm->max_packets_per_key = tmp;
+ quic_main.max_packets_per_key = tmp;
}
else
return clib_error_return (0, "unknown input '%U'",
return e;
}
-static void
-quic_release_crypto_context (u32 crypto_context_index, u8 thread_index)
-{
- crypto_context_t *crctx;
- crctx = quic_crypto_context_get (crypto_context_index, thread_index);
- crctx->n_subscribers--;
- quic_crypto_context_free_if_needed (crctx, thread_index);
-}
-
-static int
-quic_init_crypto_context (crypto_context_t * crctx, quic_ctx_t * ctx)
-{
- quic_main_t *qm = &quic_main;
- quicly_context_t *quicly_ctx;
- ptls_iovec_t key_vec;
- app_cert_key_pair_t *ckpair;
- application_t *app;
- quic_crypto_context_data_t *data;
- ptls_context_t *ptls_ctx;
-
- QUIC_DBG (2, "Init quic crctx %d thread %d", crctx->ctx_index,
- ctx->c_thread_index);
-
- data = clib_mem_alloc (sizeof (*data));
- /* picotls depends on data being zeroed */
- clib_memset (data, 0, sizeof (*data));
- crctx->data = (void *) data;
- quicly_ctx = &data->quicly_ctx;
- ptls_ctx = &data->ptls_ctx;
-
- ptls_ctx->random_bytes = ptls_openssl_random_bytes;
- ptls_ctx->get_time = &ptls_get_time;
- ptls_ctx->key_exchanges = ptls_openssl_key_exchanges;
- ptls_ctx->cipher_suites = qm->quic_ciphers[ctx->crypto_engine];
- ptls_ctx->certificates.list = NULL;
- ptls_ctx->certificates.count = 0;
- ptls_ctx->on_client_hello = NULL;
- ptls_ctx->emit_certificate = NULL;
- ptls_ctx->sign_certificate = NULL;
- ptls_ctx->verify_certificate = NULL;
- ptls_ctx->ticket_lifetime = 86400;
- ptls_ctx->max_early_data_size = 8192;
- ptls_ctx->hkdf_label_prefix__obsolete = NULL;
- ptls_ctx->require_dhe_on_psk = 1;
- ptls_ctx->encrypt_ticket = &qm->session_cache.super;
- clib_memcpy (quicly_ctx, &quicly_spec_context, sizeof (quicly_context_t));
-
- quicly_ctx->max_packets_per_key = qm->max_packets_per_key;
- quicly_ctx->tls = ptls_ctx;
- quicly_ctx->stream_open = &on_stream_open;
- quicly_ctx->closed_by_remote = &on_closed_by_remote;
- quicly_ctx->now = &quicly_vpp_now_cb;
- quicly_amend_ptls_context (quicly_ctx->tls);
-
- if (qm->vnet_crypto_enabled &&
- qm->default_crypto_engine == CRYPTO_ENGINE_VPP)
- quicly_ctx->crypto_engine = &quic_crypto_engine;
- else
- quicly_ctx->crypto_engine = &quicly_default_crypto_engine;
-
- quicly_ctx->transport_params.max_data = QUIC_INT_MAX;
- quicly_ctx->transport_params.max_streams_uni = (uint64_t) 1 << 60;
- quicly_ctx->transport_params.max_streams_bidi = (uint64_t) 1 << 60;
- quicly_ctx->transport_params.max_idle_timeout = qm->connection_timeout;
-
- if (qm->default_quic_cc == QUIC_CC_CUBIC)
- quicly_ctx->init_cc = &quicly_cc_cubic_init;
- else if (qm->default_quic_cc == QUIC_CC_RENO)
- quicly_ctx->init_cc = &quicly_cc_reno_init;
-
- app = application_get (ctx->parent_app_id);
- quicly_ctx->transport_params.max_stream_data.bidi_local =
- app->sm_properties.rx_fifo_size - 1;
- quicly_ctx->transport_params.max_stream_data.bidi_remote =
- app->sm_properties.tx_fifo_size - 1;
- quicly_ctx->transport_params.max_stream_data.uni = QUIC_INT_MAX;
-
- quicly_ctx->transport_params.max_udp_payload_size = QUIC_MAX_PACKET_SIZE;
- if (!app->quic_iv_set)
- {
- ptls_openssl_random_bytes (app->quic_iv, QUIC_IV_LEN - 1);
- app->quic_iv[QUIC_IV_LEN - 1] = 0;
- app->quic_iv_set = 1;
- }
-
- clib_memcpy (data->cid_key, app->quic_iv, QUIC_IV_LEN);
- key_vec = ptls_iovec_init (data->cid_key, QUIC_IV_LEN);
- quicly_ctx->cid_encryptor =
- quicly_new_default_cid_encryptor (&ptls_openssl_bfecb,
- &ptls_openssl_aes128ecb,
- &ptls_openssl_sha256, key_vec);
-
- ckpair = app_cert_key_pair_get_if_valid (crctx->ckpair_index);
- if (!ckpair || !ckpair->key || !ckpair->cert)
- {
- QUIC_DBG (1, "Wrong ckpair id %d\n", crctx->ckpair_index);
- return -1;
- }
- if (load_bio_private_key (quicly_ctx->tls, (char *) ckpair->key))
- {
- QUIC_DBG (1, "failed to read private key from app configuration\n");
- return -1;
- }
- if (load_bio_certificate_chain (quicly_ctx->tls, (char *) ckpair->cert))
- {
- QUIC_DBG (1, "failed to load certificate\n");
- return -1;
- }
- return 0;
-
-}
-
-static int
-quic_acquire_crypto_context (quic_ctx_t * ctx)
-{
- quic_main_t *qm = &quic_main;
- crypto_context_t *crctx;
- clib_bihash_kv_24_8_t kv;
-
- if (ctx->crypto_engine == CRYPTO_ENGINE_NONE)
- {
- QUIC_DBG (2, "No crypto engine specified, using %d",
- qm->default_crypto_engine);
- ctx->crypto_engine = qm->default_crypto_engine;
- }
- if (!clib_bitmap_get (qm->available_crypto_engines, ctx->crypto_engine))
- {
- QUIC_DBG (1, "Quic does not support crypto engine %d",
- ctx->crypto_engine);
- return SESSION_E_NOCRYPTOENG;
- }
-
- /* Check for exisiting crypto ctx */
- quic_crypto_context_make_key_from_ctx (&kv, ctx);
- if (clib_bihash_search_24_8
- (&qm->wrk_ctx[ctx->c_thread_index].crypto_context_hash, &kv, &kv) == 0)
- {
- crctx = quic_crypto_context_get (kv.value, ctx->c_thread_index);
- QUIC_DBG (2, "Found exisiting crypto context %d", kv.value);
- ctx->crypto_context_index = kv.value;
- crctx->n_subscribers++;
- return 0;
- }
-
- crctx = quic_crypto_context_alloc (ctx->c_thread_index);
- ctx->crypto_context_index = crctx->ctx_index;
- kv.value = crctx->ctx_index;
- crctx->crypto_engine = ctx->crypto_engine;
- crctx->ckpair_index = ctx->ckpair_index;
- if (quic_init_crypto_context (crctx, ctx))
- goto error;
- if (vnet_app_add_cert_key_interest (ctx->ckpair_index, qm->app_index))
- goto error;
- crctx->n_subscribers++;
- clib_bihash_add_del_24_8 (&qm->
- wrk_ctx[ctx->c_thread_index].crypto_context_hash,
- &kv, 1 /* is_add */ );
- return 0;
-
-error:
- quic_crypto_context_free_if_needed (crctx, ctx->c_thread_index);
- return SESSION_E_NOCRYPTOCKP;
-}
-
-/* Helper functions */
-
-static u32
-quic_ctx_alloc (clib_thread_index_t thread_index)
-{
- quic_main_t *qm = &quic_main;
- quic_ctx_t *ctx;
-
- pool_get_aligned_safe (qm->ctx_pool[thread_index], ctx,
- CLIB_CACHE_LINE_BYTES);
-
- clib_memset (ctx, 0, sizeof (quic_ctx_t));
- ctx->c_thread_index = thread_index;
- ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
- QUIC_DBG (3, "Allocated quic_ctx %u on thread %u",
- ctx - qm->ctx_pool[thread_index], thread_index);
- return ctx - qm->ctx_pool[thread_index];
-}
-
-static void
-quic_ctx_free (quic_ctx_t * ctx)
-{
- QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
- clib_thread_index_t thread_index = ctx->c_thread_index;
- QUIC_ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
- if (CLIB_DEBUG)
- clib_memset (ctx, 0xfb, sizeof (*ctx));
- pool_put (quic_main.ctx_pool[thread_index], ctx);
-}
-
-static quic_ctx_t *
-quic_ctx_get (u32 ctx_index, clib_thread_index_t thread_index)
-{
- return pool_elt_at_index (quic_main.ctx_pool[thread_index], ctx_index);
-}
-
-static quic_ctx_t *
-quic_ctx_get_if_valid (u32 ctx_index, clib_thread_index_t thread_index)
-{
- if (pool_is_free_index (quic_main.ctx_pool[thread_index], ctx_index))
- return 0;
- return pool_elt_at_index (quic_main.ctx_pool[thread_index], ctx_index);
-}
-
-quic_ctx_t *
-quic_get_conn_ctx (quicly_conn_t * conn)
-{
- u64 conn_data;
- conn_data = (u64) * quicly_get_data (conn);
- return quic_ctx_get (conn_data & UINT32_MAX, conn_data >> 32);
-}
-
-static void
-quic_store_conn_ctx (quicly_conn_t * conn, quic_ctx_t * ctx)
-{
- *quicly_get_data (conn) =
- (void *) (((u64) ctx->c_thread_index) << 32 | (u64) ctx->c_c_index);
-}
-
-static inline int
-quic_ctx_is_stream (quic_ctx_t * ctx)
-{
- return (ctx->flags & QUIC_F_IS_STREAM);
-}
-
-static inline int
-quic_ctx_is_listener (quic_ctx_t * ctx)
-{
- return (ctx->flags & QUIC_F_IS_LISTENER);
-}
-
-static inline int
-quic_ctx_is_conn (quic_ctx_t * ctx)
-{
- return !(quic_ctx_is_listener (ctx) || quic_ctx_is_stream (ctx));
-}
-
-static inline session_t *
-get_stream_session_and_ctx_from_stream (quicly_stream_t * stream,
- quic_ctx_t ** ctx)
-{
- quic_stream_data_t *stream_data;
-
- stream_data = (quic_stream_data_t *) stream->data;
- *ctx = quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
- return session_get ((*ctx)->c_s_index, stream_data->thread_index);
-}
-
-static inline void
-quic_make_connection_key (clib_bihash_kv_16_8_t * kv,
- const quicly_cid_plaintext_t * id)
-{
- kv->key[0] = ((u64) id->master_id) << 32 | (u64) id->thread_id;
- kv->key[1] = id->node_id;
-}
-
-static int
-quic_sendable_packet_count (session_t * udp_session)
-{
- u32 max_enqueue;
- u32 packet_size = QUIC_MAX_PACKET_SIZE + SESSION_CONN_HDR_LEN;
- max_enqueue = svm_fifo_max_enqueue (udp_session->tx_fifo);
- return clib_min (max_enqueue / packet_size, QUIC_SEND_PACKET_VEC_SIZE);
-}
-
-static quicly_context_t *
-quic_get_quicly_ctx_from_ctx (quic_ctx_t * ctx)
-{
- crypto_context_t *crctx =
- quic_crypto_context_get (ctx->crypto_context_index, ctx->c_thread_index);
- quic_crypto_context_data_t *data =
- (quic_crypto_context_data_t *) crctx->data;
- return &data->quicly_ctx;
-}
-
-static quicly_context_t *
-quic_get_quicly_ctx_from_udp (u64 udp_session_handle)
-{
- session_t *udp_session = session_get_from_handle (udp_session_handle);
- quic_ctx_t *ctx =
- quic_ctx_get (udp_session->opaque, udp_session->thread_index);
- return quic_get_quicly_ctx_from_ctx (ctx);
-}
-
-static inline void
-quic_set_udp_tx_evt (session_t * udp_session)
-{
- int rv = 0;
- if (svm_fifo_set_event (udp_session->tx_fifo))
- rv = session_program_tx_io_evt (udp_session->handle, SESSION_IO_EVT_TX);
- if (PREDICT_FALSE (rv))
- clib_warning ("Event enqueue errored %d", rv);
-}
-
-static inline void
-quic_stop_ctx_timer (quic_ctx_t * ctx)
-{
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
- if (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID)
- return;
- tw = &quic_main.wrk_ctx[ctx->c_thread_index].timer_wheel;
- tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
- ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
- QUIC_DBG (4, "Stopping timer for ctx %u", ctx->c_c_index);
-}
-
-/* QUIC protocol actions */
-
-static void
-quic_ack_rx_data (session_t * stream_session)
-{
- u32 max_deq;
- quic_ctx_t *sctx;
- svm_fifo_t *f;
- quicly_stream_t *stream;
- quic_stream_data_t *stream_data;
-
- sctx = quic_ctx_get (stream_session->connection_index,
- stream_session->thread_index);
- QUIC_ASSERT (quic_ctx_is_stream (sctx));
- stream = sctx->stream;
- stream_data = (quic_stream_data_t *) stream->data;
-
- f = stream_session->rx_fifo;
- max_deq = svm_fifo_max_dequeue (f);
-
- QUIC_ASSERT (stream_data->app_rx_data_len >= max_deq);
- quicly_stream_sync_recvbuf (stream, stream_data->app_rx_data_len - max_deq);
- QUIC_DBG (3, "Acking %u bytes", stream_data->app_rx_data_len - max_deq);
- stream_data->app_rx_data_len = max_deq;
-}
-
-static void
-quic_disconnect_transport (quic_ctx_t * ctx)
-{
- QUIC_DBG (2, "Disconnecting transport 0x%lx", ctx->udp_session_handle);
- vnet_disconnect_args_t a = {
- .handle = ctx->udp_session_handle,
- .app_index = quic_main.app_index,
- };
-
- if (vnet_disconnect_session (&a))
- clib_warning ("UDP session 0x%lx disconnect errored",
- ctx->udp_session_handle);
-}
-
-static void
-quic_connection_delete (quic_ctx_t * ctx)
-{
- clib_bihash_kv_16_8_t kv;
- quicly_conn_t *conn;
-
- if (ctx->conn == NULL)
- {
- QUIC_DBG (2, "Skipping redundant delete of connection %u",
- ctx->c_c_index);
- return;
- }
- QUIC_DBG (2, "Deleting connection %u", ctx->c_c_index);
-
- QUIC_ASSERT (!quic_ctx_is_stream (ctx));
- quic_stop_ctx_timer (ctx);
-
- /* Delete the connection from the connection map */
- conn = ctx->conn;
- ctx->conn = NULL;
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- QUIC_DBG (2, "Deleting conn with id %lu %lu from map", kv.key[0],
- kv.key[1]);
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 0 /* is_add */ );
-
- quic_disconnect_transport (ctx);
-
- if (conn)
- quicly_free (conn);
- session_transport_delete_notify (&ctx->connection);
-}
-
-void
-quic_increment_counter (u8 evt, u8 val)
-{
- vlib_main_t *vm = vlib_get_main ();
- vlib_node_increment_counter (vm, quic_input_node.index, evt, val);
-}
-
-/**
- * Called when quicly return an error
- * This function interacts tightly with quic_proto_on_close
- */
-static void
-quic_connection_closed (quic_ctx_t * ctx)
-{
- QUIC_DBG (2, "QUIC connection %u/%u closed", ctx->c_thread_index,
- ctx->c_c_index);
-
- /* TODO if connection is not established, just delete the session? */
- /* Actually should send connect or accept error */
-
- switch (ctx->conn_state)
- {
- case QUIC_CONN_STATE_READY:
- /* Error on an opened connection (timeout...)
- This puts the session in closing state, we should receive a notification
- when the app has closed its session */
- session_transport_reset_notify (&ctx->connection);
- /* This ensures we delete the connection when the app confirms the close */
- ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED;
- break;
- case QUIC_CONN_STATE_PASSIVE_CLOSING:
- ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED;
- /* quic_proto_on_close will eventually be called when the app confirms the close
- , we delete the connection at that point */
- break;
- case QUIC_CONN_STATE_PASSIVE_CLOSING_APP_CLOSED:
- /* App already confirmed close, we can delete the connection */
- quic_connection_delete (ctx);
- break;
- case QUIC_CONN_STATE_OPENED:
- case QUIC_CONN_STATE_HANDSHAKE:
- case QUIC_CONN_STATE_ACTIVE_CLOSING:
- quic_connection_delete (ctx);
- break;
- default:
- QUIC_DBG (0, "BUG %d", ctx->conn_state);
- break;
- }
-}
-
-static int
-quic_send_datagram (session_t *udp_session, struct iovec *packet,
- quicly_address_t *dest, quicly_address_t *src)
-{
- u32 max_enqueue, len;
- session_dgram_hdr_t hdr;
- svm_fifo_t *f;
- transport_connection_t *tc;
- int ret;
-
- len = packet->iov_len;
- f = udp_session->tx_fifo;
- tc = session_get_transport (udp_session);
- max_enqueue = svm_fifo_max_enqueue (f);
- if (max_enqueue < SESSION_CONN_HDR_LEN + len)
- {
- QUIC_ERR ("Too much data to send, max_enqueue %u, len %u",
- max_enqueue, len + SESSION_CONN_HDR_LEN);
- return QUIC_ERROR_FULL_FIFO;
- }
-
- /* Build packet header for fifo */
- hdr.data_length = len;
- hdr.data_offset = 0;
- hdr.is_ip4 = tc->is_ip4;
- clib_memcpy (&hdr.lcl_ip, &tc->lcl_ip, sizeof (ip46_address_t));
- hdr.lcl_port = tc->lcl_port;
- hdr.gso_size = 0;
-
- /* Read dest address from quicly-provided sockaddr */
- if (hdr.is_ip4)
- {
- QUIC_ASSERT (dest->sa.sa_family == AF_INET);
- struct sockaddr_in *sa4 = (struct sockaddr_in *) &dest->sa;
- hdr.rmt_port = sa4->sin_port;
- hdr.rmt_ip.ip4.as_u32 = sa4->sin_addr.s_addr;
- }
- else
- {
- QUIC_ASSERT (dest->sa.sa_family == AF_INET6);
- struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &dest->sa;
- hdr.rmt_port = sa6->sin6_port;
- clib_memcpy_fast (&hdr.rmt_ip.ip6, &sa6->sin6_addr, 16);
- }
-
- svm_fifo_seg_t segs[2] = { { (u8 *) &hdr, sizeof (hdr) },
- { packet->iov_base, len } };
-
- ret = svm_fifo_enqueue_segments (f, segs, 2, 0 /* allow partial */);
- if (PREDICT_FALSE (ret < 0))
- {
- QUIC_ERR ("Not enough space to enqueue dgram");
- return QUIC_ERROR_FULL_FIFO;
- }
-
- quic_increment_counter (QUIC_ERROR_TX_PACKETS, 1);
-
- return 0;
-}
-
-static int
-quic_send_packets (quic_ctx_t * ctx)
-{
- struct iovec packets[QUIC_SEND_PACKET_VEC_SIZE];
- uint8_t
- buf[QUIC_SEND_PACKET_VEC_SIZE * quic_get_quicly_ctx_from_ctx (ctx)
- ->transport_params.max_udp_payload_size];
- session_t *udp_session;
- quicly_conn_t *conn;
- size_t num_packets, i, max_packets;
- u32 n_sent = 0;
- int err = 0;
-
- /* We have sctx, get qctx */
- if (quic_ctx_is_stream (ctx))
- ctx = quic_ctx_get (ctx->quic_connection_ctx_id, ctx->c_thread_index);
-
- QUIC_ASSERT (!quic_ctx_is_stream (ctx));
-
- udp_session = session_get_from_handle_if_valid (ctx->udp_session_handle);
- if (!udp_session)
- goto quicly_error;
-
- conn = ctx->conn;
- if (!conn)
- return 0;
-
- do
- {
- /* TODO : quicly can assert it can send min_packets up to 2 */
- max_packets = quic_sendable_packet_count (udp_session);
- if (max_packets < 2)
- break;
-
- num_packets = max_packets;
- if ((err = quicly_send (conn, &ctx->rmt_ip, &ctx->lcl_ip, packets,
- &num_packets, buf, sizeof (buf))))
- goto quicly_error;
-
- for (i = 0; i != num_packets; ++i)
- {
-
- if ((err = quic_send_datagram (udp_session, &packets[i],
- &ctx->rmt_ip, &ctx->lcl_ip)))
- goto quicly_error;
- }
- n_sent += num_packets;
- }
- while (num_packets > 0 && num_packets == max_packets);
-
- quic_set_udp_tx_evt (udp_session);
-
- QUIC_DBG (3, "%u[TX] %u[RX]", svm_fifo_max_dequeue (udp_session->tx_fifo),
- svm_fifo_max_dequeue (udp_session->rx_fifo));
- quic_update_timer (ctx);
- return n_sent;
-
-quicly_error:
- if (err && err != QUICLY_ERROR_PACKET_IGNORED
- && err != QUICLY_ERROR_FREE_CONNECTION)
- clib_warning ("Quic error '%U'.", quic_format_err, err);
- quic_connection_closed (ctx);
- return 0;
-}
-
-/* Quicly callbacks */
-
-static void
-quic_on_stream_destroy (quicly_stream_t * stream, int err)
-{
- quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
- stream_data->thread_index);
- QUIC_DBG (2, "DESTROYED_STREAM: session 0x%lx (%U)",
- session_handle (stream_session), quic_format_err, err);
-
- session_transport_closing_notify (&sctx->connection);
- session_transport_delete_notify (&sctx->connection);
-
- quic_increment_counter (QUIC_ERROR_CLOSED_STREAM, 1);
- quic_ctx_free (sctx);
- clib_mem_free (stream->data);
-}
-
-static void
-quic_on_stop_sending (quicly_stream_t * stream, int err)
-{
-#if QUIC_DEBUG >= 2
- quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
- stream_data->thread_index);
- session_t *stream_session = session_get (sctx->c_s_index,
- sctx->c_thread_index);
- clib_warning ("(NOT IMPLEMENTD) STOP_SENDING: session 0x%lx (%U)",
- session_handle (stream_session), quic_format_err, err);
-#endif
- /* TODO : handle STOP_SENDING */
-}
-
-static void
-quic_on_receive_reset (quicly_stream_t * stream, int err)
-{
- quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
- quic_ctx_t *sctx = quic_ctx_get (stream_data->ctx_id,
- stream_data->thread_index);
-#if QUIC_DEBUG >= 2
- session_t *stream_session = session_get (sctx->c_s_index,
- sctx->c_thread_index);
- clib_warning ("RESET_STREAM: session 0x%lx (%U)",
- session_handle (stream_session), quic_format_err, err);
-#endif
- session_transport_closing_notify (&sctx->connection);
-}
-
-static void
-quic_on_receive (quicly_stream_t * stream, size_t off, const void *src,
- size_t len)
-{
- QUIC_DBG (3, "received data: %lu bytes, offset %lu", len, off);
- u32 max_enq;
- quic_ctx_t *sctx;
- session_t *stream_session;
- app_worker_t *app_wrk;
- svm_fifo_t *f;
- quic_stream_data_t *stream_data;
- int rlen;
-
- if (!len)
- return;
-
- stream_data = (quic_stream_data_t *) stream->data;
- sctx = quic_ctx_get (stream_data->ctx_id, stream_data->thread_index);
- stream_session = session_get (sctx->c_s_index, stream_data->thread_index);
- f = stream_session->rx_fifo;
-
- max_enq = svm_fifo_max_enqueue_prod (f);
- QUIC_DBG (3, "Enqueuing %u at off %u in %u space", len, off, max_enq);
- /* Handle duplicate packet/chunk from quicly */
- if (off < stream_data->app_rx_data_len)
- {
- QUIC_DBG (3, "Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
- "DUPLICATE PACKET (max_enq %u, len %u, "
- "app_rx_data_len %u, off %u, ToBeNQ %u)",
- stream_session->session_index,
- stream_session->app_wrk_index,
- stream_session->thread_index, f,
- max_enq, len, stream_data->app_rx_data_len, off,
- off - stream_data->app_rx_data_len + len);
- return;
- }
- if (PREDICT_FALSE ((off - stream_data->app_rx_data_len + len) > max_enq))
- {
- QUIC_ERR ("Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
- "RX FIFO IS FULL (max_enq %u, len %u, "
- "app_rx_data_len %u, off %u, ToBeNQ %u)",
- stream_session->session_index,
- stream_session->app_wrk_index,
- stream_session->thread_index, f,
- max_enq, len, stream_data->app_rx_data_len, off,
- off - stream_data->app_rx_data_len + len);
- return; /* This shouldn't happen */
- }
- if (off == stream_data->app_rx_data_len)
- {
- /* Streams live on the same thread so (f, stream_data) should stay consistent */
- rlen = svm_fifo_enqueue (f, len, (u8 *) src);
- if (PREDICT_FALSE (rlen < 0))
- {
- /*
- * drop, fifo full
- * drop, fifo grow
- */
- return;
- }
- QUIC_DBG (3, "Session [idx %u, app_wrk %u, ti %u, rx-fifo 0x%llx]: "
- "Enqueuing %u (rlen %u) at off %u in %u space, ",
- stream_session->session_index,
- stream_session->app_wrk_index,
- stream_session->thread_index, f, len, rlen, off, max_enq);
- stream_data->app_rx_data_len += rlen;
- QUIC_ASSERT (rlen >= len);
- app_wrk = app_worker_get_if_valid (stream_session->app_wrk_index);
- if (PREDICT_TRUE (app_wrk != 0))
- {
- app_worker_rx_notify (app_wrk, stream_session);
- }
- quic_ack_rx_data (stream_session);
- }
- else
- {
- rlen = svm_fifo_enqueue_with_offset (f,
- off - stream_data->app_rx_data_len,
- len, (u8 *) src);
- if (PREDICT_FALSE (rlen < 0))
- {
- /*
- * drop, fifo full
- * drop, fifo grow
- */
- return;
- }
- QUIC_ASSERT (rlen == 0);
- }
- return;
-}
-
-void
-quic_fifo_egress_shift (quicly_stream_t * stream, size_t delta)
-{
- quic_stream_data_t *stream_data;
- session_t *stream_session;
- quic_ctx_t *ctx;
- svm_fifo_t *f;
- u32 rv;
-
- stream_data = (quic_stream_data_t *) stream->data;
- stream_session = get_stream_session_and_ctx_from_stream (stream, &ctx);
- f = stream_session->tx_fifo;
-
- QUIC_ASSERT (stream_data->app_tx_data_len >= delta);
- stream_data->app_tx_data_len -= delta;
- ctx->bytes_written += delta;
- rv = svm_fifo_dequeue_drop (f, delta);
- QUIC_ASSERT (rv == delta);
-
- rv = quicly_stream_sync_sendbuf (stream, 0);
- QUIC_ASSERT (!rv);
-}
-
-void
-quic_fifo_egress_emit (quicly_stream_t * stream, size_t off, void *dst,
- size_t * len, int *wrote_all)
-{
- quic_stream_data_t *stream_data;
- quic_ctx_t *ctx;
- session_t *stream_session;
- svm_fifo_t *f;
- u32 deq_max;
-
- stream_data = (quic_stream_data_t *) stream->data;
- stream_session = get_stream_session_and_ctx_from_stream (stream, &ctx);
- f = stream_session->tx_fifo;
-
- QUIC_DBG (3, "Emitting %u, offset %u", *len, off);
-
- deq_max = svm_fifo_max_dequeue_cons (f);
- QUIC_ASSERT (off <= deq_max);
- if (off + *len < deq_max)
- {
- *wrote_all = 0;
- }
- else
- {
- *wrote_all = 1;
- *len = deq_max - off;
- }
- QUIC_ASSERT (*len > 0);
-
- if (off + *len > stream_data->app_tx_data_len)
- stream_data->app_tx_data_len = off + *len;
-
- svm_fifo_peek (f, off, *len, dst);
-}
-
-static const quicly_stream_callbacks_t quic_stream_callbacks = {
- .on_destroy = quic_on_stream_destroy,
- .on_send_shift = quic_fifo_egress_shift,
- .on_send_emit = quic_fifo_egress_emit,
- .on_send_stop = quic_on_stop_sending,
- .on_receive = quic_on_receive,
- .on_receive_reset = quic_on_receive_reset
-};
-
-static int
-quic_on_stream_open (quicly_stream_open_t * self, quicly_stream_t * stream)
-{
- /* Return code for this function ends either
- * - in quicly_receive : if not QUICLY_ERROR_PACKET_IGNORED, will close connection
- * - in quicly_open_stream, returned directly
- */
-
- session_t *stream_session, *quic_session;
- quic_stream_data_t *stream_data;
- app_worker_t *app_wrk;
- quic_ctx_t *qctx, *sctx;
- u32 sctx_id;
- int rv;
-
- QUIC_DBG (2, "on_stream_open called");
- stream->data = clib_mem_alloc (sizeof (quic_stream_data_t));
- stream->callbacks = &quic_stream_callbacks;
- /* Notify accept on parent qsession, but only if this is not a locally
- * initiated stream */
- if (quicly_stream_is_self_initiated (stream))
- return 0;
-
- sctx_id = quic_ctx_alloc (vlib_get_thread_index ());
- qctx = quic_get_conn_ctx (stream->conn);
-
- /* Might need to signal that the connection is ready if the first thing the
- * server does is open a stream */
- quic_check_quic_session_connected (qctx);
- /* ctx might be invalidated */
- qctx = quic_get_conn_ctx (stream->conn);
-
- stream_session = session_alloc (qctx->c_thread_index);
- QUIC_DBG (2, "ACCEPTED stream_session 0x%lx ctx %u",
- session_handle (stream_session), sctx_id);
- sctx = quic_ctx_get (sctx_id, qctx->c_thread_index);
- sctx->parent_app_wrk_id = qctx->parent_app_wrk_id;
- sctx->parent_app_id = qctx->parent_app_id;
- sctx->quic_connection_ctx_id = qctx->c_c_index;
- sctx->c_c_index = sctx_id;
- sctx->c_s_index = stream_session->session_index;
- sctx->stream = stream;
- sctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
- sctx->flags |= QUIC_F_IS_STREAM;
- sctx->crypto_context_index = qctx->crypto_context_index;
- if (quicly_stream_is_unidirectional (stream->stream_id))
- stream_session->flags |= SESSION_F_UNIDIRECTIONAL;
-
- stream_data = (quic_stream_data_t *) stream->data;
- stream_data->ctx_id = sctx_id;
- stream_data->thread_index = sctx->c_thread_index;
- stream_data->app_rx_data_len = 0;
- stream_data->app_tx_data_len = 0;
-
- sctx->c_s_index = stream_session->session_index;
- stream_session->session_state = SESSION_STATE_CREATED;
- stream_session->app_wrk_index = sctx->parent_app_wrk_id;
- stream_session->connection_index = sctx->c_c_index;
- stream_session->session_type =
- session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, qctx->udp_is_ip4);
- quic_session = session_get (qctx->c_s_index, qctx->c_thread_index);
- /* Make sure quic session is in listening state */
- quic_session->session_state = SESSION_STATE_LISTENING;
- stream_session->listener_handle = listen_session_get_handle (quic_session);
-
- app_wrk = app_worker_get (stream_session->app_wrk_index);
- if ((rv = app_worker_init_connected (app_wrk, stream_session)))
- {
- QUIC_ERR ("failed to allocate fifos");
- quicly_reset_stream (stream, QUIC_APP_ALLOCATION_ERROR);
- return 0; /* Frame is still valid */
- }
- svm_fifo_add_want_deq_ntf (stream_session->rx_fifo,
- SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
- SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
- svm_fifo_init_ooo_lookup (stream_session->rx_fifo, 0 /* ooo enq */);
- svm_fifo_init_ooo_lookup (stream_session->tx_fifo, 1 /* ooo deq */);
-
- stream_session->session_state = SESSION_STATE_ACCEPTING;
- if ((rv = app_worker_accept_notify (app_wrk, stream_session)))
- {
- QUIC_ERR ("failed to notify accept worker app");
- quicly_reset_stream (stream, QUIC_APP_ACCEPT_NOTIFY_ERROR);
- return 0; /* Frame is still valid */
- }
-
- return 0;
-}
-
-static void
-quic_on_closed_by_remote (quicly_closed_by_remote_t *self, quicly_conn_t *conn,
- int code, uint64_t frame_type, const char *reason,
- size_t reason_len)
-{
- quic_ctx_t *ctx = quic_get_conn_ctx (conn);
-#if QUIC_DEBUG >= 2
- session_t *quic_session = session_get (ctx->c_s_index, ctx->c_thread_index);
- clib_warning ("Session 0x%lx closed by peer (%U) %.*s ",
- session_handle (quic_session), quic_format_err, code,
- reason_len, reason);
-#endif
- ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING;
- session_transport_closing_notify (&ctx->connection);
-}
-
-/* Timer handling */
-
-static int64_t
-quic_get_thread_time (u8 thread_index)
-{
- return quic_main.wrk_ctx[thread_index].time_now;
-}
-
-static int64_t
-quic_get_time (quicly_now_t * self)
-{
- u8 thread_index = vlib_get_thread_index ();
- return quic_get_thread_time (thread_index);
-}
-
-static u32
-quic_set_time_now (clib_thread_index_t thread_index)
-{
- vlib_main_t *vlib_main = vlib_get_main ();
- f64 time = vlib_time_now (vlib_main);
- quic_main.wrk_ctx[thread_index].time_now = (int64_t) (time * 1000.f);
- return quic_main.wrk_ctx[thread_index].time_now;
-}
-
-/* Transport proto callback */
-static void
-quic_update_time (f64 now, u8 thread_index)
-{
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
-
- tw = &quic_main.wrk_ctx[thread_index].timer_wheel;
- quic_set_time_now (thread_index);
- tw_timer_expire_timers_1t_3w_1024sl_ov (tw, now);
-}
-
-static void
-quic_timer_expired (u32 conn_index)
-{
- quic_ctx_t *ctx;
- QUIC_DBG (4, "Timer expired for conn %u at %ld", conn_index,
- quic_get_time (NULL));
- ctx = quic_ctx_get (conn_index, vlib_get_thread_index ());
- ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
- quic_send_packets (ctx);
-}
-
-static void
-quic_update_timer (quic_ctx_t * ctx)
-{
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
- int64_t next_timeout, next_interval;
- session_t *quic_session;
- int rv;
-
- /* This timeout is in ms which is the unit of our timer */
- next_timeout = quicly_get_first_timeout (ctx->conn);
- next_interval = next_timeout - quic_get_time (NULL);
-
- if (next_timeout == 0 || next_interval <= 0)
- {
- if (ctx->c_s_index == QUIC_SESSION_INVALID)
- {
- next_interval = 1;
- }
- else
- {
- quic_session = session_get (ctx->c_s_index, ctx->c_thread_index);
- if (svm_fifo_set_event (quic_session->tx_fifo))
- {
- rv = session_program_tx_io_evt (quic_session->handle,
- SESSION_IO_EVT_TX);
- if (PREDICT_FALSE (rv))
- QUIC_ERR ("Failed to enqueue builtin_tx %d", rv);
- }
- return;
- }
- }
-
- ASSERT (vlib_get_thread_index () == ctx->c_thread_index ||
- vlib_get_thread_index () == 0);
- tw = &quic_main.wrk_ctx[ctx->c_thread_index].timer_wheel;
-
- QUIC_DBG (4, "Timer set to %ld (int %ld) for ctx %u", next_timeout,
- next_interval, ctx->c_c_index);
-
- if (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID)
- {
- if (next_timeout == INT64_MAX)
- {
- QUIC_DBG (4, "timer for ctx %u already stopped", ctx->c_c_index);
- return;
- }
- ctx->timer_handle = tw_timer_start_1t_3w_1024sl_ov (tw, ctx->c_c_index,
- 0, next_interval);
- }
- else
- {
- if (next_timeout == INT64_MAX)
- {
- quic_stop_ctx_timer (ctx);
- }
- else
- tw_timer_update_1t_3w_1024sl_ov (tw, ctx->timer_handle,
- next_interval);
- }
- return;
-}
-
-static void
-quic_expired_timers_dispatch (u32 * expired_timers)
-{
- int i;
+/* Helper functions */
- for (i = 0; i < vec_len (expired_timers); i++)
- {
- quic_timer_expired (expired_timers[i]);
- }
+static_always_inline quic_ctx_t *
+quic_ctx_get (u32 ctx_index, clib_thread_index_t thread_index)
+{
+ return pool_elt_at_index (quic_main.ctx_pool[thread_index], ctx_index);
}
/* Transport proto functions */
static int
quic_connect_stream (session_t * quic_session, session_endpoint_cfg_t * sep)
{
+ quic_main_t *qm = &quic_main;
uint64_t quic_session_handle;
session_t *stream_session;
quic_stream_data_t *stream_data;
- quicly_stream_t *stream;
- quicly_conn_t *conn;
+ void *stream;
+ void *conn;
app_worker_t *app_wrk;
quic_ctx_t *qctx, *sctx;
u32 sctx_index;
return -1;
}
- sctx_index = quic_ctx_alloc (quic_session->thread_index); /* Allocate before we get pointers */
+ sctx_index = quic_ctx_alloc (
+ qm, quic_session->thread_index); /* Allocate before we get pointers */
sctx = quic_ctx_get (sctx_index, quic_session->thread_index);
- qctx = quic_ctx_get (quic_session->connection_index,
- quic_session->thread_index);
+ qctx =
+ quic_ctx_get (quic_session->connection_index, quic_session->thread_index);
if (quic_ctx_is_stream (qctx))
{
QUIC_ERR ("session is a stream");
- quic_ctx_free (sctx);
+ quic_ctx_free (qm, sctx);
return -1;
}
sctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
sctx->flags |= QUIC_F_IS_STREAM;
- conn = qctx->conn;
-
- if (!conn || !quicly_connection_is_ready (conn))
+ if (!(conn = qctx->conn))
return -1;
is_unidir = sep->transport_flags & TRANSPORT_CFG_F_UNIDIRECTIONAL;
- if ((rv = quicly_open_stream (conn, &stream, is_unidir)))
+ rv = quic_eng_connect_stream (conn, &stream, &stream_data, is_unidir);
+ if (rv)
{
- QUIC_DBG (2, "Stream open failed with %d", rv);
+ QUIC_DBG (2,
+ "quic_eng_connect_stream (c=0x%lx, s=0x%lx, sd=0x%lx, u=%d) "
+ "failed (rv=%d)",
+ conn, &stream, &stream_data, is_unidir, rv);
return -1;
}
- quic_increment_counter (QUIC_ERROR_OPENED_STREAM, 1);
+ quic_increment_counter (qm, QUIC_ERROR_OPENED_STREAM, 1);
sctx->stream = stream;
sctx->crypto_context_index = qctx->crypto_context_index;
- QUIC_DBG (2, "Opened stream %d, creating session", stream->stream_id);
-
stream_session = session_alloc (qctx->c_thread_index);
QUIC_DBG (2, "Allocated stream_session 0x%lx ctx %u",
session_handle (stream_session), sctx_index);
stream_session->flags |= SESSION_F_UNIDIRECTIONAL;
sctx->c_s_index = stream_session->session_index;
- stream_data = (quic_stream_data_t *) stream->data;
stream_data->ctx_id = sctx->c_c_index;
stream_data->thread_index = sctx->c_thread_index;
stream_data->app_rx_data_len = 0;
if ((rv = app_worker_init_connected (app_wrk, stream_session)))
{
QUIC_ERR ("failed to app_worker_init_connected");
- quicly_reset_stream (stream, QUIC_APP_CONNECT_NOTIFY_ERROR);
+ quic_eng_connect_stream_error_reset (stream);
return app_worker_connect_notify (app_wrk, NULL, rv, sep->opaque);
}
svm_fifo_init_ooo_lookup (stream_session->tx_fifo, 1 /* ooo deq */);
svm_fifo_add_want_deq_ntf (stream_session->rx_fifo,
SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
- SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
+ SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
if (app_worker_connect_notify (app_wrk, stream_session, SESSION_E_NONE,
sep->opaque))
{
QUIC_ERR ("failed to notify app");
- quic_increment_counter (QUIC_ERROR_CLOSED_STREAM, 1);
- quicly_reset_stream (stream, QUIC_APP_CONNECT_NOTIFY_ERROR);
+ quic_increment_counter (qm, QUIC_ERROR_CLOSED_STREAM, 1);
+ quic_eng_connect_stream_error_reset (stream);
return -1;
}
ccfg = &ext_cfg->crypto;
clib_memset (cargs, 0, sizeof (*cargs));
- ctx_index = quic_ctx_alloc (thread_index);
+ ctx_index = quic_ctx_alloc (qm, thread_index);
ctx = quic_ctx_get (ctx_index, thread_index);
ctx->parent_app_wrk_id = sep->app_wrk_index;
ctx->c_s_index = QUIC_SESSION_INVALID;
ctx->srv_hostname = format (0, "%s", ccfg->hostname);
else
/* needed by quic for crypto + determining client / server */
- ctx->srv_hostname = format (0, "%U", format_ip46_address,
- &sep->ip, sep->is_ip4);
+ ctx->srv_hostname =
+ format (0, "%U", format_ip46_address, &sep->ip, sep->is_ip4);
vec_terminate_c_string (ctx->srv_hostname);
clib_memcpy (&cargs->sep_ext, sep, sizeof (session_endpoint_cfg_t));
ctx->crypto_engine = ccfg->crypto_engine;
ctx->ckpair_index = ccfg->ckpair_index;
- if ((error = quic_acquire_crypto_context (ctx)))
+ error = quic_eng_crypto_context_acquire (ctx);
+ if (error)
return error;
- if ((error = vnet_connect (cargs)))
+ error = vnet_connect (cargs);
+ if (error)
return error;
return 0;
static int
quic_connect (transport_endpoint_cfg_t * tep)
{
- QUIC_DBG (2, "Called quic_connect");
session_endpoint_cfg_t *sep = (session_endpoint_cfg_t *) tep;
session_t *quic_session;
sep = (session_endpoint_cfg_t *) tep;
static void
quic_proto_on_close (u32 ctx_index, clib_thread_index_t thread_index)
{
- int err;
- quic_ctx_t *ctx = quic_ctx_get_if_valid (ctx_index, thread_index);
- if (!ctx)
- return;
- session_t *stream_session = session_get (ctx->c_s_index,
- ctx->c_thread_index);
-#if QUIC_DEBUG >= 2
- clib_warning ("Closing session 0x%lx", session_handle (stream_session));
-#endif
- if (quic_ctx_is_stream (ctx))
- {
- quicly_stream_t *stream = ctx->stream;
- if (!quicly_stream_has_send_side (quicly_is_client (stream->conn),
- stream->stream_id))
- return;
- quicly_sendstate_shutdown (&stream->sendstate, ctx->bytes_written +
- svm_fifo_max_dequeue
- (stream_session->tx_fifo));
- err = quicly_stream_sync_sendbuf (stream, 1);
- if (err)
- {
- QUIC_DBG (1, "sendstate_shutdown failed for stream session %lu",
- session_handle (stream_session));
- quicly_reset_stream (stream, QUIC_APP_ERROR_CLOSE_NOTIFY);
- }
- quic_send_packets (ctx);
- return;
- }
-
- switch (ctx->conn_state)
- {
- case QUIC_CONN_STATE_OPENED:
- case QUIC_CONN_STATE_HANDSHAKE:
- case QUIC_CONN_STATE_READY:
- ctx->conn_state = QUIC_CONN_STATE_ACTIVE_CLOSING;
- quicly_conn_t *conn = ctx->conn;
- /* Start connection closing. Keep sending packets until quicly_send
- returns QUICLY_ERROR_FREE_CONNECTION */
-
- quic_increment_counter (QUIC_ERROR_CLOSED_CONNECTION, 1);
- quicly_close (conn, QUIC_APP_ERROR_CLOSE_NOTIFY, "Closed by peer");
- /* This also causes all streams to be closed (and the cb called) */
- quic_send_packets (ctx);
- break;
- case QUIC_CONN_STATE_PASSIVE_CLOSING:
- ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_APP_CLOSED;
- /* send_packets will eventually return an error, we delete the conn at
- that point */
- break;
- case QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED:
- quic_connection_delete (ctx);
- break;
- case QUIC_CONN_STATE_ACTIVE_CLOSING:
- break;
- default:
- QUIC_ERR ("Trying to close conn in state %d", ctx->conn_state);
- break;
- }
+ quic_eng_proto_on_close (ctx_index, thread_index);
}
static u32
if ((rv = vnet_listen (args)))
return rv;
- lctx_index = quic_ctx_alloc (0);
+ lctx_index = quic_ctx_alloc (qm, 0);
udp_handle = args->handle;
app_listener = app_listener_get_w_handle (udp_handle);
udp_listen_session = app_listener_get_session (app_listener);
lctx->c_s_index = quic_listen_session_index;
lctx->crypto_engine = ccfg->crypto_engine;
lctx->ckpair_index = ccfg->ckpair_index;
- if ((rv = quic_acquire_crypto_context (lctx)))
+ if ((rv = quic_eng_crypto_context_acquire (lctx)))
return rv;
QUIC_DBG (2, "Listening UDP session 0x%lx",
quic_stop_listen (u32 lctx_index)
{
QUIC_DBG (2, "Called quic_stop_listen");
- quic_ctx_t *lctx;
- lctx = quic_ctx_get (lctx_index, 0);
- QUIC_ASSERT (quic_ctx_is_listener (lctx));
- vnet_unlisten_args_t a = {
- .handle = lctx->udp_session_handle,
- .app_index = quic_main.app_index,
- .wrk_map_index = 0 /* default wrk */
- };
- if (vnet_unlisten (&a))
- clib_warning ("unlisten errored");
-
- quic_release_crypto_context (lctx->crypto_context_index,
- 0 /* thread_index */ );
- quic_ctx_free (lctx);
+ if (PREDICT_TRUE (lctx_index))
+ {
+ quic_ctx_t *lctx;
+ lctx = quic_ctx_get (lctx_index, 0);
+ QUIC_ASSERT (quic_ctx_is_listener (lctx));
+ vnet_unlisten_args_t a = {
+ .handle = lctx->udp_session_handle,
+ .app_index = quic_main.app_index,
+ .wrk_map_index = 0 /* default wrk */
+ };
+ if (vnet_unlisten (&a))
+ clib_warning ("unlisten errored");
+
+ quic_eng_crypto_context_release (lctx->crypto_context_index,
+ 0 /* thread_index */);
+ quic_ctx_free (&quic_main, lctx);
+ }
return 0;
}
if (quic_ctx_is_listener (ctx))
str = format (str, "Listener, UDP %ld", ctx->udp_session_handle);
else if (quic_ctx_is_stream (ctx))
- str = format (str, "Stream %ld conn %d",
- ctx->stream->stream_id, ctx->quic_connection_ctx_id);
- else /* connection */
- str = format (str, "Conn %d UDP %d", ctx->c_c_index,
- ctx->udp_session_handle);
+ str = format (str, "%U", quic_eng_format_stream_connection, ctx);
+ else /* connection */
+ str =
+ format (str, "Conn %d UDP %d", ctx->c_c_index, ctx->udp_session_handle);
- str = format (str, " app %d wrk %d", ctx->parent_app_id,
- ctx->parent_app_wrk_id);
+ str =
+ format (str, " app %d wrk %d", ctx->parent_app_id, ctx->parent_app_wrk_id);
if (verbose == 1)
s = format (s, "%-" SESSION_CLI_ID_LEN "s%-" SESSION_CLI_STATE_LEN "d",
u32 qc_index = va_arg (*args, u32);
clib_thread_index_t thread_index = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
- s = format (s, "[#%d][Q] half-open app %u", thread_index,
- ctx->parent_app_id);
+ s =
+ format (s, "[#%d][Q] half-open app %u", thread_index, ctx->parent_app_id);
return s;
}
/* Session layer callbacks */
-static inline void
-quic_build_sockaddr (struct sockaddr *sa, socklen_t * salen,
- ip46_address_t * addr, u16 port, u8 is_ip4)
-{
- if (is_ip4)
- {
- struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
- sa4->sin_family = AF_INET;
- sa4->sin_port = port;
- sa4->sin_addr.s_addr = addr->ip4.as_u32;
- *salen = sizeof (struct sockaddr_in);
- }
- else
- {
- struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
- sa6->sin6_family = AF_INET6;
- sa6->sin6_port = port;
- clib_memcpy (&sa6->sin6_addr, &addr->ip6, 16);
- *salen = sizeof (struct sockaddr_in6);
- }
-}
-
-static void
-quic_on_quic_session_connected (quic_ctx_t * ctx)
-{
- session_t *quic_session;
- app_worker_t *app_wrk;
- u32 ctx_id = ctx->c_c_index;
- clib_thread_index_t thread_index = ctx->c_thread_index;
- int rv;
-
- quic_session = session_alloc (thread_index);
-
- QUIC_DBG (2, "Allocated quic session 0x%lx", session_handle (quic_session));
- ctx->c_s_index = quic_session->session_index;
- quic_session->app_wrk_index = ctx->parent_app_wrk_id;
- quic_session->connection_index = ctx->c_c_index;
- quic_session->listener_handle = SESSION_INVALID_HANDLE;
- quic_session->session_type =
- session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
-
- /* If quic session connected fails, immediatly close connection */
- app_wrk = app_worker_get (ctx->parent_app_wrk_id);
- if ((rv = app_worker_init_connected (app_wrk, quic_session)))
- {
- QUIC_ERR ("failed to app_worker_init_connected");
- quic_proto_on_close (ctx_id, thread_index);
- app_worker_connect_notify (app_wrk, NULL, rv, ctx->client_opaque);
- return;
- }
-
- svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */);
- svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */);
-
- quic_session->session_state = SESSION_STATE_CONNECTING;
- if ((rv = app_worker_connect_notify (app_wrk, quic_session,
- SESSION_E_NONE, ctx->client_opaque)))
- {
- QUIC_ERR ("failed to notify app %d", rv);
- quic_proto_on_close (ctx_id, thread_index);
- return;
- }
-}
-
-static void
-quic_check_quic_session_connected (quic_ctx_t * ctx)
-{
- /* Called when we need to trigger quic session connected
- * we may call this function on the server side / at
- * stream opening */
-
- /* Conn may be set to null if the connection is terminated */
- if (!ctx->conn || ctx->conn_state != QUIC_CONN_STATE_HANDSHAKE)
- return;
- if (!quicly_connection_is_ready (ctx->conn))
- return;
- ctx->conn_state = QUIC_CONN_STATE_READY;
- if (!quicly_is_client (ctx->conn))
- return;
- quic_on_quic_session_connected (ctx);
-}
-
-static inline void
-quic_update_conn_ctx (quicly_conn_t * conn, quicly_context_t * quicly_context)
-{
- /* we need to update the quicly_conn on migrate
- * as it contains a pointer to the crypto context */
- ptls_context_t **tls;
- quicly_context_t **_quicly_context;
- _quicly_context = (quicly_context_t **) conn;
- *_quicly_context = quicly_context;
- tls = (ptls_context_t **) quicly_get_tls (conn);
- *tls = quicly_context->tls;
-}
-
-static void
-quic_receive_connection (void *arg)
-{
- u32 new_ctx_id, thread_index = vlib_get_thread_index ();
- quic_ctx_t *temp_ctx, *new_ctx;
- clib_bihash_kv_16_8_t kv;
- quicly_conn_t *conn;
- quicly_context_t *quicly_context;
- session_t *udp_session;
-
- temp_ctx = arg;
- new_ctx_id = quic_ctx_alloc (thread_index);
- new_ctx = quic_ctx_get (new_ctx_id, thread_index);
-
- QUIC_DBG (2, "Received conn %u (now %u)", temp_ctx->c_thread_index,
- new_ctx_id);
-
- clib_memcpy (new_ctx, temp_ctx, sizeof (quic_ctx_t));
- clib_mem_free (temp_ctx);
-
- new_ctx->c_thread_index = thread_index;
- new_ctx->c_c_index = new_ctx_id;
- quic_acquire_crypto_context (new_ctx);
-
- conn = new_ctx->conn;
- quicly_context = quic_get_quicly_ctx_from_ctx (new_ctx);
- quic_update_conn_ctx (conn, quicly_context);
-
- quic_store_conn_ctx (conn, new_ctx);
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- kv.value = ((u64) thread_index) << 32 | (u64) new_ctx_id;
- QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
- new_ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
- quic_update_timer (new_ctx);
-
- /* Trigger write on this connection if necessary */
- udp_session = session_get_from_handle (new_ctx->udp_session_handle);
- udp_session->opaque = new_ctx_id;
- udp_session->flags &= ~SESSION_F_IS_MIGRATING;
- if (svm_fifo_max_dequeue (udp_session->tx_fifo))
- quic_set_udp_tx_evt (udp_session);
-}
-
static void
quic_transfer_connection (u32 ctx_index, u32 dest_thread)
{
quic_ctx_t *ctx, *temp_ctx;
clib_thread_index_t thread_index = vlib_get_thread_index ();
+ quic_main_t *qm = &quic_main;
QUIC_DBG (2, "Transferring conn %u to thread %u", ctx_index, dest_thread);
clib_memcpy (temp_ctx, ctx, sizeof (quic_ctx_t));
- quic_stop_ctx_timer (ctx);
- quic_release_crypto_context (ctx->crypto_context_index, thread_index);
- quic_ctx_free (ctx);
+ quic_stop_ctx_timer (
+ &quic_wrk_ctx_get (qm, ctx->c_thread_index)->timer_wheel, ctx);
+ QUIC_DBG (4, "Stopped timer for ctx %u", ctx->c_c_index);
+ quic_eng_crypto_context_release (ctx->crypto_context_index, thread_index);
+ quic_ctx_free (qm, ctx);
/* Send connection to destination thread */
- session_send_rpc_evt_to_thread (dest_thread, quic_receive_connection,
- (void *) temp_ctx);
+ quic_eng_rpc_evt_to_thread_connection_migrate (dest_thread, temp_ctx);
}
static int
quic_udp_session_connected_callback (u32 quic_app_index, u32 ctx_index,
- session_t * udp_session,
+ session_t *udp_session,
session_error_t err)
{
- QUIC_DBG (2, "UDP Session is now connected (id %u)",
- udp_session->session_index);
/* This should always be called before quic_connect returns since UDP always
* connects instantly. */
- clib_bihash_kv_16_8_t kv;
struct sockaddr_in6 sa6;
struct sockaddr *sa = (struct sockaddr *) &sa6;
socklen_t salen;
transport_connection_t *tc;
app_worker_t *app_wrk;
- quicly_conn_t *conn;
quic_ctx_t *ctx;
clib_thread_index_t thread_index;
int ret;
- quicly_context_t *quicly_ctx;
+
+ QUIC_DBG (2, "UDP Session connect callback (id %u)",
+ udp_session->session_index);
/* Allocate session on whatever thread udp used, i.e., probably first
* worker, although this may be main thread. If it is main, it's done
return 0;
}
- QUIC_DBG (2, "New ctx [%u]%x", thread_index, (ctx) ? ctx_index : ~0);
+ QUIC_DBG (2, "New ctx [thread=0x%x] ctx_index=0x%x", thread_index,
+ (ctx) ? ctx_index : ~0);
ctx->udp_session_handle = session_handle (udp_session);
udp_session->opaque = ctx_index;
tc = session_get_transport (udp_session);
quic_build_sockaddr (sa, &salen, &tc->rmt_ip, tc->rmt_port, tc->is_ip4);
- quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
- ret = quicly_connect (&ctx->conn, quicly_ctx, (char *) ctx->srv_hostname, sa,
- NULL, &quic_main.wrk_ctx[thread_index].next_cid,
- ptls_iovec_init (NULL, 0), &quic_main.hs_properties,
- NULL, NULL);
- ++quic_main.wrk_ctx[thread_index].next_cid.master_id;
- /* Save context handle in quicly connection */
- quic_store_conn_ctx (ctx->conn, ctx);
- assert (ret == 0);
-
- /* Register connection in connections map */
- conn = ctx->conn;
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- kv.value = ((u64) thread_index) << 32 | (u64) ctx_index;
- QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
-
- quic_send_packets (ctx);
+ ret = quic_eng_connect (ctx, ctx_index, thread_index, sa);
+ quic_eng_send_packets (ctx);
return ret;
}
}
static void
-quic_udp_session_cleanup_callback (session_t * udp_session,
+quic_udp_session_cleanup_callback (session_t *udp_session,
session_cleanup_ntf_t ntf)
{
+ quic_main_t *qm = &quic_main;
quic_ctx_t *ctx;
if (ntf != SESSION_CLEANUP_SESSION)
return;
ctx = quic_ctx_get (udp_session->opaque, udp_session->thread_index);
- quic_stop_ctx_timer (ctx);
- quic_release_crypto_context (ctx->crypto_context_index,
- ctx->c_thread_index);
- quic_ctx_free (ctx);
+ quic_stop_ctx_timer (
+ &quic_wrk_ctx_get (qm, ctx->c_thread_index)->timer_wheel, ctx);
+ QUIC_DBG (4, "Stopped timer for ctx %u", ctx->c_c_index);
+ quic_eng_crypto_context_release (ctx->crypto_context_index,
+ ctx->c_thread_index);
+ quic_ctx_free (qm, ctx);
}
static void
udp_listen_session =
listen_session_get_from_handle (udp_session->listener_handle);
- ctx_index = quic_ctx_alloc (thread_index);
+ ctx_index = quic_ctx_alloc (&quic_main, thread_index);
ctx = quic_ctx_get (ctx_index, thread_index);
ctx->c_thread_index = udp_session->thread_index;
ctx->c_c_index = ctx_index;
ctx->crypto_engine = lctx->crypto_engine;
ctx->ckpair_index = lctx->ckpair_index;
- quic_acquire_crypto_context (ctx);
+ quic_eng_crypto_context_acquire (ctx);
udp_session->opaque = ctx_index;
udp_session->session_state = SESSION_STATE_READY;
quic_ctx_t *ctx;
session_t *stream_session = session_get (tc->s_index, tc->thread_index);
QUIC_DBG (3, "Received app READ notification");
- quic_ack_rx_data (stream_session);
+ quic_eng_ack_rx_data (stream_session);
svm_fifo_reset_has_deq_ntf (stream_session->rx_fifo);
/* Need to send packets (acks may never be sent otherwise) */
ctx = quic_ctx_get (stream_session->connection_index,
stream_session->thread_index);
- quic_send_packets (ctx);
+ quic_eng_send_packets (ctx);
return 0;
}
quic_custom_tx_callback (void *s, transport_send_params_t * sp)
{
session_t *stream_session = (session_t *) s;
- quic_stream_data_t *stream_data;
- quicly_stream_t *stream;
quic_ctx_t *ctx;
- u32 max_deq;
- int rv;
if (PREDICT_FALSE
(stream_session->session_state >= SESSION_STATE_TRANSPORT_CLOSING))
stream_session->thread_index);
if (PREDICT_FALSE (!quic_ctx_is_stream (ctx)))
{
- goto tx_end; /* Most probably a reschedule */
+ goto tx_end; /* Most probably a reschedule */
}
QUIC_DBG (3, "Stream TX event");
- quic_ack_rx_data (stream_session);
- stream = ctx->stream;
- if (!quicly_sendstate_is_open (&stream->sendstate))
- {
- QUIC_ERR ("Warning: tried to send on closed stream");
- return 0;
- }
+ quic_eng_ack_rx_data (stream_session);
- stream_data = (quic_stream_data_t *) stream->data;
- max_deq = svm_fifo_max_dequeue (stream_session->tx_fifo);
- QUIC_ASSERT (max_deq >= stream_data->app_tx_data_len);
- if (max_deq == stream_data->app_tx_data_len)
+ if (PREDICT_FALSE (!quic_eng_stream_tx (ctx, stream_session)))
{
- QUIC_DBG (3, "TX but no data %d / %d", max_deq,
- stream_data->app_tx_data_len);
+ QUIC_DBG (3, "quic_eng_stream_tx(ctx=0x%lx) failed!", ctx);
return 0;
}
- stream_data->app_tx_data_len = max_deq;
- rv = quicly_stream_sync_sendbuf (stream, 1);
- QUIC_ASSERT (!rv);
tx_end:
- return quic_send_packets (ctx);
-}
-
-/*
- * Returns 0 if a matching connection is found and is on the right thread.
- * Otherwise returns -1.
- * If a connection is found, even on the wrong thread, ctx_thread and ctx_index
- * will be set.
- */
-static inline int
-quic_find_packet_ctx (quic_rx_packet_ctx_t * pctx, u32 caller_thread_index)
-{
- clib_bihash_kv_16_8_t kv;
- clib_bihash_16_8_t *h;
- quic_ctx_t *ctx;
- u32 index, thread_id;
-
- h = &quic_main.connection_hash;
- quic_make_connection_key (&kv, &pctx->packet.cid.dest.plaintext);
- QUIC_DBG (3, "Searching conn with id %lu %lu", kv.key[0], kv.key[1]);
-
- if (clib_bihash_search_16_8 (h, &kv, &kv))
- {
- QUIC_DBG (3, "connection not found");
- return QUIC_PACKET_TYPE_NONE;
- }
-
- index = kv.value & UINT32_MAX;
- thread_id = kv.value >> 32;
- /* Check if this connection belongs to this thread, otherwise
- * ask for it to be moved */
- if (thread_id != caller_thread_index)
- {
- QUIC_DBG (2, "Connection is on wrong thread");
- /* Cannot make full check with quicly_is_destination... */
- pctx->ctx_index = index;
- pctx->thread_index = thread_id;
- return QUIC_PACKET_TYPE_MIGRATE;
- }
- ctx = quic_ctx_get (index, vlib_get_thread_index ());
- if (!ctx->conn)
- {
- QUIC_ERR ("ctx has no conn");
- return QUIC_PACKET_TYPE_NONE;
- }
- if (!quicly_is_destination (ctx->conn, NULL, &pctx->sa, &pctx->packet))
- return QUIC_PACKET_TYPE_NONE;
-
- QUIC_DBG (3, "Connection found");
- pctx->ctx_index = index;
- pctx->thread_index = thread_id;
- return QUIC_PACKET_TYPE_RECEIVE;
-}
-
-static void
-quic_accept_connection (quic_rx_packet_ctx_t * pctx)
-{
- quicly_context_t *quicly_ctx;
- session_t *quic_session;
- clib_bihash_kv_16_8_t kv;
- app_worker_t *app_wrk;
- quicly_conn_t *conn;
- quic_ctx_t *ctx;
- quic_ctx_t *lctx;
- int rv;
-
- /* new connection, accept and create context if packet is valid
- * TODO: check if socket is actually listening? */
- ctx = quic_ctx_get (pctx->ctx_index, pctx->thread_index);
- if (ctx->c_s_index != QUIC_SESSION_INVALID)
- {
- QUIC_DBG (2, "already accepted ctx 0x%x", ctx->c_s_index);
- return;
- }
-
- quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
- if ((rv = quicly_accept (
- &conn, quicly_ctx, NULL, &pctx->sa, &pctx->packet, NULL,
- &quic_main.wrk_ctx[pctx->thread_index].next_cid, NULL, NULL)))
- {
- /* Invalid packet, pass */
- assert (conn == NULL);
- QUIC_ERR ("Accept failed with %U", quic_format_err, rv);
- /* TODO: cleanup created quic ctx and UDP session */
- return;
- }
- assert (conn != NULL);
-
- ++quic_main.wrk_ctx[pctx->thread_index].next_cid.master_id;
- /* Save ctx handle in quicly connection */
- quic_store_conn_ctx (conn, ctx);
- ctx->conn = conn;
-
- quic_session = session_alloc (ctx->c_thread_index);
- QUIC_DBG (2, "Allocated quic_session, 0x%lx ctx %u",
- session_handle (quic_session), ctx->c_c_index);
- ctx->c_s_index = quic_session->session_index;
-
- lctx = quic_ctx_get (ctx->listener_ctx_id, 0);
-
- quic_session->app_wrk_index = lctx->parent_app_wrk_id;
- quic_session->connection_index = ctx->c_c_index;
- quic_session->session_type =
- session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
- quic_session->listener_handle = lctx->c_s_index;
-
- /* Register connection in connections map */
- quic_make_connection_key (&kv, quicly_get_master_id (conn));
- kv.value = ((u64) pctx->thread_index) << 32 | (u64) pctx->ctx_index;
- clib_bihash_add_del_16_8 (&quic_main.connection_hash, &kv, 1 /* is_add */ );
- QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
-
- /* If notify fails, reset connection immediatly */
- if ((rv = app_worker_init_accepted (quic_session)))
- {
- QUIC_ERR ("failed to allocate fifos");
- quic_proto_on_close (pctx->ctx_index, pctx->thread_index);
- return;
- }
-
- svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */);
- svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */);
-
- app_wrk = app_worker_get (quic_session->app_wrk_index);
- quic_session->session_state = SESSION_STATE_ACCEPTING;
- if ((rv = app_worker_accept_notify (app_wrk, quic_session)))
- {
- QUIC_ERR ("failed to notify accept worker app");
- quic_proto_on_close (pctx->ctx_index, pctx->thread_index);
- return;
- }
-
- ctx->conn_state = QUIC_CONN_STATE_READY;
-}
-
-static int
-quic_reset_connection (u64 udp_session_handle, quic_rx_packet_ctx_t * pctx)
-{
- /* short header packet; potentially a dead connection. No need to check the
- * length of the incoming packet, because loop is prevented by authenticating
- * the CID (by checking node_id and thread_id). If the peer is also sending a
- * reset, then the next CID is highly likely to contain a non-authenticating
- * CID, ... */
- QUIC_DBG (2, "Sending stateless reset");
- int rv;
- session_t *udp_session;
- quicly_context_t *quicly_ctx;
- if (pctx->packet.cid.dest.plaintext.node_id != 0
- || pctx->packet.cid.dest.plaintext.thread_id != 0)
- return 0;
- quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
- quic_ctx_t *qctx = quic_ctx_get (pctx->ctx_index, pctx->thread_index);
-
- quicly_address_t src;
- uint8_t payload[quicly_ctx->transport_params.max_udp_payload_size];
- size_t payload_len =
- quicly_send_stateless_reset (quicly_ctx, &src.sa, payload);
- if (payload_len == 0)
- return 1;
-
- struct iovec packet;
- packet.iov_len = payload_len;
- packet.iov_base = payload;
-
- udp_session = session_get_from_handle (udp_session_handle);
- rv = quic_send_datagram (udp_session, &packet, &qctx->rmt_ip, &qctx->lcl_ip);
- quic_set_udp_tx_evt (udp_session);
- return rv;
-}
-
-static int
-quic_process_one_rx_packet (u64 udp_session_handle, svm_fifo_t * f,
- u32 fifo_offset, quic_rx_packet_ctx_t * pctx)
-{
- size_t plen;
- u32 full_len, ret;
- clib_thread_index_t thread_index = vlib_get_thread_index ();
- u32 cur_deq = svm_fifo_max_dequeue (f) - fifo_offset;
- quicly_context_t *quicly_ctx;
- session_t *udp_session;
- int rv;
-
- ret = svm_fifo_peek (f, fifo_offset,
- SESSION_CONN_HDR_LEN, (u8 *) & pctx->ph);
- QUIC_ASSERT (ret == SESSION_CONN_HDR_LEN);
- QUIC_ASSERT (pctx->ph.data_offset == 0);
- full_len = pctx->ph.data_length + SESSION_CONN_HDR_LEN;
- if (full_len > cur_deq)
- {
- QUIC_ERR ("Not enough data in fifo RX");
- return 1;
- }
-
- /* Quicly can read len bytes from the fifo at offset:
- * ph.data_offset + SESSION_CONN_HDR_LEN */
- ret = svm_fifo_peek (f, SESSION_CONN_HDR_LEN + fifo_offset,
- pctx->ph.data_length, pctx->data);
- if (ret != pctx->ph.data_length)
- {
- QUIC_ERR ("Not enough data peeked in RX");
- return 1;
- }
-
- quic_increment_counter (QUIC_ERROR_RX_PACKETS, 1);
- quic_build_sockaddr (&pctx->sa, &pctx->salen, &pctx->ph.rmt_ip,
- pctx->ph.rmt_port, pctx->ph.is_ip4);
- quicly_ctx = quic_get_quicly_ctx_from_udp (udp_session_handle);
-
- size_t off = 0;
- plen = quicly_decode_packet (quicly_ctx, &pctx->packet, pctx->data,
- pctx->ph.data_length, &off);
-
- if (plen == SIZE_MAX)
- {
- return 1;
- }
-
- rv = quic_find_packet_ctx (pctx, thread_index);
- if (rv == QUIC_PACKET_TYPE_RECEIVE)
- {
- pctx->ptype = QUIC_PACKET_TYPE_RECEIVE;
-
- if (quic_main.vnet_crypto_enabled &&
- quic_main.default_crypto_engine == CRYPTO_ENGINE_VPP)
- {
- quic_ctx_t *qctx = quic_ctx_get (pctx->ctx_index, thread_index);
- quic_crypto_decrypt_packet (qctx, pctx);
- }
- return 0;
- }
- else if (rv == QUIC_PACKET_TYPE_MIGRATE)
- {
- pctx->ptype = QUIC_PACKET_TYPE_MIGRATE;
- /* Connection found but on wrong thread, ask move */
- }
- else if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
- {
- pctx->ptype = QUIC_PACKET_TYPE_ACCEPT;
- udp_session = session_get_from_handle (udp_session_handle);
- pctx->ctx_index = udp_session->opaque;
- pctx->thread_index = thread_index;
- }
- else
- {
- pctx->ptype = QUIC_PACKET_TYPE_RESET;
- }
- return 1;
+ return quic_eng_send_packets (ctx);
}
static int
quic_udp_session_rx_callback (session_t * udp_session)
{
- /* Read data from UDP rx_fifo and pass it to the quicly conn. */
- quic_ctx_t *ctx = NULL, *prev_ctx = NULL;
- svm_fifo_t *f = udp_session->rx_fifo;
- u32 max_deq;
- u64 udp_session_handle = session_handle (udp_session);
- int rv = 0;
- clib_thread_index_t thread_index = vlib_get_thread_index ();
- u32 cur_deq, fifo_offset, max_packets, i;
-
- quic_rx_packet_ctx_t packets_ctx[QUIC_RCV_MAX_PACKETS];
-
- if (udp_session->flags & SESSION_F_IS_MIGRATING)
- {
- QUIC_DBG (3, "RX on migrating udp session");
- return 0;
- }
-
-rx_start:
- max_deq = svm_fifo_max_dequeue (f);
- if (max_deq == 0)
- return 0;
-
- fifo_offset = 0;
- max_packets = QUIC_RCV_MAX_PACKETS;
-
-#if CLIB_DEBUG > 0
- clib_memset (packets_ctx, 0xfa,
- QUIC_RCV_MAX_PACKETS * sizeof (quic_rx_packet_ctx_t));
-#endif
- for (i = 0; i < max_packets; i++)
- {
- packets_ctx[i].thread_index = CLIB_INVALID_THREAD_INDEX;
- packets_ctx[i].ctx_index = UINT32_MAX;
- packets_ctx[i].ptype = QUIC_PACKET_TYPE_DROP;
-
- cur_deq = max_deq - fifo_offset;
- if (cur_deq == 0)
- {
- max_packets = i + 1;
- break;
- }
- if (cur_deq < SESSION_CONN_HDR_LEN)
- {
- fifo_offset = max_deq;
- max_packets = i + 1;
- QUIC_ERR ("Fifo %d < header size in RX", cur_deq);
- break;
- }
- rv = quic_process_one_rx_packet (udp_session_handle, f,
- fifo_offset, &packets_ctx[i]);
- if (packets_ctx[i].ptype != QUIC_PACKET_TYPE_MIGRATE)
- fifo_offset += SESSION_CONN_HDR_LEN + packets_ctx[i].ph.data_length;
- if (rv)
- {
- max_packets = i + 1;
- break;
- }
- }
-
- for (i = 0; i < max_packets; i++)
- {
- switch (packets_ctx[i].ptype)
- {
- case QUIC_PACKET_TYPE_RECEIVE:
- ctx = quic_ctx_get (packets_ctx[i].ctx_index, thread_index);
- rv = quicly_receive (ctx->conn, NULL, &packets_ctx[i].sa,
- &packets_ctx[i].packet);
- if (rv && rv != QUICLY_ERROR_PACKET_IGNORED)
- {
- QUIC_ERR ("quicly_receive return error %U",
- quic_format_err, rv);
- }
- break;
- case QUIC_PACKET_TYPE_ACCEPT:
- quic_accept_connection (&packets_ctx[i]);
- break;
- case QUIC_PACKET_TYPE_RESET:
- quic_reset_connection (udp_session_handle, &packets_ctx[i]);
- break;
- }
- }
- ctx = prev_ctx = NULL;
- for (i = 0; i < max_packets; i++)
- {
- prev_ctx = ctx;
- switch (packets_ctx[i].ptype)
- {
- case QUIC_PACKET_TYPE_RECEIVE:
- ctx = quic_ctx_get (packets_ctx[i].ctx_index,
- packets_ctx[i].thread_index);
- quic_check_quic_session_connected (ctx);
- ctx = quic_ctx_get (packets_ctx[i].ctx_index,
- packets_ctx[i].thread_index);
- break;
- case QUIC_PACKET_TYPE_ACCEPT:
- ctx = quic_ctx_get (packets_ctx[i].ctx_index,
- packets_ctx[i].thread_index);
- break;
- default:
- continue; /* this exits the for loop since other packet types are
- necessarily the last in the batch */
- }
- if (ctx != prev_ctx)
- quic_send_packets (ctx);
- }
-
- udp_session = session_get_from_handle (udp_session_handle); /* session alloc might have happened */
- f = udp_session->rx_fifo;
- svm_fifo_dequeue_drop (f, fifo_offset);
-
- if (svm_fifo_max_dequeue (f))
- goto rx_start;
-
- return 0;
+ return quic_eng_udp_session_rx_packets (udp_session);
}
always_inline void
-quic_common_get_transport_endpoint (quic_ctx_t * ctx,
- transport_endpoint_t * tep, u8 is_lcl)
+quic_common_get_transport_endpoint (quic_ctx_t *ctx, transport_endpoint_t *tep,
+ u8 is_lcl)
{
session_t *udp_session;
if (!quic_ctx_is_stream (ctx))
static void
quic_get_transport_listener_endpoint (u32 listener_index,
- transport_endpoint_t * tep, u8 is_lcl)
+ transport_endpoint_t *tep, u8 is_lcl)
{
quic_ctx_t *ctx;
app_listener_t *app_listener;
},
};
-static quicly_stream_open_t on_stream_open = { quic_on_stream_open };
-static quicly_closed_by_remote_t on_closed_by_remote = {
- quic_on_closed_by_remote
-};
-static quicly_now_t quicly_vpp_now_cb = { quic_get_time };
-
-static void
-quic_register_cipher_suite (crypto_engine_type_t type,
- ptls_cipher_suite_t ** ciphers)
-{
- quic_main_t *qm = &quic_main;
- vec_validate (qm->quic_ciphers, type);
- clib_bitmap_set (qm->available_crypto_engines, type, 1);
- qm->quic_ciphers[type] = ciphers;
-}
-
static void
quic_update_fifo_size ()
{
if (!seg_mgr_props)
{
- clib_warning
- ("error while getting segment_manager_props_t, can't update fifo-size");
+ clib_warning (
+ "error while getting segment_manager_props_t, can't update fifo-size");
return;
}
{
u32 segment_size = 256 << 20;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
- tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
vnet_app_attach_args_t _a, *a = &_a;
u64 options[APP_OPTIONS_N_OPTIONS];
quic_main_t *qm = &quic_main;
- u32 num_threads, i;
u8 seed[32];
+ QUIC_DBG (1, "QUIC plugin init");
+ qm->quic_input_node = &quic_input_node;
+
if (syscall (SYS_getrandom, &seed, sizeof (seed), 0) != sizeof (seed))
return clib_error_return_unix (0, "getrandom() failed");
RAND_seed (seed, sizeof (seed));
- num_threads = 1 /* main thread */ + vtm->n_threads;
+ qm->num_threads = 1 /* main thread */ + vtm->n_threads;
clib_memset (a, 0, sizeof (*a));
clib_memset (options, 0, sizeof (options));
return clib_error_return (0, "failed to attach quic app");
}
- vec_validate (qm->ctx_pool, num_threads - 1);
- vec_validate (qm->wrk_ctx, num_threads - 1);
-
- for (i = 0; i < num_threads; i++)
- {
- qm->wrk_ctx[i].next_cid.thread_id = i;
- tw = &qm->wrk_ctx[i].timer_wheel;
- tw_timer_wheel_init_1t_3w_1024sl_ov (tw, quic_expired_timers_dispatch,
- 1e-3 /* timer period 1ms */ , ~0);
- tw->last_run_time = vlib_time_now (vlib_get_main ());
- clib_bihash_init_24_8 (&qm->wrk_ctx[i].crypto_context_hash,
- "quic crypto contexts", 64, 128 << 10);
- }
-
- clib_bihash_init_16_8 (&qm->connection_hash, "quic connections", 1024,
- 4 << 20);
+ vec_validate (qm->ctx_pool, qm->num_threads - 1);
qm->app_index = a->app_index;
- qm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock
- / QUIC_TSTAMP_RESOLUTION;
- qm->session_cache.super.cb = quic_encrypt_ticket_cb;
transport_register_protocol (TRANSPORT_PROTO_QUIC, &quic_proto,
FIB_PROTOCOL_IP4, ~0);
transport_register_protocol (TRANSPORT_PROTO_QUIC, &quic_proto,
FIB_PROTOCOL_IP6, ~0);
- quic_load_openssl3_legacy_provider ();
- clib_bitmap_alloc (qm->available_crypto_engines,
- app_crypto_engine_n_types ());
- quic_register_cipher_suite (CRYPTO_ENGINE_PICOTLS,
- ptls_openssl_cipher_suites);
- qm->default_crypto_engine = CRYPTO_ENGINE_PICOTLS;
+ vec_free (a->name);
+ return 0;
+}
- vnet_crypto_main_t *cm = &crypto_main;
- if (vec_len (cm->engines) == 0)
- qm->vnet_crypto_enabled = 0;
- else
- qm->vnet_crypto_enabled = 1;
- if (qm->vnet_crypto_enabled == 1)
+static void
+quic_engine_init ()
+{
+ quic_main_t *qm = &quic_main;
+ crypto_context_t *crctx;
+ int i;
+
+  vec_validate (qm->wrk_ctx, qm->num_threads - 1);
+  vec_validate (qm->ctx_pool, qm->num_threads - 1);
+
+ QUIC_DBG (1, "Initializing quic engine to %s",
+ quic_engine_type_str (qm->engine_type));
+
+ for (i = 0; i < qm->num_threads; i++)
{
- u8 empty_key[32] = {};
- quic_register_cipher_suite (CRYPTO_ENGINE_VPP,
- quic_crypto_cipher_suites);
- qm->default_crypto_engine = CRYPTO_ENGINE_VPP;
- vec_validate (qm->per_thread_crypto_key_indices, num_threads);
- for (i = 0; i < num_threads; i++)
- {
- qm->per_thread_crypto_key_indices[i] = vnet_crypto_key_add (
- vm, VNET_CRYPTO_ALG_AES_256_CTR, empty_key, 32);
- }
+ pool_get_aligned_safe (quic_wrk_ctx_get (qm, i)->crypto_ctx_pool, crctx,
+ CLIB_CACHE_LINE_BYTES);
+ pool_program_safe_realloc (
+ (void **) &quic_wrk_ctx_get (qm, i)->crypto_ctx_pool,
+ QUIC_CRYPTO_CTX_POOL_PER_THREAD_SIZE, CLIB_CACHE_LINE_BYTES);
}
- qm->max_packets_per_key = DEFAULT_MAX_PACKETS_PER_KEY;
- qm->default_quic_cc = QUIC_CC_RENO;
+ quic_eng_engine_init (qm);
+ qm->engine_is_initialized[qm->engine_type] = 1;
+}
- vec_free (a->name);
+static clib_error_t *
+quic_main_loop_init (vlib_main_t *vm)
+{
+ quic_main_t *qm = &quic_main;
+
+ qm->engine_type =
+ quic_get_engine_type (QUIC_ENGINE_QUICLY, QUIC_ENGINE_OPENSSL);
+ QUIC_ASSERT (qm->engine_type != QUIC_ENGINE_NONE);
+
+ quic_engine_init ();
return 0;
}
VLIB_INIT_FUNCTION (quic_init);
+VLIB_MAIN_LOOP_ENTER_FUNCTION (quic_main_loop_init);
static clib_error_t *
-quic_plugin_crypto_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_plugin_crypto_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
{
unformat_input_t _line_input, *line_input = &_line_input;
quic_main_t *qm = &quic_main;
u64 quic_fifosize = 0;
static clib_error_t *
-quic_plugin_set_fifo_size_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_plugin_set_fifo_size_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
{
- quic_main_t *qm = &quic_main;
unformat_input_t _line_input, *line_input = &_line_input;
uword tmp;
{
if (tmp >= 0x100000000ULL)
{
- return clib_error_return
- (0, "fifo-size %llu (0x%llx) too large", tmp, tmp);
+ return clib_error_return (0, "fifo-size %llu (0x%llx) too large",
+ tmp, tmp);
}
- qm->udp_fifo_size = tmp;
+ quic_main.udp_fifo_size = tmp;
quic_update_fifo_size ();
}
else
quic_show_aggregated_stats (vlib_main_t * vm)
{
u32 num_workers = vlib_num_workers ();
- quic_main_t *qm = &quic_main;
quic_ctx_t *ctx = NULL;
- quicly_stats_t st, agg_stats;
+ quic_stats_t st, agg_stats;
u32 i, nconn = 0, nstream = 0;
clib_memset (&agg_stats, 0, sizeof (agg_stats));
for (i = 0; i < num_workers + 1; i++)
{
- pool_foreach (ctx, qm->ctx_pool[i])
- {
- if (quic_ctx_is_conn (ctx) && ctx->conn)
- {
- quicly_get_stats (ctx->conn, &st);
- agg_stats.rtt.smoothed += st.rtt.smoothed;
- agg_stats.rtt.minimum += st.rtt.minimum;
- agg_stats.rtt.variance += st.rtt.variance;
- agg_stats.num_packets.received += st.num_packets.received;
- agg_stats.num_packets.sent += st.num_packets.sent;
- agg_stats.num_packets.lost += st.num_packets.lost;
- agg_stats.num_packets.ack_received += st.num_packets.ack_received;
- agg_stats.num_bytes.received += st.num_bytes.received;
- agg_stats.num_bytes.sent += st.num_bytes.sent;
- nconn++;
- }
- else if (quic_ctx_is_stream (ctx))
- nstream++;
- }
+ pool_foreach (ctx, quic_main.ctx_pool[i])
+ {
+ if (quic_ctx_is_conn (ctx) && ctx->conn)
+ {
+ quic_eng_connection_get_stats (ctx->conn, &st);
+ agg_stats.rtt_smoothed += st.rtt_smoothed;
+ agg_stats.rtt_minimum += st.rtt_minimum;
+ agg_stats.rtt_variance += st.rtt_variance;
+ agg_stats.num_packets_received += st.num_packets_received;
+ agg_stats.num_packets_sent += st.num_packets_sent;
+ agg_stats.num_packets_lost += st.num_packets_lost;
+ agg_stats.num_packets_ack_received +=
+ st.num_packets_ack_received;
+ agg_stats.num_bytes_received += st.num_bytes_received;
+ agg_stats.num_bytes_sent += st.num_bytes_sent;
+ nconn++;
+ }
+ else if (quic_ctx_is_stream (ctx))
+ nstream++;
+ }
}
vlib_cli_output (vm, "-------- Connections --------");
vlib_cli_output (vm, "Current: %u", nconn);
quic_get_counter_value (QUIC_ERROR_TX_PACKETS));
vlib_cli_output (vm, "----------- Stats -----------");
vlib_cli_output (vm, "Min RTT %f",
- nconn > 0 ? agg_stats.rtt.minimum / nconn : 0);
+ nconn > 0 ? agg_stats.rtt_minimum / nconn : 0);
vlib_cli_output (vm, "Smoothed RTT %f",
- nconn > 0 ? agg_stats.rtt.smoothed / nconn : 0);
+ nconn > 0 ? agg_stats.rtt_smoothed / nconn : 0);
vlib_cli_output (vm, "Variance on RTT %f",
- nconn > 0 ? agg_stats.rtt.variance / nconn : 0);
- vlib_cli_output (vm, "Packets Received %lu",
- agg_stats.num_packets.received);
- vlib_cli_output (vm, "Packets Sent %lu", agg_stats.num_packets.sent);
- vlib_cli_output (vm, "Packets Lost %lu", agg_stats.num_packets.lost);
+ nconn > 0 ? agg_stats.rtt_variance / nconn : 0);
+ vlib_cli_output (vm, "Packets Received %lu", agg_stats.num_packets_received);
+ vlib_cli_output (vm, "Packets Sent %lu", agg_stats.num_packets_sent);
+ vlib_cli_output (vm, "Packets Lost %lu", agg_stats.num_packets_lost);
vlib_cli_output (vm, "Packets Acks %lu",
- agg_stats.num_packets.ack_received);
- vlib_cli_output (vm, "RX bytes %lu", agg_stats.num_bytes.received);
- vlib_cli_output (vm, "TX bytes %lu", agg_stats.num_bytes.sent);
-}
-
-static u8 *
-quic_format_quicly_conn_id (u8 * s, va_list * args)
-{
- quicly_cid_plaintext_t *mid = va_arg (*args, quicly_cid_plaintext_t *);
- s = format (s, "C%x_%x", mid->master_id, mid->thread_id);
- return s;
-}
-
-static u8 *
-quic_format_quicly_stream_id (u8 * s, va_list * args)
-{
- quicly_stream_t *stream = va_arg (*args, quicly_stream_t *);
- s =
- format (s, "%U S%lx", quic_format_quicly_conn_id,
- quicly_get_master_id (stream->conn), stream->stream_id);
- return s;
+ agg_stats.num_packets_ack_received);
+ vlib_cli_output (vm, "RX bytes %lu", agg_stats.num_bytes_received);
+ vlib_cli_output (vm, "TX bytes %lu", agg_stats.num_bytes_sent);
}
static u8 *
quic_format_connection_ctx (u8 * s, va_list * args)
{
quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
- quicly_stats_t quicly_stats;
s = format (s, "[#%d][%x]", ctx->c_thread_index, ctx->c_c_index);
s = format (s, "- no conn -\n");
return s;
}
- s = format (s, "[%U]",
- quic_format_quicly_conn_id, quicly_get_master_id (ctx->conn));
- quicly_get_stats (ctx->conn, &quicly_stats);
-
- s = format (s, "[RTT >%3d, ~%3d, V%3d, last %3d]",
- quicly_stats.rtt.minimum, quicly_stats.rtt.smoothed,
- quicly_stats.rtt.variance, quicly_stats.rtt.latest);
- s = format (s, " TX:%d RX:%d loss:%d ack:%d",
- quicly_stats.num_packets.sent,
- quicly_stats.num_packets.received,
- quicly_stats.num_packets.lost,
- quicly_stats.num_packets.ack_received);
- s =
- format (s, "\ncwnd:%u ssthresh:%u recovery_end:%lu", quicly_stats.cc.cwnd,
- quicly_stats.cc.ssthresh, quicly_stats.cc.recovery_end);
- quicly_context_t *quicly_ctx = quic_get_quicly_ctx_from_ctx (ctx);
- if (quicly_ctx->init_cc == &quicly_cc_cubic_init)
- {
- s = format (
- s,
- "\nk:%d w_max:%u w_last_max:%u avoidance_start:%ld last_sent_time:%ld",
- quicly_stats.cc.state.cubic.k, quicly_stats.cc.state.cubic.w_max,
- quicly_stats.cc.state.cubic.w_last_max,
- quicly_stats.cc.state.cubic.avoidance_start,
- quicly_stats.cc.state.cubic.last_sent_time);
- }
- else if (quicly_ctx->init_cc == &quicly_cc_reno_init)
- {
- s = format (s, " stash:%u", quicly_stats.cc.state.reno.stash);
- }
+ s = format (s, "%U", quic_eng_format_connection_stats, ctx);
return s;
}
{
quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
session_t *stream_session;
- quicly_stream_t *stream = ctx->stream;
u32 txs, rxs;
s = format (s, "[#%d][%x]", ctx->c_thread_index, ctx->c_c_index);
- s = format (s, "[%U]", quic_format_quicly_stream_id, stream);
+ s = format (s, "[%U]", quic_eng_format_stream_ctx_stream_id, ctx);
stream_session = session_get_if_valid (ctx->c_s_index, ctx->c_thread_index);
if (!stream_session)
}
static clib_error_t *
-quic_show_connections_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+quic_show_connections_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
{
unformat_input_t _line_input, *line_input = &_line_input;
u8 show_listeners = 0, show_conn = 0, show_stream = 0;
u32 num_workers = vlib_num_workers ();
- quic_main_t *qm = &quic_main;
clib_error_t *error = 0;
quic_ctx_t *ctx = NULL;
for (int i = 0; i < num_workers + 1; i++)
{
- pool_foreach (ctx, qm->ctx_pool[i])
- {
- if (quic_ctx_is_stream (ctx) && show_stream)
- vlib_cli_output (vm, "%U", quic_format_stream_ctx, ctx);
- else if (quic_ctx_is_listener (ctx) && show_listeners)
- vlib_cli_output (vm, "%U", quic_format_listener_ctx, ctx);
- else if (quic_ctx_is_conn (ctx) && show_conn)
- vlib_cli_output (vm, "%U", quic_format_connection_ctx, ctx);
- }
+ pool_foreach (ctx, quic_main.ctx_pool[i])
+ {
+ if (quic_ctx_is_stream (ctx) && show_stream)
+ vlib_cli_output (vm, "%U", quic_format_stream_ctx, ctx);
+ else if (quic_ctx_is_listener (ctx) && show_listeners)
+ vlib_cli_output (vm, "%U", quic_format_listener_ctx, ctx);
+ else if (quic_ctx_is_conn (ctx) && show_conn)
+ vlib_cli_output (vm, "%U", quic_format_connection_ctx, ctx);
+ }
}
done:
.short_help = "set quic cc [reno|cubic]",
.function = quic_set_cc_fn,
};
-VLIB_PLUGIN_REGISTER () =
-{
- .version = VPP_BUILD_VER,
- .description = "Quic transport protocol",
- .default_disabled = 1,
+VLIB_PLUGIN_REGISTER () = {
+  .version = VPP_BUILD_VER,
+  .description = "Quic transport protocol",
+ // .default_disabled = 1,
};
static clib_error_t *
{
if (tmp >= 0x100000000ULL)
{
- error = clib_error_return (0,
- "fifo-size %llu (0x%llx) too large",
- tmp, tmp);
+ error = clib_error_return (
+ 0, "fifo-size %llu (0x%llx) too large", tmp, tmp);
goto done;
}
qm->udp_fifo_size = tmp;
qm->connection_timeout = i;
else if (unformat (line_input, "fifo-prealloc %u", &i))
qm->udp_fifo_prealloc = i;
+ // TODO: add cli selection of quic_eng_<types>
else
{
error = clib_error_return (0, "unknown input '%U'",
VLIB_EARLY_CONFIG_FUNCTION (quic_config_fn, "quic");
static uword
-quic_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+quic_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
return 0;
}
-/*
- * Copyright (c) 2021 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
#ifndef __included_quic_h__
#define __included_quic_h__
#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+#include <vppinfra/clib.h>
#include <vppinfra/lock.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
#include <vppinfra/bihash_16_8.h>
-#include <quicly.h>
-
#include <vnet/crypto/crypto.h>
-#include <vppinfra/lock.h>
/* QUIC log levels
* 1 - errors
* 4 - timer events
**/
-#define QUIC_DEBUG 0
-#define QUIC_TSTAMP_RESOLUTION 0.001 /* QUIC tick resolution (1ms) */
+#define QUIC_DEBUG 0
#define QUIC_TIMER_HANDLE_INVALID ((u32) ~0)
#define QUIC_SESSION_INVALID ((u32) ~0 - 1)
#define QUIC_MAX_PACKET_SIZE 1280
#define QUIC_INT_MAX 0x3FFFFFFFFFFFFFFF
#define QUIC_DEFAULT_FIFO_SIZE (64 << 10)
#define QUIC_SEND_PACKET_VEC_SIZE 16
-#define QUIC_IV_LEN 17
#define QUIC_MAX_COALESCED_PACKET 4
#define QUIC_RCV_MAX_PACKETS 16
-#define QUIC_DEFAULT_CONN_TIMEOUT (30 * 1000) /* 30 seconds */
-
-/* Taken from quicly.c */
-#define QUICLY_QUIC_BIT 0x40
-
-#define QUICLY_PACKET_TYPE_INITIAL (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0)
-#define QUICLY_PACKET_TYPE_0RTT (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x10)
-#define QUICLY_PACKET_TYPE_HANDSHAKE (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x20)
-#define QUICLY_PACKET_TYPE_RETRY (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x30)
-#define QUICLY_PACKET_TYPE_BITMASK 0xf0
-
-/* error codes */
-#define QUIC_ERROR_FULL_FIFO 0xff10
-#define QUIC_APP_ERROR_CLOSE_NOTIFY QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0)
-#define QUIC_APP_ALLOCATION_ERROR QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0x1)
-#define QUIC_APP_ACCEPT_NOTIFY_ERROR QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0x2)
-#define QUIC_APP_CONNECT_NOTIFY_ERROR QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE(0x3)
-
#define QUIC_DECRYPT_PACKET_OK 0
#define QUIC_DECRYPT_PACKET_NOTOFFLOADED 1
#define QUIC_DECRYPT_PACKET_ERROR 2
+#define DEFAULT_MAX_PACKETS_PER_KEY 16777216
+#define QUIC_CRYPTO_CTX_POOL_PER_THREAD_SIZE 256
+
#if QUIC_DEBUG
#define QUIC_DBG(_lvl, _fmt, _args...) \
if (_lvl <= QUIC_DEBUG) \
#endif
#if CLIB_ASSERT_ENABLE
-#define QUIC_ASSERT(truth) ASSERT (truth)
+#define QUIC_ASSERT(truth) ASSERT ((truth))
#else
#define QUIC_ASSERT(truth) \
do { \
clib_warning ("QUIC-ERR: " _fmt, ##_args); \
} while (0)
-
-
+typedef enum quic_engine_type_
+{
+ QUIC_ENGINE_NONE,
+ QUIC_ENGINE_QUICLY,
+ QUIC_ENGINE_OPENSSL,
+ QUIC_ENGINE_LAST = QUIC_ENGINE_OPENSSL,
+} quic_engine_type_t;
+
+static_always_inline char *
+quic_engine_type_str (quic_engine_type_t engine_type)
+{
+ switch (engine_type)
+ {
+ case QUIC_ENGINE_NONE:
+ return ("QUIC_ENGINE_NONE");
+ case QUIC_ENGINE_QUICLY:
+ return ("QUIC_ENGINE_QUICLY");
+ case QUIC_ENGINE_OPENSSL:
+ return ("QUIC_ENGINE_OPENSSL");
+ default:
+ return ("UNKNOWN");
+ }
+}
extern vlib_node_registration_t quic_input_node;
typedef enum
transport_connection_t connection;
struct
{ /** QUIC ctx case */
- quicly_conn_t *conn;
+ void *conn;
u32 listener_ctx_id;
u32 client_opaque;
u8 *srv_hostname;
};
struct
{ /** STREAM ctx case */
- quicly_stream_t *stream;
+ void *stream;
u64 bytes_written;
u32 quic_connection_ctx_id;
u8 _sctx_end_marker; /* Leave this at the end */
struct
{
- ptls_cipher_context_t *hp_ctx;
- ptls_aead_context_t *aead_ctx;
+ void *hp_ctx;
+ void *aead_ctx;
} ingress_keys;
int key_phase_ingress;
- quicly_address_t rmt_ip;
- quicly_address_t lcl_ip;
+ ip46_address_t rmt_ip;
+ u16 rmt_port;
+ ip46_address_t lcl_ip;
+ u16 lcl_port;
} quic_ctx_t;
TRANSPORT_CONN_ID_LEN,
"connection data must be less than TRANSPORT_CONN_ID_LEN bytes");
-/* single-entry session cache */
-typedef struct quic_session_cache_
-{
- ptls_encrypt_ticket_t super;
- uint8_t id[32];
- ptls_iovec_t data;
-} quic_session_cache_t;
-
typedef struct quic_stream_data_
{
u32 ctx_id;
u32 app_tx_data_len; /**< bytes sent */
} quic_stream_data_t;
-typedef struct quic_crypto_context_data_
+typedef struct quic_stats_
{
- quicly_context_t quicly_ctx;
- char cid_key[QUIC_IV_LEN];
- ptls_context_t ptls_ctx;
-} quic_crypto_context_data_t;
+ u64 num_bytes_sent;
+ u64 num_bytes_received;
+ u64 num_packets_sent;
+ u64 num_packets_received;
+ u64 num_packets_ack_received;
+ u64 num_packets_lost;
+ u64 rtt_smoothed;
+ u64 rtt_minimum;
+ u64 rtt_variance;
+} quic_stats_t;
+
+#define foreach_quic_rx_pkt_ctx_field \
+ _ (u32, ctx_index) \
+ _ (u32, thread_index) \
+ _ (u8, ptype)
+
+typedef struct quic_rx_packet_ctx_
+{
+#define _(type, name) type name;
+ foreach_quic_rx_pkt_ctx_field
+#undef _
+ u8 padding[1024 * 128]; // FIXME: remove hardcoded size
+} quic_rx_packet_ctx_t;
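For readability, here is the expansion of the X-macro declaration above. This is only the patch's own definition written out for reference (the oversized padding member is kept verbatim, per the FIXME):

/* Expansion of foreach_quic_rx_pkt_ctx_field within quic_rx_packet_ctx_t */
typedef struct quic_rx_packet_ctx_
{
  u32 ctx_index;
  u32 thread_index;
  u8 ptype;
  u8 padding[1024 * 128]; /* FIXME noted in the patch: remove hardcoded size */
} quic_rx_packet_ctx_t;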
typedef struct quic_worker_ctx_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- int64_t time_now; /**< worker time */
- tw_timer_wheel_1t_3w_1024sl_ov_t timer_wheel; /**< worker timer wheel */
- quicly_cid_plaintext_t next_cid;
- crypto_context_t *crypto_ctx_pool; /**< per thread pool of crypto contexes */
- clib_bihash_24_8_t crypto_context_hash; /**< per thread [params:crypto_ctx_index] hash */
+  int64_t time_now;				  /**< worker time */
+  tw_timer_wheel_1t_3w_1024sl_ov_t timer_wheel;	  /**< worker timer wheel */
+  crypto_context_t *crypto_ctx_pool; /**< per thread pool of crypto contexts */
} quic_worker_ctx_t;
-typedef struct quic_rx_packet_ctx_
-{
- quicly_decoded_packet_t packet;
- u8 data[QUIC_MAX_PACKET_SIZE];
- u32 ctx_index;
- clib_thread_index_t thread_index;
- union
- {
- struct sockaddr sa;
- struct sockaddr_in6 sa6;
- };
- socklen_t salen;
- u8 ptype;
- session_dgram_hdr_t ph;
-} quic_rx_packet_ctx_t;
-
typedef struct quic_main_
{
+ vlib_node_registration_t *quic_input_node;
u32 app_index;
quic_ctx_t **ctx_pool;
quic_worker_ctx_t *wrk_ctx;
- clib_bihash_16_8_t connection_hash; /**< quic connection id -> conn handle */
- f64 tstamp_ticks_per_clock;
- ptls_cipher_suite_t ***quic_ciphers; /**< available ciphers by crypto engine */
- uword *available_crypto_engines; /**< Bitmap for registered engines */
- u8 default_crypto_engine; /**< Used if you do connect with CRYPTO_ENGINE_NONE (0) */
- u64 max_packets_per_key; /**< number of packets that can be sent without a key update */
+ u8 default_crypto_engine; /**< Used if you do connect with CRYPTO_ENGINE_NONE
+ (0) */
+ u64 max_packets_per_key; /**< number of packets that can be sent without a
+ key update */
u8 default_quic_cc;
- ptls_handshake_properties_t hs_properties;
- quic_session_cache_t session_cache;
-
u32 udp_fifo_size;
u32 udp_fifo_prealloc;
u32 connection_timeout;
-
- u8 vnet_crypto_enabled;
- u32 *per_thread_crypto_key_indices;
+ int num_threads;
+ quic_engine_type_t engine_type;
+ u8 engine_is_initialized[QUIC_ENGINE_LAST + 1];
} quic_main_t;
+extern quic_main_t quic_main;
+
+static_always_inline quic_worker_ctx_t *
+quic_wrk_ctx_get (quic_main_t *qm, clib_thread_index_t thread_index)
+{
+ return &qm->wrk_ctx[thread_index];
+}
+
+static_always_inline u32
+quic_ctx_alloc (quic_main_t *qm, clib_thread_index_t thread_index)
+{
+ quic_ctx_t *ctx;
+
+ pool_get_aligned_safe (qm->ctx_pool[thread_index], ctx,
+ CLIB_CACHE_LINE_BYTES);
+
+ clib_memset (ctx, 0, sizeof (quic_ctx_t));
+ ctx->c_thread_index = thread_index;
+ ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
+ QUIC_DBG (3, "Allocated quic_ctx %u on thread %u",
+ ctx - qm->ctx_pool[thread_index], thread_index);
+ return ctx - qm->ctx_pool[thread_index];
+}
+
+static_always_inline void
+quic_ctx_free (quic_main_t *qm, quic_ctx_t *ctx)
+{
+ QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
+ clib_thread_index_t thread_index = ctx->c_thread_index;
+ QUIC_ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
+ if (CLIB_DEBUG)
+ clib_memset (ctx, 0xfb, sizeof (*ctx));
+ pool_put (qm->ctx_pool[thread_index], ctx);
+}
+
+static_always_inline void
+quic_increment_counter (quic_main_t *qm, u8 evt, u8 val)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_node_increment_counter (vm, qm->quic_input_node->index, evt, val);
+}
+
+static_always_inline int
+quic_ctx_is_stream (quic_ctx_t *ctx)
+{
+ return (ctx->flags & QUIC_F_IS_STREAM);
+}
+
+static_always_inline int
+quic_ctx_is_listener (quic_ctx_t *ctx)
+{
+ return (ctx->flags & QUIC_F_IS_LISTENER);
+}
+
+static_always_inline int
+quic_ctx_is_conn (quic_ctx_t *ctx)
+{
+ return !(quic_ctx_is_listener (ctx) || quic_ctx_is_stream (ctx));
+}
+
+static_always_inline void
+quic_build_sockaddr (struct sockaddr *sa, socklen_t *salen,
+ ip46_address_t *addr, u16 port, u8 is_ip4)
+{
+ if (is_ip4)
+ {
+ struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
+ sa4->sin_family = AF_INET;
+ sa4->sin_port = port;
+ sa4->sin_addr.s_addr = addr->ip4.as_u32;
+ *salen = sizeof (struct sockaddr_in);
+ }
+ else
+ {
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
+ sa6->sin6_family = AF_INET6;
+ sa6->sin6_port = port;
+ clib_memcpy (&sa6->sin6_addr, &addr->ip6, 16);
+ *salen = sizeof (struct sockaddr_in6);
+ }
+}
+
+static_always_inline void
+quic_disconnect_transport (quic_ctx_t *ctx, u32 app_index)
+{
+ QUIC_DBG (2, "Disconnecting transport 0x%lx", ctx->udp_session_handle);
+ vnet_disconnect_args_t a = {
+ .handle = ctx->udp_session_handle,
+ .app_index = app_index,
+ };
+
+ if (vnet_disconnect_session (&a))
+ clib_warning ("UDP session 0x%lx disconnect errored",
+ ctx->udp_session_handle);
+}
+
+typedef enum quic_session_connected_
+{
+ QUIC_SESSION_CONNECTED_NONE,
+ QUIC_SESSION_CONNECTED_CLIENT,
+ QUIC_SESSION_CONNECTED_SERVER,
+} quic_session_connected_t;
+
+// TODO: Define appropriate QUIC return values for quic_engine_vft functions!
+typedef struct quic_engine_vft_
+{
+ void (*engine_init) (quic_main_t *qm);
+ int (*app_cert_key_pair_delete) (app_cert_key_pair_t *ckpair);
+ int (*crypto_context_acquire) (quic_ctx_t *ctx);
+ void (*crypto_context_release) (u32 crypto_context_index, u8 thread_index);
+ int (*connect) (quic_ctx_t *ctx, u32 ctx_index,
+ clib_thread_index_t thread_index, struct sockaddr *sa);
+ int (*connect_stream) (void *conn, void **quic_stream,
+ quic_stream_data_t **quic_stream_data, u8 is_unidir);
+ void (*connect_stream_error_reset) (void *quic_stream);
+ void (*connection_migrate) (quic_ctx_t *ctx);
+ void (*connection_get_stats) (void *conn, quic_stats_t *conn_stats);
+ int (*udp_session_rx_packets) (session_t *udp_session);
+ void (*ack_rx_data) (session_t *stream_session);
+ int (*stream_tx) (quic_ctx_t *ctx, session_t *stream_session);
+ int (*send_packets) (quic_ctx_t *ctx);
+ u8 *(*format_connection_stats) (u8 *s, va_list *args);
+ u8 *(*format_stream_connection) (u8 *s, va_list *args);
+ u8 *(*format_stream_ctx_stream_id) (u8 *s, va_list *args);
+ void (*proto_on_close) (u32 ctx_index, clib_thread_index_t thread_index);
+} quic_engine_vft_t;
+
+extern quic_engine_vft_t *quic_engine_vfts;
+extern void quic_register_engine (const quic_engine_vft_t *vft,
+ quic_engine_type_t engine_type);
+typedef void (*quic_register_engine_fn) (const quic_engine_vft_t *vft,
+ quic_engine_type_t engine_type);
+
#endif /* __included_quic_h__ */
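For illustration only (not part of the patch): a minimal sketch of how an engine backend might hand its vft to the core plugin through quic_register_engine(). Every name prefixed with example_ is hypothetical; a real backend would populate all quic_engine_vft_t callbacks and handle init ordering against the core quic plugin.

/* Hypothetical backend registration sketch; only engine_init is shown,
 * the remaining quic_engine_vft_t callbacks are left unset (NULL). */
#include <vlib/vlib.h>
#include <quic/quic.h>

static void
example_engine_init (quic_main_t *qm)
{
  /* backend-specific state (crypto contexts, timers, ...) would be set up here */
}

static const quic_engine_vft_t example_engine_vft = {
  .engine_init = example_engine_init,
};

static clib_error_t *
example_engine_plugin_init (vlib_main_t *vm)
{
  /* advertise this backend as the quicly-based engine (placeholder choice) */
  quic_register_engine (&example_engine_vft, QUIC_ENGINE_QUICLY);
  return 0;
}

VLIB_INIT_FUNCTION (example_engine_plugin_init);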
/*
+++ /dev/null
-/*
- * Copyright (c) 2021 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <quic/quic.h>
-#include <quic/quic_crypto.h>
-#include <vnet/session/session.h>
-
-#include <quicly.h>
-#include <picotls/openssl.h>
-#include <pthread.h>
-
-#define QUICLY_EPOCH_1RTT 3
-
-extern quic_main_t quic_main;
-extern quic_ctx_t *quic_get_conn_ctx (quicly_conn_t *conn);
-vnet_crypto_main_t *cm = &crypto_main;
-
-typedef struct crypto_key_
-{
- vnet_crypto_alg_t algo;
- u8 key[32];
- u16 key_len;
-} crypto_key_t;
-
-struct cipher_context_t
-{
- ptls_cipher_context_t super;
- vnet_crypto_op_t op;
- vnet_crypto_op_id_t id;
- crypto_key_t key;
-};
-
-struct aead_crypto_context_t
-{
- ptls_aead_context_t super;
- EVP_CIPHER_CTX *evp_ctx;
- uint8_t static_iv[PTLS_MAX_IV_SIZE];
- vnet_crypto_op_t op;
- crypto_key_t key;
-
- vnet_crypto_op_id_t id;
- uint8_t iv[PTLS_MAX_IV_SIZE];
-};
-
-static int
-quic_crypto_setup_cipher (quicly_crypto_engine_t *engine, quicly_conn_t *conn,
- size_t epoch, int is_enc,
- ptls_cipher_context_t **header_protect_ctx,
- ptls_aead_context_t **packet_protect_ctx,
- ptls_aead_algorithm_t *aead,
- ptls_hash_algorithm_t *hash, const void *secret)
-{
- uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
- int ret;
-
- *packet_protect_ctx = NULL;
- /* generate new header protection key */
- if (header_protect_ctx != NULL)
- {
- *header_protect_ctx = NULL;
- ret =
- ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
- ptls_iovec_init (secret, hash->digest_size),
- "quic hp", ptls_iovec_init (NULL, 0), NULL);
- if (ret)
- goto Exit;
- *header_protect_ctx = ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey);
- if (NULL == *header_protect_ctx)
- {
- ret = PTLS_ERROR_NO_MEMORY;
- goto Exit;
- }
- }
-
- /* generate new AEAD context */
- *packet_protect_ctx =
- ptls_aead_new (aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL);
- if (NULL == *packet_protect_ctx)
- {
- ret = PTLS_ERROR_NO_MEMORY;
- goto Exit;
- }
-
- if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
- {
- quic_ctx_t *qctx = quic_get_conn_ctx (conn);
- if (qctx->ingress_keys.aead_ctx != NULL)
- qctx->key_phase_ingress++;
-
- qctx->ingress_keys.aead_ctx = *packet_protect_ctx;
- if (header_protect_ctx != NULL)
- qctx->ingress_keys.hp_ctx = *header_protect_ctx;
- }
-
- ret = 0;
-
-Exit:
- if (ret)
- {
- if (*packet_protect_ctx != NULL)
- {
- ptls_aead_free (*packet_protect_ctx);
- *packet_protect_ctx = NULL;
- }
- if (header_protect_ctx && *header_protect_ctx != NULL)
- {
- ptls_cipher_free (*header_protect_ctx);
- *header_protect_ctx = NULL;
- }
- }
- ptls_clear_memory (hpkey, sizeof (hpkey));
- return ret;
-}
-
-static u32
-quic_crypto_set_key (crypto_key_t *key)
-{
- u8 thread_index = vlib_get_thread_index ();
- u32 key_id = quic_main.per_thread_crypto_key_indices[thread_index];
- vnet_crypto_key_t *vnet_key = vnet_crypto_get_key (key_id);
- vnet_crypto_engine_t *engine;
-
- vec_foreach (engine, cm->engines)
- if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_DEL, key_id);
-
- vnet_key->alg = key->algo;
- clib_memcpy (vnet_key->data, key->key, key->key_len);
-
- vec_foreach (engine, cm->engines)
- if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key_id);
-
- return key_id;
-}
-
-static size_t
-quic_crypto_aead_decrypt (quic_ctx_t *qctx, ptls_aead_context_t *_ctx,
- void *_output, const void *input, size_t inlen,
- uint64_t decrypted_pn, const void *aad,
- size_t aadlen)
-{
- vlib_main_t *vm = vlib_get_main ();
-
- struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
-
- vnet_crypto_op_init (&ctx->op, ctx->id);
- ctx->op.aad = (u8 *) aad;
- ctx->op.aad_len = aadlen;
- ctx->op.iv = ctx->iv;
- ptls_aead__build_iv (ctx->super.algo, ctx->op.iv, ctx->static_iv,
- decrypted_pn);
- ctx->op.src = (u8 *) input;
- ctx->op.dst = _output;
- ctx->op.key_index = quic_crypto_set_key (&ctx->key);
- ctx->op.len = inlen - ctx->super.algo->tag_size;
- ctx->op.tag_len = ctx->super.algo->tag_size;
- ctx->op.tag = ctx->op.src + ctx->op.len;
-
- vnet_crypto_process_ops (vm, &(ctx->op), 1);
-
- return ctx->op.len;
-}
-
-void
-quic_crypto_decrypt_packet (quic_ctx_t *qctx, quic_rx_packet_ctx_t *pctx)
-{
- ptls_cipher_context_t *header_protection = NULL;
- ptls_aead_context_t *aead = NULL;
- int pn;
-
- /* Long Header packets are not decrypted by vpp */
- if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
- return;
-
- uint64_t next_expected_packet_number =
- quicly_get_next_expected_packet_number (qctx->conn);
- if (next_expected_packet_number == UINT64_MAX)
- return;
-
- aead = qctx->ingress_keys.aead_ctx;
- header_protection = qctx->ingress_keys.hp_ctx;
-
- if (!aead || !header_protection)
- return;
-
- size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
- uint8_t hpmask[5] = { 0 };
- uint32_t pnbits = 0;
- size_t pnlen, ptlen, i;
-
- /* decipher the header protection, as well as obtaining pnbits, pnlen */
- if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
- return;
- ptls_cipher_init (header_protection, pctx->packet.octets.base +
- pctx->packet.encrypted_off +
- QUICLY_MAX_PN_SIZE);
- ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
- pctx->packet.octets.base[0] ^=
- hpmask[0] &
- (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf : 0x1f);
- pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
- for (i = 0; i != pnlen; ++i)
- {
- pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
- hpmask[i + 1];
- pnbits = (pnbits << 8) |
- pctx->packet.octets.base[pctx->packet.encrypted_off + i];
- }
-
- size_t aead_off = pctx->packet.encrypted_off + pnlen;
-
- pn = quicly_determine_packet_number (pnbits, pnlen * 8,
- next_expected_packet_number);
-
- int key_phase_bit =
- (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
-
- if (key_phase_bit != (qctx->key_phase_ingress & 1))
- {
- pctx->packet.octets.base[0] ^=
- hpmask[0] &
- (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
- 0x1f);
- for (i = 0; i != pnlen; ++i)
- {
- pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
- hpmask[i + 1];
- }
- return;
- }
-
- if ((ptlen = quic_crypto_aead_decrypt (
- qctx, aead, pctx->packet.octets.base + aead_off,
- pctx->packet.octets.base + aead_off,
- pctx->packet.octets.len - aead_off, pn, pctx->packet.octets.base,
- aead_off)) == SIZE_MAX)
- {
- fprintf (stderr, "%s: aead decryption failure (pn: %d)\n", __func__, pn);
- return;
- }
-
- pctx->packet.encrypted_off = aead_off;
- pctx->packet.octets.len = ptlen + aead_off;
-
- pctx->packet.decrypted.pn = pn;
- pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
-}
-
-void
-quic_crypto_encrypt_packet (struct st_quicly_crypto_engine_t *engine,
- quicly_conn_t *conn,
- ptls_cipher_context_t *header_protect_ctx,
- ptls_aead_context_t *packet_protect_ctx,
- ptls_iovec_t datagram, size_t first_byte_at,
- size_t payload_from, uint64_t packet_number,
- int coalesced)
-{
- vlib_main_t *vm = vlib_get_main ();
-
- struct cipher_context_t *hp_ctx =
- (struct cipher_context_t *) header_protect_ctx;
- struct aead_crypto_context_t *aead_ctx =
- (struct aead_crypto_context_t *) packet_protect_ctx;
-
- void *input = datagram.base + payload_from;
- void *output = input;
- size_t inlen =
- datagram.len - payload_from - packet_protect_ctx->algo->tag_size;
- const void *aad = datagram.base + first_byte_at;
- size_t aadlen = payload_from - first_byte_at;
-
- /* Build AEAD encrypt crypto operation */
- vnet_crypto_op_init (&aead_ctx->op, aead_ctx->id);
- aead_ctx->op.aad = (u8 *) aad;
- aead_ctx->op.aad_len = aadlen;
- aead_ctx->op.iv = aead_ctx->iv;
- ptls_aead__build_iv (aead_ctx->super.algo, aead_ctx->op.iv,
- aead_ctx->static_iv, packet_number);
- aead_ctx->op.key_index = quic_crypto_set_key (&aead_ctx->key);
- aead_ctx->op.src = (u8 *) input;
- aead_ctx->op.dst = output;
- aead_ctx->op.len = inlen;
- aead_ctx->op.tag_len = aead_ctx->super.algo->tag_size;
- aead_ctx->op.tag = aead_ctx->op.src + inlen;
- vnet_crypto_process_ops (vm, &(aead_ctx->op), 1);
- assert (aead_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
-
- /* Build Header protection crypto operation */
- ptls_aead_supplementary_encryption_t supp = {
- .ctx = header_protect_ctx,
- .input =
- datagram.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE
- };
-
- /* Build Header protection crypto operation */
- vnet_crypto_op_init (&hp_ctx->op, hp_ctx->id);
- memset (supp.output, 0, sizeof (supp.output));
- hp_ctx->op.iv = (u8 *) supp.input;
- hp_ctx->op.key_index = quic_crypto_set_key (&hp_ctx->key);
- ;
- hp_ctx->op.src = (u8 *) supp.output;
- hp_ctx->op.dst = (u8 *) supp.output;
- hp_ctx->op.len = sizeof (supp.output);
- vnet_crypto_process_ops (vm, &(hp_ctx->op), 1);
- assert (hp_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
-
- datagram.base[first_byte_at] ^=
- supp.output[0] &
- (QUICLY_PACKET_IS_LONG_HEADER (datagram.base[first_byte_at]) ? 0xf : 0x1f);
- for (size_t i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
- datagram.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
- supp.output[i + 1];
-}
-
-static int
-quic_crypto_cipher_setup_crypto (ptls_cipher_context_t *_ctx, int is_enc,
- const void *key, const EVP_CIPHER *cipher)
-{
- struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
-
- vnet_crypto_alg_t algo;
- if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
- {
- algo = VNET_CRYPTO_ALG_AES_128_CTR;
- ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_CTR_ENC :
- VNET_CRYPTO_OP_AES_128_CTR_DEC;
- ptls_openssl_aes128ctr.setup_crypto (_ctx, is_enc, key);
- }
- else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
- {
- algo = VNET_CRYPTO_ALG_AES_256_CTR;
- ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_CTR_ENC :
- VNET_CRYPTO_OP_AES_256_CTR_DEC;
- ptls_openssl_aes256ctr.setup_crypto (_ctx, is_enc, key);
- }
- else
- {
- QUIC_DBG (1, "%s, Invalid crypto cipher : ", __func__, _ctx->algo->name);
- assert (0);
- }
-
- if (quic_main.vnet_crypto_enabled)
- {
- // ctx->key_index =
- // quic_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
- ctx->key.algo = algo;
- ctx->key.key_len = _ctx->algo->key_size;
- assert (ctx->key.key_len <= 32);
- clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
- }
-
- return 0;
-}
-
-static int
-quic_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t *ctx, int is_enc,
- const void *key)
-{
- return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_128_ctr ());
-}
-
-static int
-quic_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t *ctx, int is_enc,
- const void *key)
-{
- return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_256_ctr ());
-}
-
-static int
-quic_crypto_aead_setup_crypto (ptls_aead_context_t *_ctx, int is_enc,
- const void *key, const void *iv,
- const EVP_CIPHER *cipher)
-{
- struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
-
- vnet_crypto_alg_t algo;
- if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
- {
- algo = VNET_CRYPTO_ALG_AES_128_GCM;
- ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_GCM_ENC :
- VNET_CRYPTO_OP_AES_128_GCM_DEC;
- ptls_openssl_aes128gcm.setup_crypto (_ctx, is_enc, key, iv);
- }
- else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
- {
- algo = VNET_CRYPTO_ALG_AES_256_GCM;
- ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_GCM_ENC :
- VNET_CRYPTO_OP_AES_256_GCM_DEC;
- ptls_openssl_aes256gcm.setup_crypto (_ctx, is_enc, key, iv);
- }
- else
- {
- QUIC_DBG (1, "%s, invalied aead cipher %s", __func__, _ctx->algo->name);
- assert (0);
- }
-
- if (quic_main.vnet_crypto_enabled)
- {
- clib_memcpy (ctx->static_iv, iv, ctx->super.algo->iv_size);
- // ctx->key_index =
- // quic_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
- ctx->key.algo = algo;
- ctx->key.key_len = _ctx->algo->key_size;
- assert (ctx->key.key_len <= 32);
- clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
- }
-
- return 0;
-}
-
-static int
-quic_crypto_aead_aes128gcm_setup_crypto (ptls_aead_context_t *ctx, int is_enc,
- const void *key, const void *iv)
-{
- return quic_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
- EVP_aes_128_gcm ());
-}
-
-static int
-quic_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t *ctx, int is_enc,
- const void *key, const void *iv)
-{
- return quic_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
- EVP_aes_256_gcm ());
-}
-
-int
-quic_encrypt_ticket_cb (ptls_encrypt_ticket_t *_self, ptls_t *tls,
- int is_encrypt, ptls_buffer_t *dst, ptls_iovec_t src)
-{
- quic_session_cache_t *self = (void *) _self;
- int ret;
-
- if (is_encrypt)
- {
-
- /* replace the cached entry along with a newly generated session id */
- clib_mem_free (self->data.base);
- if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
- return PTLS_ERROR_NO_MEMORY;
-
- ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
- clib_memcpy (self->data.base, src.base, src.len);
- self->data.len = src.len;
-
- /* store the session id in buffer */
- if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
- return ret;
- clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
- dst->off += sizeof (self->id);
- }
- else
- {
- /* check if session id is the one stored in cache */
- if (src.len != sizeof (self->id))
- return PTLS_ERROR_SESSION_NOT_FOUND;
- if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
- return PTLS_ERROR_SESSION_NOT_FOUND;
-
- /* return the cached value */
- if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
- return ret;
- clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
- dst->off += self->data.len;
- }
-
- return 0;
-}
-
-ptls_cipher_algorithm_t quic_crypto_aes128ctr = {
- "AES128-CTR",
- PTLS_AES128_KEY_SIZE,
- 1,
- PTLS_AES_IV_SIZE,
- sizeof (struct cipher_context_t),
- quic_crypto_aes128ctr_setup_crypto
-};
-
-ptls_cipher_algorithm_t quic_crypto_aes256ctr = {
- "AES256-CTR",
- PTLS_AES256_KEY_SIZE,
- 1 /* block size */,
- PTLS_AES_IV_SIZE,
- sizeof (struct cipher_context_t),
- quic_crypto_aes256ctr_setup_crypto
-};
-
-#define PTLS_X86_CACHE_LINE_ALIGN_BITS 6
-ptls_aead_algorithm_t quic_crypto_aes128gcm = {
- "AES128-GCM",
- PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
- PTLS_AESGCM_INTEGRITY_LIMIT,
- &quic_crypto_aes128ctr,
- &ptls_openssl_aes128ecb,
- PTLS_AES128_KEY_SIZE,
- PTLS_AESGCM_IV_SIZE,
- PTLS_AESGCM_TAG_SIZE,
- { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
- 1,
- PTLS_X86_CACHE_LINE_ALIGN_BITS,
- sizeof (struct aead_crypto_context_t),
- quic_crypto_aead_aes128gcm_setup_crypto
-};
-
-ptls_aead_algorithm_t quic_crypto_aes256gcm = {
- "AES256-GCM",
- PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
- PTLS_AESGCM_INTEGRITY_LIMIT,
- &quic_crypto_aes256ctr,
- &ptls_openssl_aes256ecb,
- PTLS_AES256_KEY_SIZE,
- PTLS_AESGCM_IV_SIZE,
- PTLS_AESGCM_TAG_SIZE,
- { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
- 1,
- PTLS_X86_CACHE_LINE_ALIGN_BITS,
- sizeof (struct aead_crypto_context_t),
- quic_crypto_aead_aes256gcm_setup_crypto
-};
-
-ptls_cipher_suite_t quic_crypto_aes128gcmsha256 = {
- PTLS_CIPHER_SUITE_AES_128_GCM_SHA256, &quic_crypto_aes128gcm,
- &ptls_openssl_sha256
-};
-
-ptls_cipher_suite_t quic_crypto_aes256gcmsha384 = {
- PTLS_CIPHER_SUITE_AES_256_GCM_SHA384, &quic_crypto_aes256gcm,
- &ptls_openssl_sha384
-};
-
-ptls_cipher_suite_t *quic_crypto_cipher_suites[] = {
- &quic_crypto_aes256gcmsha384, &quic_crypto_aes128gcmsha256, NULL
-};
-
-quicly_crypto_engine_t quic_crypto_engine = { quic_crypto_setup_cipher,
- quic_crypto_encrypt_packet };
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+++ /dev/null
-/*
- * Copyright (c) 2021 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __included_vpp_quic_crypto_h__
-#define __included_vpp_quic_crypto_h__
-
-#include <quicly.h>
-
-#if OPENSSL_VERSION_NUMBER >= 0x30000000L
-#include <openssl/provider.h>
-
-#define quic_load_openssl3_legacy_provider() \
- do \
- { \
- (void) OSSL_PROVIDER_load (NULL, "legacy"); \
- } \
- while (0)
-#else
-#define quic_load_openssl3_legacy_provider()
-#endif
-
-struct quic_ctx_t;
-
-extern ptls_cipher_suite_t *quic_crypto_cipher_suites[];
-
-int quic_encrypt_ticket_cb (ptls_encrypt_ticket_t * _self, ptls_t * tls,
- int is_encrypt, ptls_buffer_t * dst,
- ptls_iovec_t src);
-void quic_crypto_decrypt_packet (quic_ctx_t * qctx,
- quic_rx_packet_ctx_t * pctx);
-
-#endif /* __included_vpp_quic_crypto_h__ */
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_quic_inlines_h__
+#define __included_quic_inlines_h__
+
+#include <quic/quic.h>
+
+static_always_inline void
+quic_eng_engine_init (quic_main_t *qm)
+{
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].engine_init))
+ {
+ QUIC_DBG (1, "engine_init() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].engine_init (qm);
+}
+
+static_always_inline int
+quic_eng_app_cert_key_pair_delete (app_cert_key_pair_t *ckpair)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].app_cert_key_pair_delete))
+ {
+ QUIC_DBG (1, "app_cert_key_pair_delete() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (quic_engine_vfts[qm->engine_type].app_cert_key_pair_delete (ckpair));
+}
+
+static_always_inline int
+quic_eng_crypto_context_acquire (quic_ctx_t *ctx)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].crypto_context_acquire))
+ {
+ QUIC_DBG (1, "crypto_context_acquire() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (quic_engine_vfts[qm->engine_type].crypto_context_acquire (ctx));
+}
+
+static_always_inline void
+quic_eng_crypto_context_release (u32 crypto_context_index, u8 thread_index)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].crypto_context_release))
+ {
+ QUIC_DBG (1, "crypto_context_release() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].crypto_context_release (
+ crypto_context_index, thread_index);
+}
+
+static_always_inline int
+quic_eng_connect (quic_ctx_t *ctx, u32 ctx_index,
+ clib_thread_index_t thread_index, struct sockaddr *sa)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].connect))
+ {
+ QUIC_DBG (1, "connect() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (quic_engine_vfts[qm->engine_type].connect (ctx, ctx_index,
+ thread_index, sa));
+}
+
+static_always_inline int
+quic_eng_connect_stream (void *quic_conn, void **quic_stream,
+ quic_stream_data_t **quic_stream_data, u8 is_unidir)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].connect_stream))
+ {
+ QUIC_DBG (1, "connect_stream() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (quic_engine_vfts[qm->engine_type].connect_stream (
+ quic_conn, quic_stream, quic_stream_data, is_unidir));
+}
+
+static_always_inline void
+quic_eng_connect_stream_error_reset (void *quic_stream)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].connect_stream_error_reset))
+ {
+ QUIC_DBG (1,
+ "connect_stream_error_reset() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].connect_stream_error_reset (quic_stream);
+}
+
+static_always_inline void
+quic_eng_rpc_evt_to_thread_connection_migrate (u32 dest_thread,
+ quic_ctx_t *ctx)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].connection_migrate))
+ {
+ QUIC_DBG (1, "connection_migrate() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ session_send_rpc_evt_to_thread (
+ dest_thread, quic_engine_vfts[qm->engine_type].connection_migrate, ctx);
+}
+
+static_always_inline void
+quic_eng_connection_get_stats (void *conn, quic_stats_t *conn_stats)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].connection_get_stats))
+ {
+ QUIC_DBG (1, "connection_get_stats() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].connection_get_stats (conn, conn_stats);
+}
+
+static_always_inline int
+quic_eng_udp_session_rx_packets (session_t *udp_session)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].udp_session_rx_packets))
+ {
+ QUIC_DBG (1, "udp_session_rx_packets() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (
+ quic_engine_vfts[qm->engine_type].udp_session_rx_packets (udp_session));
+}
+
+static_always_inline void
+quic_eng_ack_rx_data (session_t *stream_session)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].ack_rx_data))
+ {
+ QUIC_DBG (1, "ack_rx_data() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].ack_rx_data (stream_session);
+}
+
+static_always_inline int
+quic_eng_stream_tx (quic_ctx_t *ctx, session_t *stream_session)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return -1;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].stream_tx))
+ {
+ QUIC_DBG (1, "stream_tx() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return -1;
+ }
+ return (quic_engine_vfts[qm->engine_type].stream_tx (ctx, stream_session));
+}
+
+static_always_inline int
+quic_eng_send_packets (void *conn)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return 0;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].send_packets))
+ {
+ QUIC_DBG (1, "send_packets() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return 0;
+ }
+ return (quic_engine_vfts[qm->engine_type].send_packets (conn));
+}
+
+static_always_inline u8 *
+quic_eng_format_connection_stats (u8 *s, va_list *args)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return s;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].format_connection_stats))
+ {
+ QUIC_DBG (1, "format_connection_stats() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return s;
+ }
+ return (quic_engine_vfts[qm->engine_type].format_connection_stats (s, args));
+}
+
+static_always_inline u8 *
+quic_eng_format_stream_connection (u8 *s, va_list *args)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return s;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].format_stream_connection))
+ {
+ QUIC_DBG (1, "format_stream_connection() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return s;
+ }
+ return (
+ quic_engine_vfts[qm->engine_type].format_stream_connection (s, args));
+}
+
+static_always_inline u8 *
+quic_eng_format_stream_ctx_stream_id (u8 *s, va_list *args)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return s;
+ }
+ if (PREDICT_FALSE (
+ !quic_engine_vfts[qm->engine_type].format_stream_ctx_stream_id))
+ {
+ QUIC_DBG (1,
+ "format_stream_ctx_stream_id() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return s;
+ }
+ return (
+ quic_engine_vfts[qm->engine_type].format_stream_ctx_stream_id (s, args));
+}
+
+static_always_inline void
+quic_eng_proto_on_close (u32 ctx_index, u32 thread_index)
+{
+ quic_main_t *qm = &quic_main;
+
+ if (PREDICT_FALSE (qm->engine_type == QUIC_ENGINE_NONE))
+ {
+ QUIC_DBG (1, "No QUIC engine is available\n");
+ return;
+ }
+ if (PREDICT_FALSE (!quic_engine_vfts[qm->engine_type].proto_on_close))
+ {
+ QUIC_DBG (1, "proto_on_close() not available for %s engine\n",
+ quic_engine_type_str (qm->engine_type));
+ return;
+ }
+ quic_engine_vfts[qm->engine_type].proto_on_close (ctx_index, thread_index);
+}
+
+#endif /* __included_quic_inlines_h__ */
QUIC HostStack
==============
-The quic plugin provides an `IETF QUIC protocol <https://tools.ietf.org/html/draft-ietf-quic-transport-22>`_ implementation. It is based on
-the quicly_ library.
+The quic plugin provides an
+`IETF QUIC protocol <https://tools.ietf.org/html/draft-ietf-quic-transport-22>`_ implementation.
+It is designed with a modular architecture that separates the core QUIC functionality
+from specific QUIC engine implementations.
This plugin adds the QUIC protocol to VPP's Host Stack. As a result QUIC is
usable both in internal VPP applications and in external apps.
- This plugin is under current development: it should mostly work, but has not been thoroughly tested and should not be used in production.
- Only bidirectional streams are supported currently.
+Architecture
+^^^^^^^^^^^^
+
+The QUIC plugin is split into two main components:
+
+1. Core QUIC Plugin
+
+   - Provides the framework and common functionality for QUIC protocol support
+ - Manages QUIC contexts, sessions, and connections
+ - Handles UDP transport layer integration
+ - Provides a virtual function table interface for engine implementations
+
+2. QUIC Engine Implementations
+
+ - Implemented as separate plugins (e.g., quic_quicly, quic_openssl)
+ - Each engine plugin provides a specific QUIC implementation
+ - Engines register themselves with the core plugin through a virtual function table
+ - Currently supported engines:
+
+ - QUICLY engine (quic_quicly plugin)
+ - OpenSSL engine (quic_openssl plugin)
+
+Engine Interface
+^^^^^^^^^^^^^^^^
+
+The core QUIC plugin defines a virtual function table interface that each engine implementation must provide. This interface includes functions for:
+
+- Engine initialization
+- Connection management
+- Stream handling
+- Packet processing
+- Crypto operations
+- Statistics and debugging
+
+Engine implementations register themselves with the core plugin using the registration function:
+
+.. code-block:: c
+
+ void quic_register_engine (const quic_engine_vft_t *vft,
+ quic_engine_type_t engine_type);
+
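+A QUIC engine plugin typically fills in this table once and registers it when
+the plugin loads. The snippet below is a minimal, illustrative sketch only:
+the ``quic_engine_vft_t`` member names match the core plugin's dispatch
+helpers, but the ``my_engine_*`` functions and the ``QUIC_ENGINE_MY_ENGINE``
+enum value are placeholders, not part of the actual API.
+
+.. code-block:: c
+
+    /* Illustrative sketch - not the literal quic_quicly implementation. */
+    static const quic_engine_vft_t my_engine_vft = {
+      .engine_init = my_engine_init,       /* one-time engine setup */
+      .connect = my_engine_connect,        /* open a QUIC connection */
+      .connect_stream = my_engine_connect_stream,
+      .send_packets = my_engine_send_packets,
+      .proto_on_close = my_engine_proto_on_close,
+      /* ... remaining callbacks: crypto contexts, rx, stats, formatting ... */
+    };
+
+    static clib_error_t *
+    my_engine_plugin_init (vlib_main_t *vm)
+    {
+      /* Placeholder engine type value, for illustration only. */
+      quic_register_engine (&my_engine_vft, QUIC_ENGINE_MY_ENGINE);
+      return 0;
+    }
+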
+Configuration
+^^^^^^^^^^^^^
+
+The QUIC plugin can be configured through VPP's configuration system. Key configuration options include:
+
+- Default crypto engine selection
+- Maximum packets per key
+- Default congestion control algorithm
+- UDP FIFO size and preallocation
+- Connection timeout settings
+
+Usage
+^^^^^
+
+To use the QUIC plugin:
+
+1. Enable the core QUIC plugin and desired engine plugin(s) in VPP's plugin configuration
+2. Configure the plugin settings as needed
+3. Use the QUIC API to create and manage QUIC connections and streams
+
+Example configuration:
+
+.. code-block:: shell
+
+ # Enable QUIC plugins
+ plugins {
+ plugin quic_plugin.so { enable }
+ plugin quic_quicly_plugin.so { enable }
+ }
+
+ # Configure QUIC settings
+ quic {
+ default-crypto-engine quicly
+ max-packets-per-key 16777216
+ default-quic-cc cubic
+ udp-fifo-size 65536
+ connection-timeout 30
+ }
+
Getting started
----------------
+^^^^^^^^^^^^^^^
* A common sample setup is with two vpp instances interconnected #twovppinstances
* Ensure your vpp configuration file contains ``session { evt_qs_memfd_seg }``
.. image:: /_images/quic_plugin_datastructures.png
+Debugging
+^^^^^^^^^
+
+The QUIC plugin provides several debugging features:
+
+- Log levels for different types of events (errors, connection/stream events, packet events, timer events)
+- Connection and stream statistics
+- Debug macros for development (see the sketch below)
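+
+The log levels map to the compile-time ``QUIC_DEBUG`` setting. The snippet
+below is a brief sketch of the conventions observed in the plugin sources
+(level 1 for errors, 2 for connection/stream events, 3 for packet/data
+events, 4 for timer events); it is not an exhaustive reference:
+
+.. code-block:: c
+
+    QUIC_DBG (1, "No QUIC engine is available\n");             /* errors */
+    QUIC_DBG (2, "Deleting connection %u", ctx->c_c_index);    /* connection events */
+    QUIC_DBG (3, "Emitting %u, offset %u", *len, off);         /* packet/data events */
+    QUIC_DBG (4, "Stopped timer for ctx %u", ctx->c_c_index);  /* timer events */
+
+    /* Heavier debug-only blocks are compiled out below a given level: */
+    #if QUIC_DEBUG >= 2
+      clib_warning ("Closing session 0x%lx", session_handle (stream_session));
+    #endif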
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_quic_timer_h__
+#define __included_quic_timer_h__
+
+#include <quic/quic.h>
+
+#define QUIC_TSTAMP_RESOLUTION 0.001 /* QUIC tick resolution (1ms) */
+#define QUIC_DEFAULT_CONN_TIMEOUT (30 * 1000) /* 30 seconds */
+
+static_always_inline void
+quic_update_time (f64 now, u8 thread_index)
+{
+ quic_main_t *qm = &quic_main;
+ tw_timer_wheel_1t_3w_1024sl_ov_t *tw =
+ &quic_wrk_ctx_get (qm, thread_index)->timer_wheel;
+
+ if (PREDICT_TRUE (qm->engine_is_initialized[qm->engine_type] != 0))
+ {
+ vlib_main_t *vlib_main = vlib_get_main ();
+ f64 time = vlib_time_now (vlib_main);
+ quic_wrk_ctx_get (qm, thread_index)->time_now =
+ (int64_t) (time * 1000.f);
+ tw_timer_expire_timers_1t_3w_1024sl_ov (tw, now);
+ }
+}
+
+static_always_inline void
+quic_stop_ctx_timer (tw_timer_wheel_1t_3w_1024sl_ov_t *tw, quic_ctx_t *ctx)
+{
+ if (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID)
+ return;
+ tw_timer_stop_1t_3w_1024sl_ov (tw, ctx->timer_handle);
+ ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
+}
+
+static_always_inline void
+quic_update_timer (quic_worker_ctx_t *wc, quic_ctx_t *ctx,
+ int64_t next_timeout)
+{
+ tw_timer_wheel_1t_3w_1024sl_ov_t *tw = &wc->timer_wheel;
+ int64_t next_interval;
+ session_t *quic_session;
+ int rv;
+
+ /* This timeout is in ms which is the unit of our timer */
+ next_interval = next_timeout - wc->time_now;
+
+ if (next_timeout == 0 || next_interval <= 0)
+ {
+ if (ctx->c_s_index == QUIC_SESSION_INVALID)
+ {
+ next_interval = 1;
+ }
+ else
+ {
+ quic_session = session_get (ctx->c_s_index, ctx->c_thread_index);
+ if (svm_fifo_set_event (quic_session->tx_fifo))
+ {
+ rv = session_program_tx_io_evt (quic_session->handle,
+ SESSION_IO_EVT_TX);
+ if (PREDICT_FALSE (rv))
+ {
+ QUIC_ERR ("Failed to enqueue builtin_tx %d", rv);
+ }
+ }
+ return;
+ }
+ }
+
+ ASSERT (vlib_get_thread_index () == ctx->c_thread_index ||
+ vlib_get_thread_index () == 0);
+
+ QUIC_DBG (4, "Timer set to %ld (int %ld) for ctx %u", next_timeout,
+ next_interval, ctx->c_c_index);
+
+ if (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID)
+ {
+ if (next_timeout == INT64_MAX)
+ {
+ QUIC_DBG (4, "timer for ctx %u already stopped", ctx->c_c_index);
+ return;
+ }
+ ctx->timer_handle =
+ tw_timer_start_1t_3w_1024sl_ov (tw, ctx->c_c_index, 0, next_interval);
+ }
+ else
+ {
+ if (next_timeout == INT64_MAX)
+ {
+ quic_stop_ctx_timer (tw, ctx);
+ QUIC_DBG (4, "Stopping timer for ctx %u", ctx->c_c_index);
+ }
+ else
+ {
+ tw_timer_update_1t_3w_1024sl_ov (tw, ctx->timer_handle,
+ next_interval);
+ }
+ }
+}
+
+#endif /* __included_quic_timer_h__ */
--- /dev/null
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2025 Cisco
+
+if(NOT OPENSSL_FOUND)
+ message(WARNING "OpenSSL not found - quic_quicly plugin disabled")
+ return()
+endif()
+
+unset(QUICLY_LINK_LIBRARIES)
+set(EXPECTED_QUICLY_VERSION "0.1.5-vpp")
+
+vpp_find_path(QUICLY_INCLUDE_DIR NAMES quicly.h)
+vpp_find_path(PICOTLS_INCLUDE_DIR NAMES picotls.h)
+vpp_find_library(QUICLY_LIBRARY NAMES "libquicly.a")
+vpp_find_library(PICOTLS_CORE_LIBRARY NAMES "libpicotls-core.a")
+vpp_find_library(PICOTLS_OPENSSL_LIBRARY NAMES "libpicotls-openssl.a")
+
+list(APPEND QUICLY_LINK_LIBRARIES
+ ${QUICLY_LIBRARY}
+ ${PICOTLS_CORE_LIBRARY}
+ ${PICOTLS_OPENSSL_LIBRARY}
+)
+
+if(QUICLY_INCLUDE_DIR AND QUICLY_LINK_LIBRARIES)
+ if(EXISTS "${QUICLY_INCLUDE_DIR}/quicly/version.h")
+ file(STRINGS "${QUICLY_INCLUDE_DIR}/quicly/version.h" quicly_version_str REGEX "^#define[\t ]+LIBQUICLY_VERSION[\t ]+\".*\"")
+ string(REGEX REPLACE "^#define[\t ]+LIBQUICLY_VERSION[\t ]+\"([^\"]*)\".*" "\\1" QUICLY_VERSION_STRING "${quicly_version_str}")
+ unset(quicly_version_str)
+ endif()
+
+ if (${QUICLY_VERSION_STRING} MATCHES "${EXPECTED_QUICLY_VERSION}")
+ include_directories (${QUICLY_INCLUDE_DIR})
+
+ if(PICOTLS_INCLUDE_DIR)
+ include_directories (${PICOTLS_INCLUDE_DIR})
+ endif()
+
+ add_vpp_plugin(quic_quicly
+ SOURCES
+ quic_quicly.c
+ quic_quicly_error.c
+ quic_quicly_crypto.c
+ ptls_certs.c
+
+ LINK_LIBRARIES ${QUICLY_LINK_LIBRARIES} ${OPENSSL_LIBRARIES}
+ )
+ message(STATUS "Found quicly ${EXPECTED_QUICLY_VERSION} in ${QUICLY_INCLUDE_DIR}")
+ else()
+ message(STATUS "-- quicly ${EXPECTED_QUICLY_VERSION} not found - quic_quicly plugin disabled")
+ endif()
+else()
+ message(WARNING "-- quicly headers not found - quic_quicly plugin disabled")
+endif()
--- /dev/null
+---
+name: QUICLY QUIC Protocol Library Plugin
+features:
+ - "based on the Quicly library: https://github.com/h2o/quicly"
+description: "IETF QUIC Protocol implementation"
+state: experimental
+properties: [API, CLI, STATS, MULTITHREAD]
+
-/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
#include <openssl/pem.h>
#include <vppinfra/error.h>
-#include <quic/certs.h>
-
+#include <quic_quicly/ptls_certs.h>
int
ptls_compare_separator_line (const char *line, const char *begin_or_end,
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_ptls_certs_h__
+#define __included_ptls_certs_h__
+
+#include <picotls/openssl.h>
+#include <picotls/pembase64.h>
+
+int ptls_compare_separator_line (const char *line, const char *begin_or_end,
+ const char *label);
+
+int ptls_get_bio_pem_object (BIO *bio, const char *label, ptls_buffer_t *buf);
+
+int ptls_load_bio_pem_objects (BIO *bio, const char *label, ptls_iovec_t *list,
+ size_t list_max, size_t *nb_objects);
+
+int ptls_load_bio_certificates (ptls_context_t *ctx, BIO *bio);
+
+int load_bio_certificate_chain (ptls_context_t *ctx, const char *cert_data);
+
+int load_bio_private_key (ptls_context_t *ctx, const char *pk_data);
+
+#endif /* __included_ptls_certs_h__ */
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/plugin.h>
+#include <vpp/app/version.h>
+#include <quic/quic.h>
+#include <quic/quic_timer.h>
+#include <quic_quicly/quic_quicly.h>
+#include <quic_quicly/quic_quicly_error.h>
+#include <quic_quicly/quic_quicly_crypto.h>
+#include <vnet/session/application.h>
+#include <vnet/session/session.h>
+
+quic_quicly_main_t quic_quicly_main;
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Quicly QUIC Engine",
+};
+
+static_always_inline quicly_context_t *
+quic_quicly_get_quicly_ctx_from_ctx (quic_ctx_t *ctx)
+{
+ crypto_context_t *crctx = quic_quicly_crypto_context_get (
+ ctx->crypto_context_index, ctx->c_thread_index);
+ quic_quicly_crypto_context_data_t *data =
+ (quic_quicly_crypto_context_data_t *) crctx->data;
+ return &data->quicly_ctx;
+}
+
+static_always_inline quicly_context_t *
+quic_quicly_get_quicly_ctx_from_udp (u64 udp_session_handle)
+{
+ session_t *udp_session = session_get_from_handle (udp_session_handle);
+ quic_ctx_t *ctx =
+ quic_quicly_get_quic_ctx (udp_session->opaque, udp_session->thread_index);
+ return quic_quicly_get_quicly_ctx_from_ctx (ctx);
+}
+
+static_always_inline int
+quic_quicly_sendable_packet_count (session_t *udp_session)
+{
+ u32 max_enqueue;
+ u32 packet_size = QUIC_MAX_PACKET_SIZE + SESSION_CONN_HDR_LEN;
+ max_enqueue = svm_fifo_max_enqueue (udp_session->tx_fifo);
+ return clib_min (max_enqueue / packet_size, QUIC_SEND_PACKET_VEC_SIZE);
+}
+
+static_always_inline void
+quic_quicly_make_connection_key (clib_bihash_kv_16_8_t *kv,
+ const quicly_cid_plaintext_t *id)
+{
+ kv->key[0] = ((u64) id->master_id) << 32 | (u64) id->thread_id;
+ kv->key[1] = id->node_id;
+}
+
+static void
+quic_quicly_connection_delete (quic_ctx_t *ctx)
+{
+ clib_bihash_kv_16_8_t kv;
+ quicly_conn_t *conn;
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ quic_main_t *qm = qqm->qm;
+
+ if (ctx->conn == NULL)
+ {
+ QUIC_DBG (2, "Skipping redundant delete of connection %u",
+ ctx->c_c_index);
+ return;
+ }
+ QUIC_DBG (2, "Deleting connection %u", ctx->c_c_index);
+
+ QUIC_ASSERT (!quic_ctx_is_stream (ctx));
+ quic_stop_ctx_timer (
+ &quic_wrk_ctx_get (qm, ctx->c_thread_index)->timer_wheel, ctx);
+ QUIC_DBG (4, "Stopped timer for ctx %u", ctx->c_c_index);
+
+ /* Delete the connection from the connection map */
+ conn = ctx->conn;
+ ctx->conn = NULL;
+ quic_quicly_make_connection_key (&kv, quicly_get_master_id (conn));
+ QUIC_DBG (2, "Deleting conn with id %lu %lu from map", kv.key[0], kv.key[1]);
+ clib_bihash_add_del_16_8 (&qqm->connection_hash, &kv, 0 /* is_add */);
+
+ quic_disconnect_transport (ctx, qm->app_index);
+ quicly_free (conn);
+ session_transport_delete_notify (&ctx->connection);
+}
+
+/**
+ * Called when quicly returns an error.
+ * This function interacts tightly with quic_quicly_proto_on_close.
+ */
+static void
+quic_quicly_connection_closed (quic_ctx_t *ctx)
+{
+ QUIC_DBG (2, "QUIC connection %u/%u closed", ctx->c_thread_index,
+ ctx->c_c_index);
+
+ /* TODO if connection is not established, just delete the session? */
+ /* Actually should send connect or accept error */
+
+ switch (ctx->conn_state)
+ {
+ case QUIC_CONN_STATE_READY:
+ /* Error on an opened connection (timeout...)
+ This puts the session in closing state, we should receive a
+ notification when the app has closed its session */
+ session_transport_reset_notify (&ctx->connection);
+ /* This ensures we delete the connection when the app confirms the close
+ */
+ ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED;
+ break;
+ case QUIC_CONN_STATE_PASSIVE_CLOSING:
+ ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED;
+ /* quic_quicly_proto_on_close will eventually be called when the app
+ confirms the close , we delete the connection at that point */
+ break;
+ case QUIC_CONN_STATE_PASSIVE_CLOSING_APP_CLOSED:
+ /* App already confirmed close, we can delete the connection */
+ quic_quicly_connection_delete (ctx);
+ break;
+ case QUIC_CONN_STATE_OPENED:
+ case QUIC_CONN_STATE_HANDSHAKE:
+ case QUIC_CONN_STATE_ACTIVE_CLOSING:
+ quic_quicly_connection_delete (ctx);
+ break;
+ default:
+ QUIC_DBG (0, "BUG %d", ctx->conn_state);
+ break;
+ }
+}
+
+static int
+quic_quicly_send_datagram (session_t *udp_session, struct iovec *packet,
+ ip46_address_t *rmt_ip, u16 rmt_port)
+{
+ u32 max_enqueue, len;
+ session_dgram_hdr_t hdr;
+ svm_fifo_t *f;
+ transport_connection_t *tc;
+ int ret;
+
+ len = packet->iov_len;
+ f = udp_session->tx_fifo;
+ tc = session_get_transport (udp_session);
+ max_enqueue = svm_fifo_max_enqueue (f);
+ if (max_enqueue < SESSION_CONN_HDR_LEN + len)
+ {
+ QUIC_ERR ("Too much data to send, max_enqueue %u, len %u", max_enqueue,
+ len + SESSION_CONN_HDR_LEN);
+ return QUIC_QUICLY_ERROR_FULL_FIFO;
+ }
+
+ /* Build packet header for fifo */
+ hdr.data_length = len;
+ hdr.data_offset = 0;
+ hdr.is_ip4 = tc->is_ip4;
+ clib_memcpy (&hdr.lcl_ip, &tc->lcl_ip, sizeof (ip46_address_t));
+ hdr.lcl_port = tc->lcl_port;
+ hdr.gso_size = 0;
+
+ hdr.rmt_port = rmt_port;
+ if (hdr.is_ip4)
+ {
+ hdr.rmt_ip.ip4.as_u32 = rmt_ip->ip4.as_u32;
+ }
+ else
+ {
+ clib_memcpy_fast (&hdr.rmt_ip.ip6, &rmt_ip->ip6, sizeof (rmt_ip->ip6));
+ }
+
+ svm_fifo_seg_t segs[2] = { { (u8 *) &hdr, sizeof (hdr) },
+ { packet->iov_base, len } };
+
+ ret = svm_fifo_enqueue_segments (f, segs, 2, 0 /* allow partial */);
+ if (PREDICT_FALSE (ret < 0))
+ {
+ QUIC_ERR ("Not enough space to enqueue dgram");
+ return QUIC_QUICLY_ERROR_FULL_FIFO;
+ }
+
+ quic_increment_counter (quic_quicly_main.qm, QUIC_ERROR_TX_PACKETS, 1);
+
+ return 0;
+}
+
+static_always_inline void
+quic_quicly_set_udp_tx_evt (session_t *udp_session)
+{
+ int rv = 0;
+ if (svm_fifo_set_event (udp_session->tx_fifo))
+ {
+ rv = session_program_tx_io_evt (udp_session->handle, SESSION_IO_EVT_TX);
+ }
+ if (PREDICT_FALSE (rv))
+ {
+ clib_warning ("Event enqueue errored %d", rv);
+ }
+}
+
+static_always_inline void
+quic_quicly_addr_to_ip46_addr (quicly_address_t *quicly_addr,
+ ip46_address_t *ip46_addr, u16 *ip46_port)
+{
+ if (quicly_addr->sa.sa_family == AF_INET)
+ {
+ struct sockaddr_in *sa4 = (struct sockaddr_in *) &quicly_addr->sa;
+ *ip46_port = sa4->sin_port;
+ ip46_addr->ip4.as_u32 = sa4->sin_addr.s_addr;
+ }
+ else
+ {
+ QUIC_ASSERT (quicly_addr->sa.sa_family == AF_INET6);
+ struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &quicly_addr->sa;
+ *ip46_port = sa6->sin6_port;
+ clib_memcpy_fast (&ip46_addr->ip6, &sa6->sin6_addr, 16);
+ }
+}
+
+static int
+quic_quicly_send_packets (quic_ctx_t *ctx)
+{
+  /* TODO: move these packet buffers off the stack */
+  struct iovec packets[QUIC_SEND_PACKET_VEC_SIZE];
+
+  uint8_t buf[QUIC_SEND_PACKET_VEC_SIZE *
+              quic_quicly_get_quicly_ctx_from_ctx (ctx)
+                ->transport_params.max_udp_payload_size];
+ session_t *udp_session;
+ quicly_conn_t *conn;
+ size_t num_packets, i, max_packets;
+ u32 n_sent = 0;
+ int err = 0;
+ quicly_address_t quicly_rmt_ip, quicly_lcl_ip;
+ int64_t next_timeout; // TODO: GET THIS OFF OF THE STACK
+
+ /* We have sctx, get qctx */
+ if (quic_ctx_is_stream (ctx))
+ {
+ ctx = quic_quicly_get_quic_ctx (ctx->quic_connection_ctx_id,
+ ctx->c_thread_index);
+ }
+
+ QUIC_ASSERT (!quic_ctx_is_stream (ctx));
+
+ udp_session = session_get_from_handle_if_valid (ctx->udp_session_handle);
+ if (!udp_session)
+ {
+ goto quicly_error;
+ }
+
+ conn = ctx->conn;
+ if (!conn)
+ {
+ return 0;
+ }
+
+ do
+ {
+ /* TODO : quicly can assert it can send min_packets up to 2 */
+ max_packets = quic_quicly_sendable_packet_count (udp_session);
+ if (max_packets < 2)
+ {
+ break;
+ }
+
+ num_packets = max_packets;
+ if ((err = quicly_send (conn, &quicly_rmt_ip, &quicly_lcl_ip, packets,
+ &num_packets, buf, sizeof (buf))))
+ {
+ goto quicly_error;
+ }
+ if (num_packets > 0)
+ {
+ quic_quicly_addr_to_ip46_addr (&quicly_rmt_ip, &ctx->rmt_ip,
+ &ctx->rmt_port);
+ for (i = 0; i != num_packets; ++i)
+ {
+ if ((err = quic_quicly_send_datagram (
+ udp_session, &packets[i], &ctx->rmt_ip, ctx->rmt_port)))
+ {
+ goto quicly_error;
+ }
+ }
+ n_sent += num_packets;
+ }
+ }
+ while (num_packets > 0 && num_packets == max_packets);
+
+ quic_quicly_set_udp_tx_evt (udp_session);
+
+ QUIC_DBG (3, "%u[TX] %u[RX]", svm_fifo_max_dequeue (udp_session->tx_fifo),
+ svm_fifo_max_dequeue (udp_session->rx_fifo));
+
+ next_timeout = quicly_get_first_timeout (conn);
+ quic_update_timer (
+ quic_wrk_ctx_get (quic_quicly_main.qm, ctx->c_thread_index), ctx,
+ next_timeout);
+ return n_sent;
+
+quicly_error:
+ if (err && err != QUICLY_ERROR_PACKET_IGNORED &&
+ err != QUICLY_ERROR_FREE_CONNECTION)
+ {
+ clib_warning ("Quic error '%U'.", quic_quicly_format_err, err);
+ }
+ quic_quicly_connection_closed (ctx);
+ return 0;
+}
+
+static_always_inline void
+quic_quicly_timer_expired (u32 conn_index)
+{
+ quic_ctx_t *ctx;
+
+ ctx = quic_quicly_get_quic_ctx (conn_index, vlib_get_thread_index ());
+ ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
+ quic_quicly_send_packets (ctx);
+}
+
+static void
+quic_quicly_expired_timers_dispatch (u32 *expired_timers)
+{
+ int i;
+#if QUIC_DEBUG >= 4
+  int64_t time_now =
+    quic_wrk_ctx_get (quic_quicly_main.qm, vlib_get_thread_index ())->time_now;
+#endif
+ for (i = 0; i < vec_len (expired_timers); i++)
+ {
+ QUIC_DBG (4, "Timer expired for conn %u at %ld", i, time_now);
+ quic_quicly_timer_expired (expired_timers[i]);
+ }
+}
+
+static_always_inline session_t *
+get_stream_session_and_ctx_from_stream (quicly_stream_t *stream,
+ quic_ctx_t **ctx)
+{
+ quic_stream_data_t *stream_data;
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ *ctx =
+ quic_quicly_get_quic_ctx (stream_data->ctx_id, stream_data->thread_index);
+ return session_get ((*ctx)->c_s_index, stream_data->thread_index);
+}
+
+/* Quicly callbacks */
+
+static void
+quic_quicly_on_stream_destroy (quicly_stream_t *stream, int err)
+{
+ quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
+ quic_ctx_t *sctx =
+ quic_quicly_get_quic_ctx (stream_data->ctx_id, stream_data->thread_index);
+
+ QUIC_DBG (2, "DESTROYED_STREAM: session 0x%lx (%U)",
+ sctx->udp_session_handle, quic_quicly_format_err, err);
+
+ session_transport_closing_notify (&sctx->connection);
+ session_transport_delete_notify (&sctx->connection);
+
+ quic_increment_counter (quic_quicly_main.qm, QUIC_ERROR_CLOSED_STREAM, 1);
+ quic_ctx_free (quic_quicly_main.qm, sctx);
+ clib_mem_free (stream->data);
+}
+
+static void
+quic_quicly_fifo_egress_shift (quicly_stream_t *stream, size_t delta)
+{
+ quic_stream_data_t *stream_data;
+ session_t *stream_session;
+ quic_ctx_t *ctx;
+ svm_fifo_t *f;
+ u32 rv;
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ stream_session = get_stream_session_and_ctx_from_stream (stream, &ctx);
+ f = stream_session->tx_fifo;
+
+ QUIC_ASSERT (stream_data->app_tx_data_len >= delta);
+ stream_data->app_tx_data_len -= delta;
+ ctx->bytes_written += delta;
+ rv = svm_fifo_dequeue_drop (f, delta);
+ QUIC_ASSERT (rv == delta);
+
+ rv = quicly_stream_sync_sendbuf (stream, 0);
+ QUIC_ASSERT (!rv);
+}
+
+static void
+quic_quicly_fifo_egress_emit (quicly_stream_t *stream, size_t off, void *dst,
+ size_t *len, int *wrote_all)
+{
+ quic_stream_data_t *stream_data;
+ quic_ctx_t *ctx;
+ session_t *stream_session;
+ svm_fifo_t *f;
+ u32 deq_max;
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ stream_session = get_stream_session_and_ctx_from_stream (stream, &ctx);
+ f = stream_session->tx_fifo;
+
+ QUIC_DBG (3, "Emitting %u, offset %u", *len, off);
+
+ deq_max = svm_fifo_max_dequeue_cons (f);
+ QUIC_ASSERT (off <= deq_max);
+ if (off + *len < deq_max)
+ {
+ *wrote_all = 0;
+ }
+ else
+ {
+ *wrote_all = 1;
+ *len = deq_max - off;
+ }
+ QUIC_ASSERT (*len > 0);
+
+ if (off + *len > stream_data->app_tx_data_len)
+ {
+ stream_data->app_tx_data_len = off + *len;
+ }
+ svm_fifo_peek (f, off, *len, dst);
+}
+
+static void
+quic_quicly_on_stop_sending (quicly_stream_t *stream, int err)
+{
+ /* TODO : handle STOP_SENDING */
+#if QUIC_DEBUG >= 2
+ quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
+ quic_ctx_t *sctx =
+ quic_quicly_get_quic_ctx (stream_data->ctx_id, stream_data->thread_index);
+ session_t *stream_session =
+ session_get (sctx->c_s_index, sctx->c_thread_index);
+ clib_warning ("(NOT IMPLEMENTD) STOP_SENDING: session 0x%lx (%U)",
+ session_handle (stream_session), quic_quicly_format_err, err);
+#endif
+}
+
+static void
+quic_quicly_ack_rx_data (session_t *stream_session)
+{
+ u32 max_deq;
+ quic_ctx_t *sctx;
+ svm_fifo_t *f;
+ quicly_stream_t *stream;
+ quic_stream_data_t *stream_data;
+
+ sctx = quic_quicly_get_quic_ctx (stream_session->connection_index,
+ stream_session->thread_index);
+ QUIC_ASSERT (quic_ctx_is_stream (sctx));
+ stream = sctx->stream;
+ stream_data = (quic_stream_data_t *) stream->data;
+
+ f = stream_session->rx_fifo;
+ max_deq = svm_fifo_max_dequeue (f);
+
+ QUIC_ASSERT (stream_data->app_rx_data_len >= max_deq);
+ quicly_stream_sync_recvbuf (stream, stream_data->app_rx_data_len - max_deq);
+ QUIC_DBG (3, "Acking %u bytes", stream_data->app_rx_data_len - max_deq);
+ stream_data->app_rx_data_len = max_deq;
+}
+
+static void
+quic_quicly_on_receive (quicly_stream_t *stream, size_t off, const void *src,
+ size_t len)
+{
+ QUIC_DBG (3, "received data: %lu bytes, offset %lu", len, off);
+ u32 max_enq;
+ quic_ctx_t *sctx;
+ session_t *stream_session;
+ app_worker_t *app_wrk;
+ svm_fifo_t *f;
+ quic_stream_data_t *stream_data;
+ int rlen;
+
+ if (!len)
+ {
+ return;
+ }
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ sctx =
+ quic_quicly_get_quic_ctx (stream_data->ctx_id, stream_data->thread_index);
+ stream_session = session_get (sctx->c_s_index, stream_data->thread_index);
+ f = stream_session->rx_fifo;
+
+ max_enq = svm_fifo_max_enqueue_prod (f);
+ QUIC_DBG (3, "Enqueuing %u at off %u in %u space", len, off, max_enq);
+ /* Handle duplicate packet/chunk from quicly */
+ if (off < stream_data->app_rx_data_len)
+ {
+ QUIC_DBG (3,
+ "Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
+ "DUPLICATE PACKET (max_enq %u, len %u, "
+ "app_rx_data_len %u, off %u, ToBeNQ %u)",
+ stream_session->session_index, stream_session->app_wrk_index,
+ stream_session->thread_index, f, max_enq, len,
+ stream_data->app_rx_data_len, off,
+ off - stream_data->app_rx_data_len + len);
+ return;
+ }
+ if (PREDICT_FALSE ((off - stream_data->app_rx_data_len + len) > max_enq))
+ {
+ QUIC_ERR ("Session [idx %u, app_wrk %u, thread %u, rx-fifo 0x%llx]: "
+ "RX FIFO IS FULL (max_enq %u, len %u, "
+ "app_rx_data_len %u, off %u, ToBeNQ %u)",
+ stream_session->session_index, stream_session->app_wrk_index,
+ stream_session->thread_index, f, max_enq, len,
+ stream_data->app_rx_data_len, off,
+ off - stream_data->app_rx_data_len + len);
+ return; /* This shouldn't happen */
+ }
+ if (off == stream_data->app_rx_data_len)
+ {
+ /* Streams live on the same thread so (f, stream_data) should stay
+ * consistent */
+ rlen = svm_fifo_enqueue (f, len, (u8 *) src);
+ if (PREDICT_FALSE (rlen < 0))
+ {
+ /*
+ * drop, fifo full
+ * drop, fifo grow
+ */
+ return;
+ }
+ QUIC_DBG (3,
+ "Session [idx %u, app_wrk %u, ti %u, rx-fifo 0x%llx]: "
+ "Enqueuing %u (rlen %u) at off %u in %u space, ",
+ stream_session->session_index, stream_session->app_wrk_index,
+ stream_session->thread_index, f, len, rlen, off, max_enq);
+ stream_data->app_rx_data_len += rlen;
+ QUIC_ASSERT (rlen >= len);
+ app_wrk = app_worker_get_if_valid (stream_session->app_wrk_index);
+ if (PREDICT_TRUE (app_wrk != 0))
+ {
+ app_worker_rx_notify (app_wrk, stream_session);
+ }
+ quic_quicly_ack_rx_data (stream_session);
+ }
+ else
+ {
+ rlen = svm_fifo_enqueue_with_offset (
+ f, off - stream_data->app_rx_data_len, len, (u8 *) src);
+ if (PREDICT_FALSE (rlen < 0))
+ {
+ /*
+ * drop, fifo full
+ * drop, fifo grow
+ */
+ return;
+ }
+ QUIC_ASSERT (rlen == 0);
+ }
+}
+
+static void
+quic_quicly_on_receive_reset (quicly_stream_t *stream, int quicly_error)
+{
+ quic_stream_data_t *stream_data = (quic_stream_data_t *) stream->data;
+ quic_ctx_t *sctx =
+ quic_quicly_get_quic_ctx (stream_data->ctx_id, stream_data->thread_index);
+#if QUIC_DEBUG >= 2
+ session_t *stream_session =
+ session_get (sctx->c_s_index, sctx->c_thread_index);
+ clib_warning ("RESET_STREAM: session 0x%lx (%U)",
+ session_handle (stream_session), quic_quicly_format_err,
+ quicly_error);
+#endif
+ session_transport_closing_notify (&sctx->connection);
+}
+
+const quicly_stream_callbacks_t quic_quicly_stream_callbacks = {
+ .on_destroy = quic_quicly_on_stream_destroy,
+ .on_send_shift = quic_quicly_fifo_egress_shift,
+ .on_send_emit = quic_quicly_fifo_egress_emit,
+ .on_send_stop = quic_quicly_on_stop_sending,
+ .on_receive = quic_quicly_on_receive,
+ .on_receive_reset = quic_quicly_on_receive_reset
+};
+
+quic_ctx_t *
+quic_quicly_get_conn_ctx (void *conn)
+{
+ u64 conn_data;
+ conn_data = (u64) *quicly_get_data ((quicly_conn_t *) conn);
+ return quic_quicly_get_quic_ctx (conn_data & UINT32_MAX, conn_data >> 32);
+}
+
+static_always_inline void
+quic_quicly_store_conn_ctx (void *conn, quic_ctx_t *ctx)
+{
+ *quicly_get_data ((quicly_conn_t *) conn) =
+ (void *) (((u64) ctx->c_thread_index) << 32 | (u64) ctx->c_c_index);
+}
+
+static_always_inline void
+quic_quicly_update_conn_ctx (quicly_conn_t *conn,
+ quicly_context_t *quicly_context)
+{
+ /* we need to update the quicly_conn on migrate
+ * as it contains a pointer to the crypto context */
+ ptls_context_t **tls;
+ quicly_context_t **_quicly_context;
+ _quicly_context = (quicly_context_t **) conn;
+ *_quicly_context = quicly_context;
+ tls = (ptls_context_t **) quicly_get_tls (conn);
+ *tls = quicly_context->tls;
+}
+
+static void
+quic_quicly_connection_migrate (quic_ctx_t *ctx)
+{
+ u32 new_ctx_id, thread_index = vlib_get_thread_index ();
+ quic_ctx_t *new_ctx;
+ clib_bihash_kv_16_8_t kv;
+ quicly_conn_t *conn;
+ quicly_context_t *quicly_context;
+ session_t *udp_session;
+ int64_t next_timeout;
+
+ new_ctx_id = quic_ctx_alloc (quic_quicly_main.qm, thread_index);
+ new_ctx = quic_quicly_get_quic_ctx (new_ctx_id, thread_index);
+
+ QUIC_DBG (2, "Received conn %u (now %u)", ctx->c_thread_index, new_ctx_id);
+
+ clib_memcpy (new_ctx, ctx, sizeof (quic_ctx_t));
+ clib_mem_free (ctx);
+
+ new_ctx->c_thread_index = thread_index;
+ new_ctx->c_c_index = new_ctx_id;
+ quic_quicly_crypto_context_acquire (new_ctx);
+
+ conn = new_ctx->conn;
+ quicly_context = quic_quicly_get_quicly_ctx_from_ctx (new_ctx);
+ quic_quicly_update_conn_ctx (conn, quicly_context);
+
+ quic_quicly_store_conn_ctx (conn, new_ctx);
+ quic_quicly_make_connection_key (&kv, quicly_get_master_id (conn));
+ kv.value = ((u64) thread_index) << 32 | (u64) new_ctx_id;
+ QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
+ clib_bihash_add_del_16_8 (&quic_quicly_main.connection_hash, &kv,
+ 1 /* is_add */);
+ new_ctx->timer_handle = QUIC_TIMER_HANDLE_INVALID;
+ next_timeout = quicly_get_first_timeout (ctx->conn);
+
+ quic_update_timer (quic_wrk_ctx_get (quic_quicly_main.qm, thread_index),
+ new_ctx, next_timeout);
+
+ /* Trigger write on this connection if necessary */
+ udp_session = session_get_from_handle (new_ctx->udp_session_handle);
+ udp_session->opaque = new_ctx_id;
+ udp_session->flags &= ~SESSION_F_IS_MIGRATING;
+ if (svm_fifo_max_dequeue (udp_session->tx_fifo))
+ {
+ quic_quicly_set_udp_tx_evt (udp_session);
+ }
+}
+
+static int
+quic_quicly_reset_connection (u64 udp_session_handle,
+ quic_quicly_rx_packet_ctx_t *pctx)
+{
+ /* short header packet; potentially a dead connection. No need to check the
+ * length of the incoming packet, because loop is prevented by authenticating
+ * the CID (by checking node_id and thread_id). If the peer is also sending a
+ * reset, then the next CID is highly likely to contain a non-authenticating
+ * CID, ... */
+ QUIC_DBG (2, "Sending stateless reset");
+ int rv;
+ session_t *udp_session;
+ quicly_context_t *quicly_ctx;
+ if (pctx->packet.cid.dest.plaintext.node_id != 0 ||
+ pctx->packet.cid.dest.plaintext.thread_id != 0)
+ {
+ return 0;
+ }
+ quicly_ctx = quic_quicly_get_quicly_ctx_from_udp (udp_session_handle);
+ quic_ctx_t *ctx =
+ quic_quicly_get_quic_ctx (pctx->ctx_index, pctx->thread_index);
+
+ quicly_address_t src;
+ uint8_t payload[quicly_ctx->transport_params.max_udp_payload_size];
+ size_t payload_len =
+ quicly_send_stateless_reset (quicly_ctx, &src.sa, payload);
+ if (payload_len == 0)
+ {
+ return 1;
+ }
+
+ struct iovec packet;
+ packet.iov_len = payload_len;
+ packet.iov_base = payload;
+
+ udp_session = session_get_from_handle (udp_session_handle);
+ quic_quicly_addr_to_ip46_addr (&src, &ctx->rmt_ip, &ctx->rmt_port);
+ rv = quic_quicly_send_datagram (udp_session, &packet, &ctx->rmt_ip,
+ ctx->rmt_port);
+ quic_quicly_set_udp_tx_evt (udp_session);
+ return rv;
+}
+
+static_always_inline quic_ctx_t *
+quic_quicly_get_quic_ctx_if_valid (u32 ctx_index,
+ clib_thread_index_t thread_index)
+{
+ quic_main_t *qm = quic_quicly_main.qm;
+
+ if (pool_is_free_index (qm->ctx_pool[thread_index], ctx_index))
+ return 0;
+ return pool_elt_at_index (qm->ctx_pool[thread_index], ctx_index);
+}
+
+static void
+quic_quicly_proto_on_close (u32 ctx_index, clib_thread_index_t thread_index)
+{
+ int err;
+ quic_ctx_t *ctx =
+ quic_quicly_get_quic_ctx_if_valid (ctx_index, thread_index);
+ if (!ctx)
+ {
+ return;
+ }
+ session_t *stream_session =
+ session_get (ctx->c_s_index, ctx->c_thread_index);
+#if QUIC_DEBUG >= 2
+ clib_warning ("Closing session 0x%lx", session_handle (stream_session));
+#endif
+ if (quic_ctx_is_stream (ctx))
+ {
+ quicly_stream_t *stream = ctx->stream;
+ if (!quicly_stream_has_send_side (quicly_is_client (stream->conn),
+ stream->stream_id))
+ {
+ return;
+ }
+ quicly_sendstate_shutdown (
+ &stream->sendstate,
+ ctx->bytes_written + svm_fifo_max_dequeue (stream_session->tx_fifo));
+ err = quicly_stream_sync_sendbuf (stream, 1);
+ if (err)
+ {
+ QUIC_DBG (1, "sendstate_shutdown failed for stream session %lu",
+ session_handle (stream_session));
+ quicly_reset_stream (stream, QUIC_QUICLY_APP_ERROR_CLOSE_NOTIFY);
+ }
+ quic_quicly_send_packets (ctx);
+ return;
+ }
+
+ switch (ctx->conn_state)
+ {
+ case QUIC_CONN_STATE_OPENED:
+ case QUIC_CONN_STATE_HANDSHAKE:
+ case QUIC_CONN_STATE_READY:
+ ctx->conn_state = QUIC_CONN_STATE_ACTIVE_CLOSING;
+ quicly_conn_t *conn = ctx->conn;
+ /* Start connection closing. Keep sending packets until quicly_send
+ returns QUICLY_ERROR_FREE_CONNECTION */
+
+ quic_increment_counter (quic_quicly_main.qm,
+ QUIC_ERROR_CLOSED_CONNECTION, 1);
+ quicly_close (conn, QUIC_QUICLY_APP_ERROR_CLOSE_NOTIFY,
+ "Closed by peer");
+ /* This also causes all streams to be closed (and the cb called) */
+ quic_quicly_send_packets (ctx);
+ break;
+ case QUIC_CONN_STATE_PASSIVE_CLOSING:
+ ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING_APP_CLOSED;
+ /* send_packets will eventually return an error, we delete the conn at
+ that point */
+ break;
+ case QUIC_CONN_STATE_PASSIVE_CLOSING_QUIC_CLOSED:
+ quic_quicly_connection_delete (ctx);
+ break;
+ case QUIC_CONN_STATE_ACTIVE_CLOSING:
+ break;
+ default:
+ QUIC_ERR ("Trying to close conn in state %d", ctx->conn_state);
+ break;
+ }
+}
+
+/*
+ * Returns 0 if a matching connection is found and is on the right thread.
+ * Otherwise returns -1.
+ * If a connection is found, even on the wrong thread, ctx_thread and ctx_index
+ * will be set.
+ */
+static_always_inline int
+quic_quicly_find_packet_ctx (quic_quicly_rx_packet_ctx_t *pctx,
+ u32 caller_thread_index)
+{
+ clib_bihash_kv_16_8_t kv;
+ clib_bihash_16_8_t *h;
+ quic_ctx_t *ctx;
+ u32 index, thread_id;
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+
+ h = &qqm->connection_hash;
+ quic_quicly_make_connection_key (&kv, &pctx->packet.cid.dest.plaintext);
+ QUIC_DBG (3, "Searching conn with id %lu %lu", kv.key[0], kv.key[1]);
+
+ if (clib_bihash_search_16_8 (h, &kv, &kv))
+ {
+ QUIC_DBG (3, "connection not found");
+ return QUIC_PACKET_TYPE_NONE;
+ }
+
+ index = kv.value & UINT32_MAX;
+ thread_id = kv.value >> 32;
+ /* Check if this connection belongs to this thread, otherwise
+ * ask for it to be moved */
+ if (thread_id != caller_thread_index)
+ {
+ QUIC_DBG (2, "Connection is on wrong thread");
+ /* Cannot make full check with quicly_is_destination... */
+ pctx->ctx_index = index;
+ pctx->thread_index = thread_id;
+ return QUIC_PACKET_TYPE_MIGRATE;
+ }
+ ctx = quic_quicly_get_quic_ctx (index, vlib_get_thread_index ());
+ if (!ctx->conn)
+ {
+ QUIC_ERR ("ctx has no conn");
+ return QUIC_PACKET_TYPE_NONE;
+ }
+ if (!quicly_is_destination (ctx->conn, NULL, &pctx->sa, &pctx->packet))
+ {
+ return QUIC_PACKET_TYPE_NONE;
+ }
+
+ QUIC_DBG (3, "Connection found");
+ pctx->ctx_index = index;
+ pctx->thread_index = thread_id;
+ return QUIC_PACKET_TYPE_RECEIVE;
+}
+
+static void
+quic_quicly_accept_connection (quic_quicly_rx_packet_ctx_t *pctx)
+{
+ quicly_context_t *quicly_ctx;
+ session_t *quic_session;
+ clib_bihash_kv_16_8_t kv;
+ app_worker_t *app_wrk;
+ quicly_conn_t *conn;
+ quic_ctx_t *ctx;
+ quic_ctx_t *lctx;
+ int rv;
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+
+ /* new connection, accept and create context if packet is valid
+ * TODO: check if socket is actually listening? */
+ ctx = quic_quicly_get_quic_ctx (pctx->ctx_index, pctx->thread_index);
+ if (ctx->c_s_index != QUIC_SESSION_INVALID)
+ {
+ QUIC_DBG (2, "already accepted ctx 0x%x", ctx->c_s_index);
+ return;
+ }
+
+ quicly_ctx = quic_quicly_get_quicly_ctx_from_ctx (ctx);
+ rv = quicly_accept (&conn, quicly_ctx, NULL, &pctx->sa, &pctx->packet, NULL,
+ &qqm->next_cid[pctx->thread_index], NULL, NULL);
+ if (rv)
+ {
+ /* Invalid packet, pass */
+ assert (conn == NULL);
+ QUIC_ERR ("Accept failed with %U", quic_quicly_format_err, rv);
+ /* TODO: cleanup created quic ctx and UDP session */
+ return;
+ }
+  assert (conn != NULL); // TODO: why is this in the release image???
+
+ ++qqm->next_cid[pctx->thread_index].master_id;
+ /* Save ctx handle in quicly connection */
+ quic_quicly_store_conn_ctx (conn, ctx);
+ ctx->conn = conn;
+
+ quic_session = session_alloc (ctx->c_thread_index);
+ QUIC_DBG (2, "Allocated quic_session, 0x%lx ctx %u",
+ session_handle (quic_session), ctx->c_c_index);
+ ctx->c_s_index = quic_session->session_index;
+
+ lctx = quic_quicly_get_quic_ctx (ctx->listener_ctx_id, 0);
+
+ quic_session->app_wrk_index = lctx->parent_app_wrk_id;
+ quic_session->connection_index = ctx->c_c_index;
+ quic_session->session_type =
+ session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
+ quic_session->listener_handle = lctx->c_s_index;
+
+ /* Register connection in connections map */
+ quic_quicly_make_connection_key (&kv, quicly_get_master_id (conn));
+ kv.value = ((u64) pctx->thread_index) << 32 | (u64) pctx->ctx_index;
+ clib_bihash_add_del_16_8 (&qqm->connection_hash, &kv, 1 /* is_add */);
+ QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
+
+  /* If notify fails, reset the connection immediately */
+ rv = app_worker_init_accepted (quic_session);
+ if (rv)
+ {
+ QUIC_ERR ("failed to allocate fifos");
+ quic_quicly_proto_on_close (pctx->ctx_index, pctx->thread_index);
+ return;
+ }
+
+ svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */);
+ svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */);
+
+ app_wrk = app_worker_get (quic_session->app_wrk_index);
+ quic_session->session_state = SESSION_STATE_ACCEPTING;
+ rv = app_worker_accept_notify (app_wrk, quic_session);
+ if (rv)
+ {
+ QUIC_ERR ("failed to notify accept worker app");
+ quic_quicly_proto_on_close (pctx->ctx_index, pctx->thread_index);
+ return;
+ }
+
+ ctx->conn_state = QUIC_CONN_STATE_READY;
+}
+
+static int
+quic_quicly_process_one_rx_packet (u64 udp_session_handle, svm_fifo_t *f,
+ u32 fifo_offset,
+ quic_quicly_rx_packet_ctx_t *pctx)
+{
+ size_t plen;
+ u32 full_len, ret;
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
+ u32 cur_deq = svm_fifo_max_dequeue (f) - fifo_offset;
+ quicly_context_t *quicly_ctx;
+ session_t *udp_session;
+ int rv;
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ quic_main_t *qm = qqm->qm;
+
+ ret = svm_fifo_peek (f, fifo_offset, SESSION_CONN_HDR_LEN, (u8 *) &pctx->ph);
+ QUIC_ASSERT (ret == SESSION_CONN_HDR_LEN);
+ QUIC_ASSERT (pctx->ph.data_offset == 0);
+ full_len = pctx->ph.data_length + SESSION_CONN_HDR_LEN;
+ if (full_len > cur_deq)
+ {
+ QUIC_ERR ("Not enough data in fifo RX");
+ return 1;
+ }
+
+ /* Quicly can read len bytes from the fifo at offset:
+ * ph.data_offset + SESSION_CONN_HDR_LEN */
+ ret = svm_fifo_peek (f, SESSION_CONN_HDR_LEN + fifo_offset,
+ pctx->ph.data_length, pctx->data);
+ if (ret != pctx->ph.data_length)
+ {
+ QUIC_ERR ("Not enough data peeked in RX");
+ return 1;
+ }
+
+ quic_increment_counter (quic_quicly_main.qm, QUIC_ERROR_RX_PACKETS, 1);
+ quic_build_sockaddr (&pctx->sa, &pctx->salen, &pctx->ph.rmt_ip,
+ pctx->ph.rmt_port, pctx->ph.is_ip4);
+ quicly_ctx = quic_quicly_get_quicly_ctx_from_udp (udp_session_handle);
+ size_t off = 0;
+ plen = quicly_decode_packet (quicly_ctx, &pctx->packet, pctx->data,
+ pctx->ph.data_length, &off);
+ if (plen == SIZE_MAX)
+ {
+ return 1;
+ }
+
+ rv = quic_quicly_find_packet_ctx (pctx, thread_index);
+ if (rv == QUIC_PACKET_TYPE_RECEIVE)
+ {
+ pctx->ptype = QUIC_PACKET_TYPE_RECEIVE;
+ if (qqm->vnet_crypto_enabled &&
+ qm->default_crypto_engine == CRYPTO_ENGINE_VPP)
+ {
+ quic_ctx_t *qctx =
+ quic_quicly_get_quic_ctx (pctx->ctx_index, thread_index);
+ quic_quicly_crypto_decrypt_packet (qctx, pctx);
+ }
+ return 0;
+ }
+ else if (rv == QUIC_PACKET_TYPE_MIGRATE)
+ {
+ /* Connection found but on wrong thread, ask move */
+ pctx->ptype = QUIC_PACKET_TYPE_MIGRATE;
+ }
+ else if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
+ {
+ pctx->ptype = QUIC_PACKET_TYPE_ACCEPT;
+ udp_session = session_get_from_handle (udp_session_handle);
+ pctx->ctx_index = udp_session->opaque;
+ pctx->thread_index = thread_index;
+ }
+ else
+ {
+ pctx->ptype = QUIC_PACKET_TYPE_RESET;
+ }
+ return 1;
+}
+
+static int
+quic_quicly_connect (quic_ctx_t *ctx, u32 ctx_index,
+ clib_thread_index_t thread_index, struct sockaddr *sa)
+{
+ clib_bihash_kv_16_8_t kv;
+ quicly_context_t *quicly_ctx;
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ int ret;
+
+ quicly_ctx = quic_quicly_get_quicly_ctx_from_ctx (ctx);
+ ret = quicly_connect (
+ (quicly_conn_t **) &ctx->conn, quicly_ctx, (char *) ctx->srv_hostname, sa,
+ NULL, &qqm->next_cid[thread_index], ptls_iovec_init (NULL, 0),
+ &qqm->hs_properties, NULL, NULL);
+ ++qqm->next_cid[thread_index].master_id;
+ /* save context handle in quicly connection */
+ quic_quicly_store_conn_ctx (ctx->conn, ctx);
+ assert (ret == 0); // TODO: why is this in the release image???
+
+ /* Register connection in connections map */
+ quic_quicly_make_connection_key (
+ &kv, quicly_get_master_id ((quicly_conn_t *) ctx->conn));
+ kv.value = ((u64) thread_index) << 32 | (u64) ctx_index;
+ QUIC_DBG (2, "Registering conn with id %lu %lu", kv.key[0], kv.key[1]);
+ clib_bihash_add_del_16_8 (&qqm->connection_hash, &kv, 1 /* is_add */);
+
+ return (ret);
+}
+
+static u8 *
+quic_quicly_format_quicly_conn_id (u8 *s, va_list *args)
+{
+ quicly_cid_plaintext_t *mid = va_arg (*args, quicly_cid_plaintext_t *);
+ s = format (s, "C%x_%x", mid->master_id, mid->thread_id);
+ return s;
+}
+
+static u8 *
+quic_quicly_format_stream_ctx_stream_id (u8 *s, va_list *args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ quicly_stream_t *stream = (quicly_stream_t *) ctx->stream;
+
+ s = format (s, "%U S%lx", quic_quicly_format_quicly_conn_id,
+ quicly_get_master_id (stream->conn), stream->stream_id);
+ return s;
+}
+
+static u8 *
+quic_quicly_format_stream_connection (u8 *s, va_list *args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ quicly_stream_t *stream = (quicly_stream_t *) ctx->stream;
+
+ s = format (s, "Stream %ld conn %d", stream->stream_id,
+ ctx->quic_connection_ctx_id);
+ return s;
+}
+
+static_always_inline void
+quic_quicly_connection_get_stats (void *conn, quic_stats_t *conn_stats)
+{
+ quicly_stats_t qstats;
+
+ quicly_get_stats ((quicly_conn_t *) conn, &qstats);
+ conn_stats->rtt_smoothed = qstats.rtt.smoothed;
+ conn_stats->rtt_minimum = qstats.rtt.minimum;
+ conn_stats->rtt_variance = qstats.rtt.variance;
+ conn_stats->num_packets_received = qstats.num_packets.received;
+ conn_stats->num_packets_sent = qstats.num_packets.sent;
+ conn_stats->num_packets_lost = qstats.num_packets.lost;
+ conn_stats->num_packets_ack_received = qstats.num_packets.ack_received;
+ conn_stats->num_bytes_received = qstats.num_bytes.received;
+ conn_stats->num_bytes_sent = qstats.num_bytes.sent;
+}
+
+static u8 *
+quic_quicly_format_connection_stats (u8 *s, va_list *args)
+{
+ quic_ctx_t *ctx = va_arg (*args, quic_ctx_t *);
+ quicly_stats_t quicly_stats;
+
+ s = format (s, "[%U]", quic_quicly_format_quicly_conn_id,
+ quicly_get_master_id (ctx->conn));
+
+ quicly_get_stats (ctx->conn, &quicly_stats);
+
+ s = format (s, "[RTT >%3d, ~%3d, V%3d, last %3d]", quicly_stats.rtt.minimum,
+ quicly_stats.rtt.smoothed, quicly_stats.rtt.variance,
+ quicly_stats.rtt.latest);
+ s = format (s, " TX:%d RX:%d loss:%d ack:%d", quicly_stats.num_packets.sent,
+ quicly_stats.num_packets.received, quicly_stats.num_packets.lost,
+ quicly_stats.num_packets.ack_received);
+ s =
+ format (s, "\ncwnd:%u ssthresh:%u recovery_end:%lu", quicly_stats.cc.cwnd,
+ quicly_stats.cc.ssthresh, quicly_stats.cc.recovery_end);
+
+ quicly_context_t *quicly_ctx = quic_quicly_get_quicly_ctx_from_ctx (ctx);
+ if (quicly_ctx->init_cc == &quicly_cc_cubic_init)
+ {
+ s = format (s,
+ "\nk:%d w_max:%u w_last_max:%u avoidance_start:%ld "
+ "last_sent_time:%ld",
+ quicly_stats.cc.state.cubic.k,
+ quicly_stats.cc.state.cubic.w_max,
+ quicly_stats.cc.state.cubic.w_last_max,
+ quicly_stats.cc.state.cubic.avoidance_start,
+ quicly_stats.cc.state.cubic.last_sent_time);
+ }
+ else if (quicly_ctx->init_cc == &quicly_cc_reno_init)
+ {
+ s = format (s, " stash:%u", quicly_stats.cc.state.reno.stash);
+ }
+ return s;
+}
+
+static_always_inline int
+quic_quicly_receive_a_packet (quic_ctx_t *ctx,
+ quic_quicly_rx_packet_ctx_t *pctx)
+{
+ int rv = quicly_receive (ctx->conn, NULL, &pctx->sa, &pctx->packet);
+ if (rv && rv != QUICLY_ERROR_PACKET_IGNORED)
+ {
+ QUIC_ERR ("quicly_receive return error %U", quic_quicly_format_err, rv);
+ }
+
+ // FIXME: Don't return quicly error codes here.
+ // TODO: Define appropriate QUIC return values for QUIC VFT's!
+ return rv;
+}
+
+static_always_inline int
+quic_quicly_connect_stream (void *quic_conn, void **quic_stream,
+ quic_stream_data_t **quic_stream_data,
+ u8 is_unidir)
+{
+ quicly_conn_t *conn = quic_conn;
+ quicly_stream_t *quicly_stream;
+ int rv;
+
+ if (!quicly_connection_is_ready (conn))
+ {
+ return -1; // TODO: Define appropriate QUIC return values for QUIC VFT's!
+ }
+
+ rv = quicly_open_stream (conn, (quicly_stream_t **) quic_stream, is_unidir);
+ if (rv)
+ {
+ QUIC_DBG (2, "quicly_open_stream() failed with %d", rv);
+ return -1; // TODO: Define appropriate QUIC return values for QUIC VFT's!
+ }
+
+ quicly_stream = *(quicly_stream_t **) quic_stream;
+ *quic_stream_data = (quic_stream_data_t *) quicly_stream->data;
+
+ QUIC_DBG (2, "Opened quicly_stream %d, creating session",
+ quicly_stream->stream_id);
+ return 0;
+}
+
+static_always_inline void
+quic_quicly_connect_stream_error_reset (void *quic_stream)
+{
+ quicly_reset_stream ((quicly_stream_t *) quic_stream,
+ QUIC_QUICLY_APP_CONNECT_NOTIFY_ERROR);
+}
+
+static_always_inline int
+quic_quicly_stream_tx (quic_ctx_t *ctx, session_t *stream_session)
+{
+ quic_stream_data_t *stream_data;
+ quicly_stream_t *stream;
+ u32 max_deq;
+ int rv = 0;
+
+ stream = ctx->stream;
+ if (!quicly_sendstate_is_open (&stream->sendstate))
+ {
+ QUIC_ERR ("Warning: tried to send on closed stream");
+ return 0;
+ }
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ max_deq = svm_fifo_max_dequeue (stream_session->tx_fifo);
+ QUIC_ASSERT (max_deq >= stream_data->app_tx_data_len);
+ if (max_deq == stream_data->app_tx_data_len)
+ {
+ QUIC_DBG (3, "TX but no data %d / %d", max_deq,
+ stream_data->app_tx_data_len);
+ return 0;
+ }
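+  /* Account for all bytes currently in the tx fifo and ask quicly to resync
+   * its send buffer so the new data gets scheduled for transmission. */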
+ stream_data->app_tx_data_len = max_deq;
+ rv = quicly_stream_sync_sendbuf (stream, 1);
+ QUIC_ASSERT (!rv);
+
+ return (rv);
+}
+
+static void
+quic_quicly_engine_init (quic_main_t *qm)
+{
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ quicly_cid_plaintext_t *next_cid;
+ clib_bihash_24_8_t *crctx_hash;
+ tw_timer_wheel_1t_3w_1024sl_ov_t *tw;
+ u32 i;
+
+ qm->default_crypto_engine = CRYPTO_ENGINE_PICOTLS;
+ qm->default_quic_cc = QUIC_CC_RENO;
+ qm->max_packets_per_key = DEFAULT_MAX_PACKETS_PER_KEY;
+ qqm->session_cache.super.cb = quic_quicly_encrypt_ticket_cb;
+ qqm->qm = qm;
+
+ vec_validate (quic_quicly_main.next_cid, qm->num_threads - 1);
+ next_cid = qqm->next_cid;
+ vec_validate (quic_quicly_main.crypto_ctx_hash, qm->num_threads - 1);
+ crctx_hash = qqm->crypto_ctx_hash;
+ clib_bitmap_alloc (quic_quicly_main.available_crypto_engines,
+ app_crypto_engine_n_types ());
+ clib_bihash_init_16_8 (&qqm->connection_hash,
+ "quic (quicly engine) connections", 1024, 4 << 20);
+ quic_quicly_register_cipher_suite (CRYPTO_ENGINE_PICOTLS,
+ ptls_openssl_cipher_suites);
+
+ /* TODO: Review comment from Florin
+ * Should we move this to quic timers and have quic framework call it?
+ * If we have dependencies issues, at least move it to quic framework.
+ */
+ for (i = 0; i < qm->num_threads; i++)
+ {
+ tw = &quic_wrk_ctx_get (qm, i)->timer_wheel;
+ tw_timer_wheel_init_1t_3w_1024sl_ov (tw,
+ quic_quicly_expired_timers_dispatch,
+ 1e-3 /* timer period 1ms */, ~0);
+ tw->last_run_time = vlib_time_now (vlib_get_main ());
+ next_cid[i].thread_id = i;
+ clib_bihash_init_24_8 (&crctx_hash[i], "quic crypto contexts", 64,
+ 128 << 10);
+ QUIC_DBG (2,
+ "Initialized crctx_hash[%u] "
+ "(buckets = 0x%lx)",
+ i, crctx_hash[i].buckets);
+ }
+}
+
+static void
+quic_quicly_on_quic_session_connected (quic_ctx_t *ctx)
+{
+ session_t *quic_session;
+ app_worker_t *app_wrk;
+ u32 ctx_id = ctx->c_c_index;
+ clib_thread_index_t thread_index = ctx->c_thread_index;
+ int rv;
+
+ quic_session = session_alloc (thread_index);
+
+ QUIC_DBG (2, "Allocated quic session 0x%lx", session_handle (quic_session));
+ ctx->c_s_index = quic_session->session_index;
+ quic_session->app_wrk_index = ctx->parent_app_wrk_id;
+ quic_session->connection_index = ctx->c_c_index;
+ quic_session->listener_handle = SESSION_INVALID_HANDLE;
+ quic_session->session_type =
+ session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, ctx->udp_is_ip4);
+
+  /* If initializing the connected quic session fails, immediately close the
+   * connection */
+ app_wrk = app_worker_get (ctx->parent_app_wrk_id);
+ if ((rv = app_worker_init_connected (app_wrk, quic_session)))
+ {
+      QUIC_ERR ("app_worker_init_connected failed");
+ quic_quicly_proto_on_close (ctx_id, thread_index);
+ app_worker_connect_notify (app_wrk, NULL, rv, ctx->client_opaque);
+ return;
+ }
+
+ svm_fifo_init_ooo_lookup (quic_session->rx_fifo, 0 /* ooo enq */);
+ svm_fifo_init_ooo_lookup (quic_session->tx_fifo, 1 /* ooo deq */);
+
+ quic_session->session_state = SESSION_STATE_CONNECTING;
+ if ((rv = app_worker_connect_notify (app_wrk, quic_session, SESSION_E_NONE,
+ ctx->client_opaque)))
+ {
+ QUIC_ERR ("failed to notify app %d", rv);
+ quic_quicly_proto_on_close (ctx_id, thread_index);
+ return;
+ }
+}
+
+void
+quic_quicly_check_quic_session_connected (quic_ctx_t *ctx)
+{
+  /* Called when we need to trigger the quic session connected notification.
+   * This may also be invoked on the server side or at stream opening. */
+ quic_session_connected_t session_connected;
+
+ /* Conn may be set to null if the connection is terminated */
+ if (!ctx->conn || ctx->conn_state != QUIC_CONN_STATE_HANDSHAKE)
+ return;
+
+ session_connected = quic_quicly_is_session_connected (ctx);
+ if (session_connected == QUIC_SESSION_CONNECTED_NONE)
+ return;
+
+ ctx->conn_state = QUIC_CONN_STATE_READY;
+ if (session_connected == QUIC_SESSION_CONNECTED_CLIENT)
+ {
+ quic_quicly_on_quic_session_connected (ctx);
+ }
+}
+
+static int
+quic_quicly_udp_session_rx_packets (session_t *udp_session)
+{
+ /* Read data from UDP rx_fifo and pass it to the quic_eng conn. */
+ quic_ctx_t *ctx = NULL, *prev_ctx = NULL;
+ svm_fifo_t *f = udp_session->rx_fifo;
+ u32 max_deq;
+ u64 udp_session_handle = session_handle (udp_session);
+ int rv = 0;
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
+ u32 cur_deq, fifo_offset, max_packets, i;
+  // TODO: move the packet buffer off of the stack and
+  //       allocate a vector of quic_quicly_rx_packet_ctx_t instead.
+ quic_quicly_rx_packet_ctx_t packets_ctx[QUIC_RCV_MAX_PACKETS];
+
+ if (udp_session->flags & SESSION_F_IS_MIGRATING)
+ {
+ QUIC_DBG (3, "RX on migrating udp session");
+ return 0;
+ }
+
+rx_start:
+ max_deq = svm_fifo_max_dequeue (f);
+ if (max_deq == 0)
+ {
+ return 0;
+ }
+
+ fifo_offset = 0;
+ max_packets = QUIC_RCV_MAX_PACKETS;
+
+#if CLIB_DEBUG > 0
+ clib_memset (packets_ctx, 0xfa,
+ QUIC_RCV_MAX_PACKETS * sizeof (quic_quicly_rx_packet_ctx_t));
+#endif
+ for (i = 0; i < max_packets; i++)
+ {
+ packets_ctx[i].thread_index = UINT32_MAX;
+ packets_ctx[i].ctx_index = UINT32_MAX;
+ packets_ctx[i].ptype = QUIC_PACKET_TYPE_DROP;
+
+ cur_deq = max_deq - fifo_offset;
+ if (cur_deq == 0)
+ {
+ max_packets = i + 1;
+ break;
+ }
+ if (cur_deq < SESSION_CONN_HDR_LEN)
+ {
+ fifo_offset = max_deq;
+ max_packets = i + 1;
+ QUIC_ERR ("Fifo %d < header size in RX", cur_deq);
+ break;
+ }
+ rv = quic_quicly_process_one_rx_packet (udp_session_handle, f,
+ fifo_offset, &packets_ctx[i]);
+ if (packets_ctx[i].ptype != QUIC_PACKET_TYPE_MIGRATE)
+ {
+ fifo_offset += SESSION_CONN_HDR_LEN + packets_ctx[i].ph.data_length;
+ }
+ if (rv)
+ {
+ max_packets = i + 1;
+ break;
+ }
+ }
+
+ for (i = 0; i < max_packets; i++)
+ {
+ switch (packets_ctx[i].ptype)
+ {
+ case QUIC_PACKET_TYPE_RECEIVE:
+ ctx =
+ quic_quicly_get_quic_ctx (packets_ctx[i].ctx_index, thread_index);
+ // FIXME: Process return value and handle errors.
+ quic_quicly_receive_a_packet (ctx, &packets_ctx[i]);
+ break;
+ case QUIC_PACKET_TYPE_ACCEPT:
+ // FIXME: Process return value and handle errors.
+ quic_quicly_accept_connection (&packets_ctx[i]);
+ break;
+ case QUIC_PACKET_TYPE_RESET:
+ // FIXME: Process return value and handle errors.
+ quic_quicly_reset_connection (udp_session_handle, &packets_ctx[i]);
+ break;
+ }
+ }
+ ctx = prev_ctx = NULL;
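+  /* Second pass: signal connected state where needed and flush outgoing
+   * packets once per distinct connection touched by the batch. */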
+ for (i = 0; i < max_packets; i++)
+ {
+ prev_ctx = ctx;
+ switch (packets_ctx[i].ptype)
+ {
+ case QUIC_PACKET_TYPE_RECEIVE:
+ ctx = quic_quicly_get_quic_ctx (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ quic_quicly_check_quic_session_connected (ctx);
+ ctx = quic_quicly_get_quic_ctx (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ break;
+ case QUIC_PACKET_TYPE_ACCEPT:
+ ctx = quic_quicly_get_quic_ctx (packets_ctx[i].ctx_index,
+ packets_ctx[i].thread_index);
+ break;
+ default:
+	  continue; /* other packet types are necessarily the last in the
+		     * batch, so skipping them effectively ends the loop */
+ }
+ if (ctx != prev_ctx)
+ {
+ quic_quicly_send_packets (ctx);
+ }
+ }
+
+ /* session alloc might have happened, so get session again */
+ udp_session = session_get_from_handle (udp_session_handle);
+ f = udp_session->rx_fifo;
+ svm_fifo_dequeue_drop (f, fifo_offset);
+
+ if (svm_fifo_max_dequeue (f))
+ {
+ goto rx_start;
+ }
+
+ return 0;
+}
+
+const static quic_engine_vft_t quic_quicly_engine_vft = {
+ .engine_init = quic_quicly_engine_init,
+ .app_cert_key_pair_delete = quic_quicly_app_cert_key_pair_delete,
+ .crypto_context_acquire = quic_quicly_crypto_context_acquire,
+ .crypto_context_release = quic_quicly_crypto_context_release,
+ .connect = quic_quicly_connect,
+ .connect_stream = quic_quicly_connect_stream,
+ .connect_stream_error_reset = quic_quicly_connect_stream_error_reset,
+ .connection_migrate = quic_quicly_connection_migrate,
+ .connection_get_stats = quic_quicly_connection_get_stats,
+ .udp_session_rx_packets = quic_quicly_udp_session_rx_packets,
+ .ack_rx_data = quic_quicly_ack_rx_data,
+ .stream_tx = quic_quicly_stream_tx,
+ .send_packets = quic_quicly_send_packets,
+ .format_connection_stats = quic_quicly_format_connection_stats,
+ .format_stream_connection = quic_quicly_format_stream_connection,
+ .format_stream_ctx_stream_id = quic_quicly_format_stream_ctx_stream_id,
+ .proto_on_close = quic_quicly_proto_on_close,
+};
+
+static clib_error_t *
+quic_quicly_init (vlib_main_t *vm)
+{
+ quic_register_engine_fn register_engine;
+
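+  /* Resolve the registration hook exported by the quic framework plugin and
+   * register this engine's vft with it. */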
+ register_engine =
+ vlib_get_plugin_symbol ("quic_plugin.so", "quic_register_engine");
+ if (register_engine == 0)
+ {
+ clib_warning ("quic_plugin.so not loaded...");
+ return clib_error_return (0, "Unable to get plugin symbol: "
+ "'quic_register_engine'");
+ }
+ (*register_engine) (&quic_quicly_engine_vft, QUIC_ENGINE_QUICLY);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (quic_quicly_init) = {
+ .runs_after = VLIB_INITS ("quic_init"),
+};
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_quic_quicly_h__
+#define __included_quic_quicly_h__
+
+#include <quic/quic.h>
+#include <quic_quicly/ptls_certs.h>
+#include <vnet/session/session.h>
+#include <quicly.h>
+#include <quicly/constants.h>
+#include <quicly/defaults.h>
+#include <picotls.h>
+#include <picotls/openssl.h>
+
+/* Taken from quicly.c */
+#define QUICLY_QUIC_BIT 0x40
+
+#define QUICLY_PACKET_TYPE_INITIAL \
+ (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0)
+#define QUICLY_PACKET_TYPE_0RTT \
+ (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x10)
+#define QUICLY_PACKET_TYPE_HANDSHAKE \
+ (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x20)
+#define QUICLY_PACKET_TYPE_RETRY \
+ (QUICLY_LONG_HEADER_BIT | QUICLY_QUIC_BIT | 0x30)
+#define QUICLY_PACKET_TYPE_BITMASK 0xf0
+
+typedef struct quic_quicly_rx_packet_ctx_
+{
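+  /* Common rx packet context fields shared with the quic framework
+   * (expanded from foreach_quic_rx_pkt_ctx_field). */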
+#define _(type, name) type name;
+ foreach_quic_rx_pkt_ctx_field
+#undef _
+ quicly_decoded_packet_t packet;
+ u8 data[QUIC_MAX_PACKET_SIZE];
+ union
+ {
+ struct sockaddr sa;
+ struct sockaddr_in6 sa6;
+ };
+ socklen_t salen;
+ session_dgram_hdr_t ph;
+} quic_quicly_rx_packet_ctx_t;
+
+/* single-entry session cache */
+typedef struct quic_quicly_session_cache_
+{
+ ptls_encrypt_ticket_t super;
+ uint8_t id[32];
+ ptls_iovec_t data;
+} quic_quicly_session_cache_t;
+
+typedef struct quic_quicly_main_
+{
+ quic_main_t *qm;
+ ptls_cipher_suite_t **
+ *quic_ciphers; /**< available ciphers by crypto engine */
+ u32 *per_thread_crypto_key_indices;
+ ptls_handshake_properties_t hs_properties;
+ clib_bihash_16_8_t connection_hash; /**< quic connection id -> conn handle */
+ quic_quicly_session_cache_t session_cache;
+ quicly_cid_plaintext_t *next_cid;
+ clib_bihash_24_8_t *crypto_ctx_hash;
+ uword *available_crypto_engines; /**< Bitmap for registered engines */
+ u8 vnet_crypto_enabled;
+} quic_quicly_main_t;
+
+extern quic_quicly_main_t quic_quicly_main;
+extern const quicly_stream_callbacks_t quic_quicly_stream_callbacks;
+extern quic_ctx_t *quic_quicly_get_conn_ctx (void *conn);
+extern void quic_quicly_check_quic_session_connected (quic_ctx_t *ctx);
+
+static_always_inline quic_ctx_t *
+quic_quicly_get_quic_ctx (u32 ctx_index, u32 thread_index)
+{
+ return pool_elt_at_index (quic_quicly_main.qm->ctx_pool[thread_index],
+ ctx_index);
+}
+
+static_always_inline quic_session_connected_t
+quic_quicly_is_session_connected (quic_ctx_t *ctx)
+{
+ quic_session_connected_t session_connected = QUIC_SESSION_CONNECTED_NONE;
+
+ if (quicly_connection_is_ready (ctx->conn))
+ {
+ session_connected = quicly_is_client (ctx->conn) ?
+ QUIC_SESSION_CONNECTED_CLIENT :
+ QUIC_SESSION_CONNECTED_SERVER;
+ }
+
+ return (session_connected);
+}
+
+#endif /* __included_quic_quicly_h__ */
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <quic_quicly/quic_quicly.h>
+#include <quic_quicly/quic_quicly_error.h>
+#include <quic_quicly/quic_quicly_crypto.h>
+#include <vnet/session/application.h>
+#include <vnet/session/session.h>
+
+#include <quic/quic_timer.h>
+#include <quicly.h>
+#include <picotls/openssl.h>
+#include <pthread.h>
+
+#define QUICLY_EPOCH_1RTT 3
+
+vnet_crypto_main_t *cm = &crypto_main;
+
+static_always_inline void
+quic_quicly_crypto_context_make_key_from_ctx (clib_bihash_kv_24_8_t *kv,
+ quic_ctx_t *ctx)
+{
+ application_t *app = application_get (ctx->parent_app_id);
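+  /* Must mirror quic_quicly_crypto_context_make_key_from_crctx (): the fifo
+   * sizes below correspond to the max_stream_data limits stored in the
+   * quicly context. */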
+ kv->key[0] = ((u64) ctx->ckpair_index) << 32 | (u64) ctx->crypto_engine;
+ kv->key[1] = app->sm_properties.rx_fifo_size - 1;
+ kv->key[2] = app->sm_properties.tx_fifo_size - 1;
+}
+
+void
+quic_quicly_crypto_context_make_key_from_crctx (clib_bihash_kv_24_8_t *kv,
+ crypto_context_t *crctx)
+{
+ quic_quicly_crypto_context_data_t *data =
+ (quic_quicly_crypto_context_data_t *) crctx->data;
+ kv->key[0] = ((u64) crctx->ckpair_index) << 32 | (u64) crctx->crypto_engine;
+ kv->key[1] = data->quicly_ctx.transport_params.max_stream_data.bidi_local;
+ kv->key[2] = data->quicly_ctx.transport_params.max_stream_data.bidi_remote;
+}
+
+int
+quic_quicly_app_cert_key_pair_delete (app_cert_key_pair_t *ckpair)
+{
+ quic_main_t *qm = quic_quicly_main.qm;
+ clib_bihash_24_8_t *crctx_hash = quic_quicly_main.crypto_ctx_hash;
+ crypto_context_t *crctx;
+ clib_bihash_kv_24_8_t kv;
+ int i;
+
+ for (i = 0; i < qm->num_threads; i++)
+ {
+ pool_foreach (crctx, quic_wrk_ctx_get (qm, i)->crypto_ctx_pool)
+ {
+ if (crctx->ckpair_index == ckpair->cert_key_index)
+ {
+ quic_quicly_crypto_context_make_key_from_crctx (&kv, crctx);
+ clib_bihash_add_del_24_8 (&crctx_hash[i], &kv, 0 /* is_add */);
+ }
+ }
+ }
+ return 0;
+}
+
+static crypto_context_t *
+quic_quicly_crypto_context_alloc (u8 thread_index)
+{
+ quic_worker_ctx_t *wc = quic_wrk_ctx_get (quic_quicly_main.qm, thread_index);
+ crypto_context_t *crctx;
+ u32 idx;
+
+ pool_get_aligned_safe (wc->crypto_ctx_pool, crctx, CLIB_CACHE_LINE_BYTES);
+ clib_memset (crctx, 0, sizeof (*crctx));
+ idx = (crctx - wc->crypto_ctx_pool);
+ crctx->ctx_index = ((u32) thread_index) << 24 | idx;
+ QUIC_DBG (3, "Allocated crctx %u on thread %u", idx, thread_index);
+
+ return crctx;
+}
+
+static void
+quic_quicly_crypto_context_free_if_needed (crypto_context_t *crctx,
+ u8 thread_index)
+{
+ clib_bihash_24_8_t *crctx_hash = quic_quicly_main.crypto_ctx_hash;
+ clib_bihash_kv_24_8_t kv;
+ if (crctx->n_subscribers)
+ return;
+ quic_quicly_crypto_context_make_key_from_crctx (&kv, crctx);
+ clib_bihash_add_del_24_8 (&crctx_hash[thread_index], &kv, 0 /* is_add */);
+ clib_mem_free (crctx->data);
+ pool_put (
+ quic_wrk_ctx_get (quic_quicly_main.qm, thread_index)->crypto_ctx_pool,
+ crctx);
+}
+
+static int
+quic_quicly_on_stream_open (quicly_stream_open_t *self,
+ quicly_stream_t *stream)
+{
+ /* Return code for this function ends either
+ * - in quicly_receive : if not QUICLY_ERROR_PACKET_IGNORED, will close
+ * connection
+ * - in quicly_open_stream, returned directly
+ */
+
+ session_t *stream_session, *quic_session;
+ quic_stream_data_t *stream_data;
+ app_worker_t *app_wrk;
+ quic_ctx_t *qctx, *sctx;
+ u32 sctx_id;
+ int rv;
+
+ QUIC_DBG (2, "on_stream_open called");
+ stream->data = clib_mem_alloc (sizeof (quic_stream_data_t));
+ stream->callbacks = &quic_quicly_stream_callbacks;
+ /* Notify accept on parent qsession, but only if this is not a locally
+ * initiated stream */
+ if (quicly_stream_is_self_initiated (stream))
+ {
+ QUIC_DBG (2, "Nothing to do on locally initiated stream");
+ return 0;
+ }
+
+ sctx_id = quic_ctx_alloc (quic_quicly_main.qm, vlib_get_thread_index ());
+ qctx = quic_quicly_get_conn_ctx (stream->conn);
+
+ /* Might need to signal that the connection is ready if the first thing the
+ * server does is open a stream */
+ quic_quicly_check_quic_session_connected (qctx);
+ /* ctx might be invalidated */
+ qctx = quic_quicly_get_conn_ctx (stream->conn);
+ QUIC_DBG (2, "qctx->c_s_index %u, qctx->c_c_index %u", qctx->c_s_index,
+ qctx->c_c_index);
+
+ if (qctx->c_s_index == QUIC_SESSION_INVALID)
+ {
+ QUIC_DBG (2, "Invalid session index on quic c_index %u",
+ qctx->c_c_index);
+ return 0;
+ }
+ stream_session = session_alloc (qctx->c_thread_index);
+ QUIC_DBG (2, "ACCEPTED stream_session 0x%lx ctx %u",
+ session_handle (stream_session), sctx_id);
+ sctx = quic_quicly_get_quic_ctx (sctx_id, qctx->c_thread_index);
+ sctx->parent_app_wrk_id = qctx->parent_app_wrk_id;
+ sctx->parent_app_id = qctx->parent_app_id;
+ sctx->quic_connection_ctx_id = qctx->c_c_index;
+ sctx->c_c_index = sctx_id;
+ sctx->c_s_index = stream_session->session_index;
+ sctx->stream = stream;
+ sctx->c_flags |= TRANSPORT_CONNECTION_F_NO_LOOKUP;
+ sctx->flags |= QUIC_F_IS_STREAM;
+ sctx->crypto_context_index = qctx->crypto_context_index;
+ if (quicly_stream_is_unidirectional (stream->stream_id))
+ stream_session->flags |= SESSION_F_UNIDIRECTIONAL;
+
+ stream_data = (quic_stream_data_t *) stream->data;
+ stream_data->ctx_id = sctx_id;
+ stream_data->thread_index = sctx->c_thread_index;
+ stream_data->app_rx_data_len = 0;
+ stream_data->app_tx_data_len = 0;
+
+ sctx->c_s_index = stream_session->session_index;
+ stream_session->session_state = SESSION_STATE_CREATED;
+ stream_session->app_wrk_index = sctx->parent_app_wrk_id;
+ stream_session->connection_index = sctx->c_c_index;
+ stream_session->session_type =
+ session_type_from_proto_and_ip (TRANSPORT_PROTO_QUIC, qctx->udp_is_ip4);
+ quic_session = session_get (qctx->c_s_index, qctx->c_thread_index);
+ /* Make sure quic session is in listening state */
+ quic_session->session_state = SESSION_STATE_LISTENING;
+ stream_session->listener_handle = listen_session_get_handle (quic_session);
+
+ app_wrk = app_worker_get (stream_session->app_wrk_index);
+ if ((rv = app_worker_init_connected (app_wrk, stream_session)))
+ {
+ QUIC_ERR ("failed to allocate fifos");
+ quicly_reset_stream (stream, QUIC_QUICLY_APP_ALLOCATION_ERROR);
+ return 0; /* Frame is still valid */
+ }
+ svm_fifo_add_want_deq_ntf (stream_session->rx_fifo,
+ SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL |
+ SVM_FIFO_WANT_DEQ_NOTIF_IF_EMPTY);
+ svm_fifo_init_ooo_lookup (stream_session->rx_fifo, 0 /* ooo enq */);
+ svm_fifo_init_ooo_lookup (stream_session->tx_fifo, 1 /* ooo deq */);
+
+ stream_session->session_state = SESSION_STATE_ACCEPTING;
+ if ((rv = app_worker_accept_notify (app_wrk, stream_session)))
+ {
+ QUIC_ERR ("failed to notify accept worker app");
+ quicly_reset_stream (stream, QUIC_QUICLY_APP_ACCEPT_NOTIFY_ERROR);
+ return 0; /* Frame is still valid */
+ }
+
+ return 0;
+}
+
+static void
+quic_quicly_on_closed_by_remote (quicly_closed_by_remote_t *self,
+ quicly_conn_t *conn, int code,
+ uint64_t frame_type, const char *reason,
+ size_t reason_len)
+{
+ quic_ctx_t *ctx = quic_quicly_get_conn_ctx (conn);
+#if QUIC_DEBUG >= 2
+ session_t *quic_session = session_get (ctx->c_s_index, ctx->c_thread_index);
+ clib_warning ("Session 0x%lx closed by peer (%U) %.*s ",
+ session_handle (quic_session), quic_quicly_format_err, code,
+ reason_len, reason);
+#endif
+ ctx->conn_state = QUIC_CONN_STATE_PASSIVE_CLOSING;
+ session_transport_closing_notify (&ctx->connection);
+}
+
+static int64_t
+quic_quicly_get_time (quicly_now_t *self)
+{
+ return (int64_t) quic_wrk_ctx_get (quic_quicly_main.qm,
+ vlib_get_thread_index ())
+ ->time_now;
+}
+
+static quicly_stream_open_t on_stream_open = { quic_quicly_on_stream_open };
+static quicly_closed_by_remote_t on_closed_by_remote = {
+ quic_quicly_on_closed_by_remote
+};
+static quicly_now_t quicly_vpp_now_cb = { quic_quicly_get_time };
+
+static int
+quic_quicly_init_crypto_context (crypto_context_t *crctx, quic_ctx_t *ctx)
+{
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ quic_main_t *qm = qqm->qm;
+ quicly_context_t *quicly_ctx;
+ ptls_iovec_t key_vec;
+ app_cert_key_pair_t *ckpair;
+ application_t *app;
+ quic_quicly_crypto_context_data_t *data;
+ ptls_context_t *ptls_ctx;
+ u32 i;
+
+ QUIC_DBG (2, "Init quic crctx %d thread %d", crctx->ctx_index,
+ ctx->c_thread_index);
+ quic_quicly_register_cipher_suite (CRYPTO_ENGINE_PICOTLS,
+ ptls_openssl_cipher_suites);
+
+ vnet_crypto_main_t *cm = &crypto_main;
+ if (vec_len (cm->engines) > 0)
+ qqm->vnet_crypto_enabled = 0;
+ else
+ {
+ qqm->vnet_crypto_enabled = 1;
+ u8 empty_key[32] = {};
+ quic_quicly_register_cipher_suite (CRYPTO_ENGINE_VPP,
+ quic_quicly_crypto_cipher_suites);
+ qm->default_crypto_engine = CRYPTO_ENGINE_VPP;
+ vec_validate (qqm->per_thread_crypto_key_indices, qm->num_threads);
+ for (i = 0; i < qm->num_threads; i++)
+ {
+ qqm->per_thread_crypto_key_indices[i] = vnet_crypto_key_add (
+ vlib_get_main (), VNET_CRYPTO_ALG_AES_256_CTR, empty_key, 32);
+ }
+ }
+
+ /* TODO: Remove this and clean up legacy provider code in quicly */
+ quic_quicly_load_openssl3_legacy_provider ();
+
+ data = clib_mem_alloc (sizeof (*data));
+ /* picotls depends on data being zeroed */
+ clib_memset (data, 0, sizeof (*data));
+ crctx->data = (void *) data;
+ quicly_ctx = &data->quicly_ctx;
+ ptls_ctx = &data->ptls_ctx;
+
+ ptls_ctx->random_bytes = ptls_openssl_random_bytes;
+ ptls_ctx->get_time = &ptls_get_time;
+ ptls_ctx->key_exchanges = ptls_openssl_key_exchanges;
+ ptls_ctx->cipher_suites = qqm->quic_ciphers[ctx->crypto_engine];
+ ptls_ctx->certificates.list = NULL;
+ ptls_ctx->certificates.count = 0;
+ ptls_ctx->on_client_hello = NULL;
+ ptls_ctx->emit_certificate = NULL;
+ ptls_ctx->sign_certificate = NULL;
+ ptls_ctx->verify_certificate = NULL;
+ ptls_ctx->ticket_lifetime = 86400;
+ ptls_ctx->max_early_data_size = 8192;
+ ptls_ctx->hkdf_label_prefix__obsolete = NULL;
+ ptls_ctx->require_dhe_on_psk = 1;
+ ptls_ctx->encrypt_ticket = &qqm->session_cache.super;
+ clib_memcpy (quicly_ctx, &quicly_spec_context, sizeof (quicly_context_t));
+
+ quicly_ctx->max_packets_per_key = qm->max_packets_per_key;
+ quicly_ctx->tls = ptls_ctx;
+ quicly_ctx->stream_open = &on_stream_open;
+ quicly_ctx->closed_by_remote = &on_closed_by_remote;
+ quicly_ctx->now = &quicly_vpp_now_cb;
+ quicly_amend_ptls_context (quicly_ctx->tls);
+
+ if (qqm->vnet_crypto_enabled &&
+ qm->default_crypto_engine == CRYPTO_ENGINE_VPP)
+ quicly_ctx->crypto_engine = &quic_quicly_crypto_engine;
+ else
+ quicly_ctx->crypto_engine = &quicly_default_crypto_engine;
+
+ quicly_ctx->transport_params.max_data = QUIC_INT_MAX;
+ quicly_ctx->transport_params.max_streams_uni = (uint64_t) 1 << 60;
+ quicly_ctx->transport_params.max_streams_bidi = (uint64_t) 1 << 60;
+ quicly_ctx->transport_params.max_idle_timeout = qm->connection_timeout;
+
+ quicly_ctx->init_cc = (qm->default_quic_cc == QUIC_CC_CUBIC) ?
+ &quicly_cc_cubic_init :
+ &quicly_cc_reno_init;
+
+ app = application_get (ctx->parent_app_id);
+ quicly_ctx->transport_params.max_stream_data.bidi_local =
+ app->sm_properties.rx_fifo_size - 1;
+ quicly_ctx->transport_params.max_stream_data.bidi_remote =
+ app->sm_properties.tx_fifo_size - 1;
+ quicly_ctx->transport_params.max_stream_data.uni = QUIC_INT_MAX;
+
+ quicly_ctx->transport_params.max_udp_payload_size = QUIC_MAX_PACKET_SIZE;
+ if (!app->quic_iv_set)
+ {
+ ptls_openssl_random_bytes (app->quic_iv, QUIC_IV_LEN - 1);
+ app->quic_iv[QUIC_IV_LEN - 1] = 0;
+ app->quic_iv_set = 1;
+ }
+
+ clib_memcpy (data->cid_key, app->quic_iv, QUIC_IV_LEN);
+ key_vec = ptls_iovec_init (data->cid_key, QUIC_IV_LEN);
+ quicly_ctx->cid_encryptor = quicly_new_default_cid_encryptor (
+ &ptls_openssl_bfecb, &ptls_openssl_aes128ecb, &ptls_openssl_sha256,
+ key_vec);
+
+ ckpair = app_cert_key_pair_get_if_valid (crctx->ckpair_index);
+ if (!ckpair || !ckpair->key || !ckpair->cert)
+ {
+ QUIC_DBG (1, "Wrong ckpair id %d\n", crctx->ckpair_index);
+ return -1;
+ }
+ if (load_bio_private_key (quicly_ctx->tls, (char *) ckpair->key))
+ {
+ QUIC_DBG (1, "failed to read private key from app configuration\n");
+ return -1;
+ }
+ if (load_bio_certificate_chain (quicly_ctx->tls, (char *) ckpair->cert))
+ {
+ QUIC_DBG (1, "failed to load certificate\n");
+ return -1;
+ }
+ return 0;
+}
+
+void
+quic_quicly_crypto_context_release (u32 crypto_context_index, u8 thread_index)
+{
+ crypto_context_t *crctx;
+ crctx = quic_quicly_crypto_context_get (crypto_context_index, thread_index);
+ crctx->n_subscribers--;
+ quic_quicly_crypto_context_free_if_needed (crctx, thread_index);
+}
+
+int
+quic_quicly_crypto_context_acquire (quic_ctx_t *ctx)
+{
+ /* import from src/vnet/session/application.c */
+ extern u8 *format_crypto_engine (u8 * s, va_list * args);
+
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ quic_main_t *qm = qqm->qm;
+ clib_bihash_24_8_t *crctx_hash = qqm->crypto_ctx_hash;
+ crypto_context_t *crctx;
+ clib_bihash_kv_24_8_t kv;
+
+ if (ctx->crypto_engine == CRYPTO_ENGINE_NONE)
+ {
+ QUIC_DBG (2, "No crypto engine specified, using %U",
+ format_crypto_engine, qm->default_crypto_engine);
+ ctx->crypto_engine = qm->default_crypto_engine;
+ }
+ if (!clib_bitmap_get (qqm->available_crypto_engines, ctx->crypto_engine))
+ {
+ QUIC_DBG (1, "Quic does not support crypto engine %U",
+ format_crypto_engine, ctx->crypto_engine);
+ return SESSION_E_NOCRYPTOENG;
+ }
+  /* Check for existing crypto ctx */
+ quic_quicly_crypto_context_make_key_from_ctx (&kv, ctx);
+ if (clib_bihash_search_24_8 (&crctx_hash[ctx->c_thread_index], &kv, &kv) ==
+ 0)
+ {
+ crctx = quic_quicly_crypto_context_get (kv.value, ctx->c_thread_index);
+      QUIC_DBG (2, "Found existing crypto context %d", kv.value);
+ ctx->crypto_context_index = kv.value;
+ crctx->n_subscribers++;
+ return 0;
+ }
+
+ crctx = quic_quicly_crypto_context_alloc (ctx->c_thread_index);
+ ctx->crypto_context_index = crctx->ctx_index;
+ kv.value = crctx->ctx_index;
+ crctx->crypto_engine = ctx->crypto_engine;
+ crctx->ckpair_index = ctx->ckpair_index;
+ if (quic_quicly_init_crypto_context (crctx, ctx))
+ goto error;
+ if (vnet_app_add_cert_key_interest (ctx->ckpair_index, qm->app_index))
+ goto error;
+ crctx->n_subscribers++;
+ clib_bihash_add_del_24_8 (&crctx_hash[ctx->c_thread_index], &kv,
+ 1 /* is_add */);
+ return 0;
+
+error:
+ quic_quicly_crypto_context_free_if_needed (crctx, ctx->c_thread_index);
+ return SESSION_E_NOCRYPTOCKP;
+}
+
+static int
+quic_quicly_crypto_setup_cipher (quicly_crypto_engine_t *engine,
+ quicly_conn_t *conn, size_t epoch, int is_enc,
+ ptls_cipher_context_t **header_protect_ctx,
+ ptls_aead_context_t **packet_protect_ctx,
+ ptls_aead_algorithm_t *aead,
+ ptls_hash_algorithm_t *hash,
+ const void *secret)
+{
+ uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
+ int ret;
+
+ *packet_protect_ctx = NULL;
+ /* generate new header protection key */
+ if (header_protect_ctx != NULL)
+ {
+ *header_protect_ctx = NULL;
+ ret =
+ ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
+ ptls_iovec_init (secret, hash->digest_size),
+ "quic hp", ptls_iovec_init (NULL, 0), NULL);
+ if (ret)
+ goto Exit;
+ *header_protect_ctx = ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey);
+ if (NULL == *header_protect_ctx)
+ {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ }
+
+ /* generate new AEAD context */
+ *packet_protect_ctx =
+ ptls_aead_new (aead, hash, is_enc, secret, QUICLY_AEAD_BASE_LABEL);
+ if (NULL == *packet_protect_ctx)
+ {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+
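+  /* Stash the 1-RTT ingress keys so quic_quicly_crypto_decrypt_packet () can
+   * perform header and payload decryption outside of quicly. */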
+ if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
+ {
+ quic_ctx_t *qctx = quic_quicly_get_conn_ctx (conn);
+ if (qctx->ingress_keys.aead_ctx != NULL)
+ qctx->key_phase_ingress++;
+
+ qctx->ingress_keys.aead_ctx = (void *) *packet_protect_ctx;
+ if (header_protect_ctx != NULL)
+ {
+ qctx->ingress_keys.hp_ctx = (void *) *header_protect_ctx;
+ }
+ }
+
+ ret = 0;
+
+Exit:
+ if (ret)
+ {
+ if (*packet_protect_ctx != NULL)
+ {
+ ptls_aead_free (*packet_protect_ctx);
+ *packet_protect_ctx = NULL;
+ }
+ if (header_protect_ctx && *header_protect_ctx != NULL)
+ {
+ ptls_cipher_free (*header_protect_ctx);
+ *header_protect_ctx = NULL;
+ }
+ }
+ ptls_clear_memory (hpkey, sizeof (hpkey));
+ return ret;
+}
+
+static u32
+quic_quicly_crypto_set_key (crypto_key_t *key)
+{
+ u8 thread_index = vlib_get_thread_index ();
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ u32 key_id = qqm->per_thread_crypto_key_indices[thread_index];
+ vnet_crypto_key_t *vnet_key = vnet_crypto_get_key (key_id);
+ vnet_crypto_engine_t *engine;
+ vnet_crypto_main_t *cm = &crypto_main;
+
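+  /* Reuse the pre-allocated per-thread key slot: tell each engine to drop
+   * the old key, rewrite the key material in place, then re-add it. */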
+ vec_foreach (engine, cm->engines)
+ if (engine->key_op_handler)
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_DEL, key_id);
+
+ vnet_key->alg = key->algo;
+ clib_memcpy (vnet_key->data, key->key, key->key_len);
+
+ vec_foreach (engine, cm->engines)
+ if (engine->key_op_handler)
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key_id);
+
+ return key_id;
+}
+
+static void
+quic_quicly_crypto_encrypt_packet (struct st_quicly_crypto_engine_t *engine,
+ quicly_conn_t *conn,
+ ptls_cipher_context_t *header_protect_ctx,
+ ptls_aead_context_t *packet_protect_ctx,
+ ptls_iovec_t datagram, size_t first_byte_at,
+ size_t payload_from, uint64_t packet_number,
+ int coalesced)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ struct cipher_context_t *hp_ctx =
+ (struct cipher_context_t *) header_protect_ctx;
+ struct aead_crypto_context_t *aead_ctx =
+ (struct aead_crypto_context_t *) packet_protect_ctx;
+
+ void *input = datagram.base + payload_from;
+ void *output = input;
+ size_t inlen =
+ datagram.len - payload_from - packet_protect_ctx->algo->tag_size;
+ const void *aad = datagram.base + first_byte_at;
+ size_t aadlen = payload_from - first_byte_at;
+
+ /* Build AEAD encrypt crypto operation */
+ vnet_crypto_op_init (&aead_ctx->op, aead_ctx->id);
+ aead_ctx->op.aad = (u8 *) aad;
+ aead_ctx->op.aad_len = aadlen;
+ aead_ctx->op.iv = aead_ctx->iv;
+ ptls_aead__build_iv (aead_ctx->super.algo, aead_ctx->op.iv,
+ aead_ctx->static_iv, packet_number);
+ aead_ctx->op.key_index = quic_quicly_crypto_set_key (&aead_ctx->key);
+ aead_ctx->op.src = (u8 *) input;
+ aead_ctx->op.dst = output;
+ aead_ctx->op.len = inlen;
+ aead_ctx->op.tag_len = aead_ctx->super.algo->tag_size;
+ aead_ctx->op.tag = aead_ctx->op.src + inlen;
+ vnet_crypto_process_ops (vm, &(aead_ctx->op), 1);
+ assert (aead_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
+
+  /* Set up the header protection sample input (supplementary encryption) */
+ ptls_aead_supplementary_encryption_t supp = {
+ .ctx = header_protect_ctx,
+ .input =
+ datagram.base + payload_from - QUICLY_SEND_PN_SIZE + QUICLY_MAX_PN_SIZE
+ };
+
+ /* Build Header protection crypto operation */
+ vnet_crypto_op_init (&hp_ctx->op, hp_ctx->id);
+ memset (supp.output, 0, sizeof (supp.output));
+ hp_ctx->op.iv = (u8 *) supp.input;
+  hp_ctx->op.key_index = quic_quicly_crypto_set_key (&hp_ctx->key);
+ hp_ctx->op.src = (u8 *) supp.output;
+ hp_ctx->op.dst = (u8 *) supp.output;
+ hp_ctx->op.len = sizeof (supp.output);
+ vnet_crypto_process_ops (vm, &(hp_ctx->op), 1);
+ assert (hp_ctx->op.status == VNET_CRYPTO_OP_STATUS_COMPLETED);
+
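+  /* Apply header protection: mask the protected bits of the first byte and
+   * the packet number bytes with the computed mask. */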
+ datagram.base[first_byte_at] ^=
+ supp.output[0] &
+ (QUICLY_PACKET_IS_LONG_HEADER (datagram.base[first_byte_at]) ? 0xf : 0x1f);
+ for (size_t i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
+ datagram.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
+ supp.output[i + 1];
+}
+
+static size_t
+quic_quicly_crypto_aead_decrypt (quic_ctx_t *qctx, ptls_aead_context_t *_ctx,
+ void *_output, const void *input,
+ size_t inlen, uint64_t decrypted_pn,
+ const void *aad, size_t aadlen)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
+
+ vnet_crypto_op_init (&ctx->op, ctx->id);
+ ctx->op.aad = (u8 *) aad;
+ ctx->op.aad_len = aadlen;
+ ctx->op.iv = ctx->iv;
+ ptls_aead__build_iv (ctx->super.algo, ctx->op.iv, ctx->static_iv,
+ decrypted_pn);
+ ctx->op.src = (u8 *) input;
+ ctx->op.dst = _output;
+ ctx->op.key_index = quic_quicly_crypto_set_key (&ctx->key);
+ ctx->op.len = inlen - ctx->super.algo->tag_size;
+ ctx->op.tag_len = ctx->super.algo->tag_size;
+ ctx->op.tag = ctx->op.src + ctx->op.len;
+
+ vnet_crypto_process_ops (vm, &(ctx->op), 1);
+
+ return ctx->op.len;
+}
+
+void
+quic_quicly_crypto_decrypt_packet (quic_ctx_t *qctx,
+ quic_quicly_rx_packet_ctx_t *pctx)
+{
+ ptls_cipher_context_t *header_protection = NULL;
+ ptls_aead_context_t *aead = NULL;
+ int pn;
+
+ /* Long Header packets are not decrypted by vpp */
+ if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
+ return;
+
+ uint64_t next_expected_packet_number =
+ quicly_get_next_expected_packet_number (qctx->conn);
+ if (next_expected_packet_number == UINT64_MAX)
+ return;
+
+ aead = (ptls_aead_context_t *) qctx->ingress_keys.aead_ctx;
+ header_protection = (ptls_cipher_context_t *) qctx->ingress_keys.hp_ctx;
+
+ if (!aead || !header_protection)
+ return;
+
+ size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
+ uint8_t hpmask[5] = { 0 };
+ uint32_t pnbits = 0;
+ size_t pnlen, ptlen, i;
+
+ /* decipher the header protection, as well as obtaining pnbits, pnlen */
+ if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
+ return;
+ ptls_cipher_init (header_protection, pctx->packet.octets.base +
+ pctx->packet.encrypted_off +
+ QUICLY_MAX_PN_SIZE);
+ ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
+ pctx->packet.octets.base[0] ^=
+ hpmask[0] &
+ (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf : 0x1f);
+ pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
+ for (i = 0; i != pnlen; ++i)
+ {
+ pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
+ hpmask[i + 1];
+ pnbits = (pnbits << 8) |
+ pctx->packet.octets.base[pctx->packet.encrypted_off + i];
+ }
+
+ size_t aead_off = pctx->packet.encrypted_off + pnlen;
+
+ pn = quicly_determine_packet_number (pnbits, pnlen * 8,
+ next_expected_packet_number);
+
+ int key_phase_bit =
+ (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
+
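+  /* Key phase changed: restore the header bytes unmasked above and leave the
+   * packet to quicly's own key-update handling. */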
+ if (key_phase_bit != (qctx->key_phase_ingress & 1))
+ {
+ pctx->packet.octets.base[0] ^=
+ hpmask[0] &
+ (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
+ 0x1f);
+ for (i = 0; i != pnlen; ++i)
+ {
+ pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
+ hpmask[i + 1];
+ }
+ return;
+ }
+
+ if ((ptlen = quic_quicly_crypto_aead_decrypt (
+ qctx, aead, pctx->packet.octets.base + aead_off,
+ pctx->packet.octets.base + aead_off,
+ pctx->packet.octets.len - aead_off, pn, pctx->packet.octets.base,
+ aead_off)) == SIZE_MAX)
+ {
+ fprintf (stderr, "%s: aead decryption failure (pn: %d)\n", __FUNCTION__,
+ pn);
+ return;
+ }
+
+ pctx->packet.encrypted_off = aead_off;
+ pctx->packet.octets.len = ptlen + aead_off;
+
+ pctx->packet.decrypted.pn = pn;
+ pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
+}
+
+static int
+quic_quicly_crypto_cipher_setup_crypto (ptls_cipher_context_t *_ctx,
+ int is_enc, const void *key,
+ const EVP_CIPHER *cipher)
+{
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
+
+ vnet_crypto_alg_t algo;
+ if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
+ {
+ algo = VNET_CRYPTO_ALG_AES_128_CTR;
+ ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_CTR_ENC :
+ VNET_CRYPTO_OP_AES_128_CTR_DEC;
+ ptls_openssl_aes128ctr.setup_crypto (_ctx, is_enc, key);
+ }
+ else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
+ {
+ algo = VNET_CRYPTO_ALG_AES_256_CTR;
+ ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_CTR_ENC :
+ VNET_CRYPTO_OP_AES_256_CTR_DEC;
+ ptls_openssl_aes256ctr.setup_crypto (_ctx, is_enc, key);
+ }
+ else
+ {
+      QUIC_DBG (1, "%s: invalid crypto cipher %s", __func__, _ctx->algo->name);
+ assert (0);
+ }
+
+ if (qqm->vnet_crypto_enabled)
+ {
+ // TODO: why is this commented out (from original quic plugin)?
+ // ctx->key_index =
+ // quic_quicly_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
+ ctx->key.algo = algo;
+ ctx->key.key_len = _ctx->algo->key_size;
+ assert (ctx->key.key_len <= 32);
+ clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
+ }
+
+ return 0;
+}
+
+static int
+quic_quicly_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t *ctx,
+ int is_enc, const void *key)
+{
+ return quic_quicly_crypto_cipher_setup_crypto (ctx, 1, key,
+ EVP_aes_128_ctr ());
+}
+
+static int
+quic_quicly_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t *ctx,
+ int is_enc, const void *key)
+{
+ return quic_quicly_crypto_cipher_setup_crypto (ctx, 1, key,
+ EVP_aes_256_ctr ());
+}
+
+static int
+quic_quicly_crypto_aead_setup_crypto (ptls_aead_context_t *_ctx, int is_enc,
+ const void *key, const void *iv,
+ const EVP_CIPHER *cipher)
+{
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
+
+ vnet_crypto_alg_t algo;
+ if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
+ {
+ algo = VNET_CRYPTO_ALG_AES_128_GCM;
+ ctx->id = is_enc ? VNET_CRYPTO_OP_AES_128_GCM_ENC :
+ VNET_CRYPTO_OP_AES_128_GCM_DEC;
+ ptls_openssl_aes128gcm.setup_crypto (_ctx, is_enc, key, iv);
+ }
+ else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
+ {
+ algo = VNET_CRYPTO_ALG_AES_256_GCM;
+ ctx->id = is_enc ? VNET_CRYPTO_OP_AES_256_GCM_ENC :
+ VNET_CRYPTO_OP_AES_256_GCM_DEC;
+ ptls_openssl_aes256gcm.setup_crypto (_ctx, is_enc, key, iv);
+ }
+ else
+ {
+      QUIC_DBG (1, "%s: invalid aead cipher %s", __func__, _ctx->algo->name);
+ assert (0);
+ }
+
+ if (qqm->vnet_crypto_enabled)
+ {
+ clib_memcpy (ctx->static_iv, iv, ctx->super.algo->iv_size);
+ // TODO: why is this commented out (from original quic plugin)?
+ // ctx->key_index =
+ // quic_quicly_crypto_go_setup_key (algo, key, _ctx->algo->key_size);
+ ctx->key.algo = algo;
+ ctx->key.key_len = _ctx->algo->key_size;
+ assert (ctx->key.key_len <= 32);
+ clib_memcpy (&ctx->key.key, key, ctx->key.key_len);
+ }
+
+ return 0;
+}
+
+static int
+quic_quicly_crypto_aead_aes128gcm_setup_crypto (ptls_aead_context_t *ctx,
+ int is_enc, const void *key,
+ const void *iv)
+{
+ return quic_quicly_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
+ EVP_aes_128_gcm ());
+}
+
+static int
+quic_quicly_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t *ctx,
+ int is_enc, const void *key,
+ const void *iv)
+{
+ return quic_quicly_crypto_aead_setup_crypto (ctx, is_enc, key, iv,
+ EVP_aes_256_gcm ());
+}
+
+int
+quic_quicly_encrypt_ticket_cb (ptls_encrypt_ticket_t *_self, ptls_t *tls,
+ int is_encrypt, ptls_buffer_t *dst,
+ ptls_iovec_t src)
+{
+ quic_quicly_session_cache_t *self = (void *) _self;
+ int ret;
+
+ if (is_encrypt)
+ {
+
+ /* replace the cached entry along with a newly generated session id */
+ clib_mem_free (self->data.base);
+ if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
+ return PTLS_ERROR_NO_MEMORY;
+
+ ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
+ clib_memcpy (self->data.base, src.base, src.len);
+ self->data.len = src.len;
+
+ /* store the session id in buffer */
+ if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
+ return ret;
+ clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
+ dst->off += sizeof (self->id);
+ }
+ else
+ {
+ /* check if session id is the one stored in cache */
+ if (src.len != sizeof (self->id))
+ return PTLS_ERROR_SESSION_NOT_FOUND;
+ if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
+ return PTLS_ERROR_SESSION_NOT_FOUND;
+
+ /* return the cached value */
+ if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
+ return ret;
+ clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
+ dst->off += self->data.len;
+ }
+
+ return 0;
+}
+
+ptls_cipher_algorithm_t quic_quicly_crypto_aes128ctr = {
+ "AES128-CTR",
+ PTLS_AES128_KEY_SIZE,
+ 1,
+ PTLS_AES_IV_SIZE,
+ sizeof (struct cipher_context_t),
+ quic_quicly_crypto_aes128ctr_setup_crypto
+};
+
+ptls_cipher_algorithm_t quic_quicly_crypto_aes256ctr = {
+ "AES256-CTR",
+ PTLS_AES256_KEY_SIZE,
+ 1 /* block size */,
+ PTLS_AES_IV_SIZE,
+ sizeof (struct cipher_context_t),
+ quic_quicly_crypto_aes256ctr_setup_crypto
+};
+
+#define PTLS_X86_CACHE_LINE_ALIGN_BITS 6
+ptls_aead_algorithm_t quic_quicly_crypto_aes128gcm = {
+ "AES128-GCM",
+ PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
+ PTLS_AESGCM_INTEGRITY_LIMIT,
+ &quic_quicly_crypto_aes128ctr,
+ &ptls_openssl_aes128ecb,
+ PTLS_AES128_KEY_SIZE,
+ PTLS_AESGCM_IV_SIZE,
+ PTLS_AESGCM_TAG_SIZE,
+ { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
+ 1,
+ PTLS_X86_CACHE_LINE_ALIGN_BITS,
+ sizeof (struct aead_crypto_context_t),
+ quic_quicly_crypto_aead_aes128gcm_setup_crypto
+};
+
+ptls_aead_algorithm_t quic_quicly_crypto_aes256gcm = {
+ "AES256-GCM",
+ PTLS_AESGCM_CONFIDENTIALITY_LIMIT,
+ PTLS_AESGCM_INTEGRITY_LIMIT,
+ &quic_quicly_crypto_aes256ctr,
+ &ptls_openssl_aes256ecb,
+ PTLS_AES256_KEY_SIZE,
+ PTLS_AESGCM_IV_SIZE,
+ PTLS_AESGCM_TAG_SIZE,
+ { PTLS_TLS12_AESGCM_FIXED_IV_SIZE, PTLS_TLS12_AESGCM_RECORD_IV_SIZE },
+ 1,
+ PTLS_X86_CACHE_LINE_ALIGN_BITS,
+ sizeof (struct aead_crypto_context_t),
+ quic_quicly_crypto_aead_aes256gcm_setup_crypto
+};
+
+ptls_cipher_suite_t quic_quicly_crypto_aes128gcmsha256 = {
+ PTLS_CIPHER_SUITE_AES_128_GCM_SHA256, &quic_quicly_crypto_aes128gcm,
+ &ptls_openssl_sha256
+};
+
+ptls_cipher_suite_t quic_quicly_crypto_aes256gcmsha384 = {
+ PTLS_CIPHER_SUITE_AES_256_GCM_SHA384, &quic_quicly_crypto_aes256gcm,
+ &ptls_openssl_sha384
+};
+
+ptls_cipher_suite_t *quic_quicly_crypto_cipher_suites[] = {
+ &quic_quicly_crypto_aes256gcmsha384, &quic_quicly_crypto_aes128gcmsha256,
+ NULL
+};
+
+quicly_crypto_engine_t quic_quicly_crypto_engine = {
+ quic_quicly_crypto_setup_cipher, quic_quicly_crypto_encrypt_packet
+};
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_quic_quicly_crypto_h__
+#define __included_quic_quicly_crypto_h__
+
+#include <quic/quic.h>
+#include <quicly.h>
+#include <vnet/crypto/crypto.h>
+#include <picotls/openssl.h>
+#include <vppinfra/bihash_24_8.h>
+#include <quic_quicly/quic_quicly.h>
+#include <vnet/session/session.h>
+
+static_always_inline crypto_context_t *
+quic_quicly_crypto_context_get (u32 cr_index, u32 thread_index)
+{
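+  /* Crypto context indices encode the owning thread in the top 8 bits and
+   * the pool index in the lower 24 bits (see
+   * quic_quicly_crypto_context_alloc). */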
+ ASSERT (cr_index >> 24 == thread_index);
+ return pool_elt_at_index (
+ quic_wrk_ctx_get (quic_quicly_main.qm, thread_index)->crypto_ctx_pool,
+ cr_index & 0x00ffffff);
+}
+
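+/* 16 random bytes plus a trailing NUL, used as CID encryptor key material */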
+#define QUIC_IV_LEN 17
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+#include <openssl/provider.h>
+
+#define quic_quicly_load_openssl3_legacy_provider() \
+ do \
+ { \
+ (void) OSSL_PROVIDER_load (NULL, "legacy"); \
+ } \
+ while (0)
+#else
+#define quic_quicly_load_openssl3_legacy_provider()
+#endif
+
+extern vnet_crypto_main_t *cm;
+
+typedef struct crypto_key_
+{
+ vnet_crypto_alg_t algo;
+ u8 key[32];
+ u16 key_len;
+} crypto_key_t;
+
+struct aead_crypto_context_t
+{
+ ptls_aead_context_t super;
+ EVP_CIPHER_CTX *evp_ctx;
+ uint8_t static_iv[PTLS_MAX_IV_SIZE];
+ vnet_crypto_op_t op;
+ crypto_key_t key;
+
+ vnet_crypto_op_id_t id;
+ uint8_t iv[PTLS_MAX_IV_SIZE];
+};
+
+struct cipher_context_t
+{
+ ptls_cipher_context_t super;
+ vnet_crypto_op_t op;
+ vnet_crypto_op_id_t id;
+ crypto_key_t key;
+};
+
+typedef struct quic_quicly_crypto_context_data_
+{
+ quicly_context_t quicly_ctx;
+ char cid_key[QUIC_IV_LEN];
+ ptls_context_t ptls_ctx;
+} quic_quicly_crypto_context_data_t;
+
+static_always_inline void
+quic_quicly_register_cipher_suite (crypto_engine_type_t type,
+ ptls_cipher_suite_t **ciphers)
+{
+ quic_quicly_main_t *qqm = &quic_quicly_main;
+ vec_validate (qqm->quic_ciphers, type);
+ clib_bitmap_set (qqm->available_crypto_engines, type, 1);
+ qqm->quic_ciphers[type] = ciphers;
+}
+
+extern quicly_crypto_engine_t quic_quicly_crypto_engine;
+extern ptls_cipher_suite_t *quic_quicly_crypto_cipher_suites[];
+extern int quic_quicly_crypto_context_acquire (quic_ctx_t *ctx);
+extern void quic_quicly_crypto_context_release (u32 crypto_context_index,
+ u8 thread_index);
+extern int quic_quicly_app_cert_key_pair_delete (app_cert_key_pair_t *ckpair);
+extern int quic_quicly_encrypt_ticket_cb (ptls_encrypt_ticket_t *_self,
+ ptls_t *tls, int is_encrypt,
+ ptls_buffer_t *dst,
+ ptls_iovec_t src);
+extern void
+quic_quicly_crypto_decrypt_packet (quic_ctx_t *qctx,
+ quic_quicly_rx_packet_ctx_t *pctx);
+#endif /* __included_quic_quicly_crypto_h__ */
-/*
- * Copyright (c) 2021 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
-#include <quic/quic.h>
+#include <quic_quicly/quic_quicly_error.h>
#include <quicly.h>
#include <quicly/constants.h>
u8 *
-quic_format_err (u8 * s, va_list * args)
+quic_quicly_format_err (u8 *s, va_list *args)
{
u64 code = va_arg (*args, u64);
switch (code)
s = format (s, "no error");
break;
/* app errors */
- case QUIC_ERROR_FULL_FIFO:
+ case QUIC_QUICLY_ERROR_FULL_FIFO:
s = format (s, "full fifo");
break;
- case QUIC_APP_ERROR_CLOSE_NOTIFY:
- s = format (s, "QUIC_APP_ERROR_CLOSE_NOTIFY");
+ case QUIC_QUICLY_APP_ERROR_CLOSE_NOTIFY:
+ s = format (s, "QUIC_QUICLY_APP_ERROR_CLOSE_NOTIFY");
break;
- case QUIC_APP_ALLOCATION_ERROR:
- s = format (s, "QUIC_APP_ALLOCATION_ERROR");
+ case QUIC_QUICLY_APP_ALLOCATION_ERROR:
+ s = format (s, "QUIC_QUICLY_APP_ALLOCATION_ERROR");
break;
- case QUIC_APP_ACCEPT_NOTIFY_ERROR:
- s = format (s, "QUIC_APP_ACCEPT_NOTIFY_ERROR");
+ case QUIC_QUICLY_APP_ACCEPT_NOTIFY_ERROR:
+ s = format (s, "QUIC_QUICLY_APP_ACCEPT_NOTIFY_ERROR");
break;
- case QUIC_APP_CONNECT_NOTIFY_ERROR:
- s = format (s, "QUIC_APP_CONNECT_NOTIFY_ERROR");
+ case QUIC_QUICLY_APP_CONNECT_NOTIFY_ERROR:
+ s = format (s, "QUIC_QUICLY_APP_CONNECT_NOTIFY_ERROR");
break;
/* quicly errors */
case QUICLY_ERROR_PACKET_IGNORED:
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __included_quic_quicly_error_h__
+#define __included_quic_quicly_error_h__
+
+#include <stdarg.h>
+
+#include <vppinfra/format.h>
+
+/* error codes */
+#define QUIC_QUICLY_ERROR_FULL_FIFO 0xff10
+#define QUIC_QUICLY_APP_ERROR_CLOSE_NOTIFY \
+ QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE (0)
+#define QUIC_QUICLY_APP_ALLOCATION_ERROR \
+ QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE (0x1)
+#define QUIC_QUICLY_APP_ACCEPT_NOTIFY_ERROR \
+ QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE (0x2)
+#define QUIC_QUICLY_APP_CONNECT_NOTIFY_ERROR \
+ QUICLY_ERROR_FROM_APPLICATION_ERROR_CODE (0x3)
+
+u8 *quic_quicly_format_err (u8 *s, va_list *args);
+
+#endif /* __included_quic_quicly_error_h__ */
@classmethod
def setUpClass(cls):
cls.extra_vpp_plugin_config.append("plugin quic_plugin.so { enable }")
+ cls.extra_vpp_plugin_config.append("plugin quic_quicly_plugin.so { enable }")
super(QUICTestCase, cls).setUpClass()
def setUp(self):
@classmethod
def setUpClass(cls):
cls.extra_vpp_plugin_config.append("plugin quic_plugin.so { enable }")
+ cls.extra_vpp_plugin_config.append("plugin quic_quicly_plugin.so { enable }")
super(VCLThruHostStackQUIC, cls).setUpClass()
@classmethod