/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/crypto/crypto.h>
#include <vppinfra/lock.h>

#include <quic/quic.h>
#include <quic/quic_crypto.h>

#include <picotls/openssl.h>
24 #define QUICLY_EPOCH_1RTT 3
26 extern quic_main_t quic_main;
27 extern quic_ctx_t *quic_get_conn_ctx (quicly_conn_t * conn);
29 typedef void (*quicly_do_transform_fn) (ptls_cipher_context_t *, void *,
30 const void *, size_t);
32 struct cipher_context_t
34 ptls_cipher_context_t super;
39 struct aead_crypto_context_t
41 ptls_aead_context_t super;
47 quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
48 ptls_aead_context_t * _ctx, void *_output,
49 const void *input, size_t inlen,
50 uint64_t decrypted_pn, const void *aad,
53 vnet_crypto_main_t *cm = &crypto_main;
56 quic_crypto_batch_tx_packets (quic_crypto_batch_ctx_t * batch_ctx)
58 vlib_main_t *vm = vlib_get_main ();
60 if (batch_ctx->nb_tx_packets <= 0)
63 clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
64 vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_tx_packets_ops,
65 batch_ctx->nb_tx_packets);
66 clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);
68 for (int i = 0; i < batch_ctx->nb_tx_packets; i++)
69 clib_mem_free (batch_ctx->aead_crypto_tx_packets_ops[i].iv);
71 batch_ctx->nb_tx_packets = 0;
75 quic_crypto_batch_rx_packets (quic_crypto_batch_ctx_t * batch_ctx)
77 vlib_main_t *vm = vlib_get_main ();
79 if (batch_ctx->nb_rx_packets <= 0)
82 clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
83 vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_rx_packets_ops,
84 batch_ctx->nb_rx_packets);
85 clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);
87 for (int i = 0; i < batch_ctx->nb_rx_packets; i++)
88 clib_mem_free (batch_ctx->aead_crypto_rx_packets_ops[i].iv);
90 batch_ctx->nb_rx_packets = 0;
94 build_iv (ptls_aead_context_t * ctx, uint8_t * iv, uint64_t seq)
96 size_t iv_size = ctx->algo->iv_size, i;
97 const uint8_t *s = ctx->static_iv;
100 for (i = iv_size - 8; i != 0; --i)
106 *d++ = *s++ ^ (uint8_t) (seq >> i);
112 do_finalize_send_packet (ptls_cipher_context_t * hp,
113 quicly_datagram_t * packet,
114 size_t first_byte_at, size_t payload_from)
116 uint8_t hpmask[1 + QUICLY_SEND_PN_SIZE] = {
121 ptls_cipher_init (hp,
122 packet->data.base + payload_from - QUICLY_SEND_PN_SIZE +
124 ptls_cipher_encrypt (hp, hpmask, hpmask, sizeof (hpmask));
126 packet->data.base[first_byte_at] ^=
128 (QUICLY_PACKET_IS_LONG_HEADER (packet->data.base[first_byte_at]) ? 0xf :
131 for (i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
132 packet->data.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
137 quic_crypto_finalize_send_packet (quicly_datagram_t * packet)
139 quic_encrypt_cb_ctx *encrypt_cb_ctx =
140 (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));
142 for (int i = 0; i < encrypt_cb_ctx->snd_ctx_count; i++)
144 do_finalize_send_packet (encrypt_cb_ctx->snd_ctx[i].hp,
146 encrypt_cb_ctx->snd_ctx[i].first_byte_at,
147 encrypt_cb_ctx->snd_ctx[i].payload_from);
149 encrypt_cb_ctx->snd_ctx_count = 0;
153 quic_crypto_setup_cipher (quicly_crypto_engine_t * engine,
154 quicly_conn_t * conn, size_t epoch, int is_enc,
155 ptls_cipher_context_t ** hp_ctx,
156 ptls_aead_context_t ** aead_ctx,
157 ptls_aead_algorithm_t * aead,
158 ptls_hash_algorithm_t * hash, const void *secret)
160 uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
164 /* generate new header protection key */
168 ret = ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
169 ptls_iovec_init (secret,
171 "quic hp", ptls_iovec_init (NULL, 0),
175 *hp_ctx = ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey);
178 ret = PTLS_ERROR_NO_MEMORY;
183 /* generate new AEAD context */
184 *aead_ctx = ptls_aead_new (aead, hash, is_enc, secret,
185 QUICLY_AEAD_BASE_LABEL);
186 if (NULL == *aead_ctx)
188 ret = PTLS_ERROR_NO_MEMORY;
192 if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
194 quic_ctx_t *qctx = quic_get_conn_ctx (conn);
195 if (qctx->ingress_keys.aead_ctx != NULL)
196 qctx->key_phase_ingress++;
198 qctx->ingress_keys.aead_ctx = *aead_ctx;
200 qctx->ingress_keys.hp_ctx = *hp_ctx;
208 if (*aead_ctx != NULL)
210 ptls_aead_free (*aead_ctx);
213 if (hp_ctx && *hp_ctx != NULL)
215 ptls_cipher_free (*hp_ctx);
219 ptls_clear_memory (hpkey, sizeof (hpkey));
224 quic_crypto_finalize_send_packet_cb (struct st_quicly_crypto_engine_t
225 *engine, quicly_conn_t * conn,
226 ptls_cipher_context_t * hp,
227 ptls_aead_context_t * aead,
228 quicly_datagram_t * packet,
229 size_t first_byte_at,
230 size_t payload_from, int coalesced)
232 quic_encrypt_cb_ctx *encrypt_cb_ctx =
233 (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));
235 encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].hp = hp;
236 encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].first_byte_at =
238 encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].payload_from =
240 encrypt_cb_ctx->snd_ctx_count++;
244 quic_crypto_decrypt_packet (quic_ctx_t * qctx, quic_rx_packet_ctx_t * pctx)
246 ptls_cipher_context_t *header_protection = NULL;
247 ptls_aead_context_t *aead = NULL;
250 /* Long Header packets are not decrypted by vpp */
251 if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
254 uint64_t next_expected_packet_number =
255 quicly_get_next_expected_packet_number (qctx->conn);
256 if (next_expected_packet_number == UINT64_MAX)
259 aead = qctx->ingress_keys.aead_ctx;
260 header_protection = qctx->ingress_keys.hp_ctx;
262 if (!aead || !header_protection)
265 size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
266 uint8_t hpmask[5] = { 0 };
268 size_t pnlen, ptlen, i;
270 /* decipher the header protection, as well as obtaining pnbits, pnlen */
271 if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
273 ptls_cipher_init (header_protection,
274 pctx->packet.octets.base + pctx->packet.encrypted_off +
276 ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
277 pctx->packet.octets.base[0] ^=
278 hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ?
280 pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
281 for (i = 0; i != pnlen; ++i)
283 pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
286 (pnbits << 8) | pctx->packet.octets.base[pctx->packet.encrypted_off +
290 size_t aead_off = pctx->packet.encrypted_off + pnlen;
293 quicly_determine_packet_number (pnbits, pnlen * 8,
294 next_expected_packet_number);
297 (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
299 if (key_phase_bit != (qctx->key_phase_ingress & 1))
301 pctx->packet.octets.base[0] ^=
303 (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
305 for (i = 0; i != pnlen; ++i)
307 pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
314 quic_crypto_offload_aead_decrypt (qctx, aead,
315 pctx->packet.octets.base + aead_off,
316 pctx->packet.octets.base + aead_off,
317 pctx->packet.octets.len - aead_off,
318 pn, pctx->packet.octets.base,
319 aead_off)) == SIZE_MAX)
322 "%s: aead decryption failure (pn: %d)\n", __FUNCTION__, pn);
326 pctx->packet.encrypted_off = aead_off;
327 pctx->packet.octets.len = ptlen + aead_off;
329 pctx->packet.decrypted.pn = pn;
330 pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
333 #ifdef QUIC_HP_CRYPTO
335 quic_crypto_cipher_do_init (ptls_cipher_context_t * _ctx, const void *iv)
337 struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
338 vnet_crypto_op_id_t id;
339 if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
341 id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
343 else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
345 id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
349 QUIC_DBG (1, "%s, Invalid crypto cipher : ", __FUNCTION__,
353 vnet_crypto_op_init (&ctx->op, id);
354 ctx->op.iv = (u8 *) iv;
355 ctx->op.key_index = ctx->key_index;
359 quic_crypto_cipher_dispose (ptls_cipher_context_t * _ctx)
365 quic_crypto_cipher_encrypt (ptls_cipher_context_t * _ctx, void *output,
366 const void *input, size_t _len)
368 vlib_main_t *vm = vlib_get_main ();
369 struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
371 ctx->op.src = (u8 *) input;
372 ctx->op.dst = output;
375 vnet_crypto_process_ops (vm, &ctx->op, 1);
379 quic_crypto_cipher_setup_crypto (ptls_cipher_context_t * _ctx, int is_enc,
380 const void *key, const EVP_CIPHER * cipher,
381 quicly_do_transform_fn do_transform)
383 struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
385 ctx->super.do_dispose = quic_crypto_cipher_dispose;
386 ctx->super.do_init = quic_crypto_cipher_do_init;
387 ctx->super.do_transform = do_transform;
389 vlib_main_t *vm = vlib_get_main ();
390 vnet_crypto_alg_t algo;
391 if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
393 algo = VNET_CRYPTO_ALG_AES_128_CTR;
395 else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
397 algo = VNET_CRYPTO_ALG_AES_256_CTR;
401 QUIC_DBG (1, "%s, Invalid crypto cipher : ", __FUNCTION__,
406 ctx->key_index = vnet_crypto_key_add (vm, algo,
407 (u8 *) key, _ctx->algo->key_size);
413 quic_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
416 return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_128_ctr (),
417 quic_crypto_cipher_encrypt);
421 quic_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
424 return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_256_ctr (),
425 quic_crypto_cipher_encrypt);
428 #endif // QUIC_HP_CRYPTO
431 quic_crypto_aead_encrypt_init (ptls_aead_context_t * _ctx, const void *iv,
432 const void *aad, size_t aadlen)
434 quic_main_t *qm = &quic_main;
435 u32 thread_index = vlib_get_thread_index ();
437 struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
439 vnet_crypto_op_id_t id;
440 if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
442 id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
444 else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
446 id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
453 quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
454 &qm->wrk_ctx[thread_index].crypto_context_batch;
456 vnet_crypto_op_t *vnet_op =
457 &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
458 [quic_crypto_batch_ctx->nb_tx_packets];
459 vnet_crypto_op_init (vnet_op, id);
460 vnet_op->aad = (u8 *) aad;
461 vnet_op->aad_len = aadlen;
462 vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
463 clib_memcpy (vnet_op->iv, iv, PTLS_MAX_IV_SIZE);
464 vnet_op->key_index = ctx->key_index;
468 quic_crypto_aead_encrypt_update (ptls_aead_context_t * _ctx, void *output,
469 const void *input, size_t inlen)
471 struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
473 quic_main_t *qm = &quic_main;
474 u32 thread_index = vlib_get_thread_index ();
475 quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
476 &qm->wrk_ctx[thread_index].crypto_context_batch;
478 vnet_crypto_op_t *vnet_op =
479 &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
480 [quic_crypto_batch_ctx->nb_tx_packets];
481 vnet_op->src = (u8 *) input;
482 vnet_op->dst = output;
483 vnet_op->len = inlen;
484 vnet_op->tag_len = ctx->super.algo->tag_size;
486 vnet_op->tag = vnet_op->src + inlen;
492 quic_crypto_aead_encrypt_final (ptls_aead_context_t * _ctx, void *output)
494 quic_main_t *qm = &quic_main;
495 u32 thread_index = vlib_get_thread_index ();
496 quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
497 &qm->wrk_ctx[thread_index].crypto_context_batch;
499 vnet_crypto_op_t *vnet_op =
500 &quic_crypto_batch_ctx->
501 aead_crypto_tx_packets_ops[quic_crypto_batch_ctx->nb_tx_packets];
502 quic_crypto_batch_ctx->nb_tx_packets++;
503 return vnet_op->len + vnet_op->tag_len;
507 quic_crypto_aead_decrypt (ptls_aead_context_t * _ctx, void *_output,
508 const void *input, size_t inlen, const void *iv,
509 const void *aad, size_t aadlen)
511 vlib_main_t *vm = vlib_get_main ();
512 struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
514 vnet_crypto_op_id_t id;
515 if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
517 id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
519 else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
521 id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
528 vnet_crypto_op_init (&ctx->op, id);
529 ctx->op.aad = (u8 *) aad;
530 ctx->op.aad_len = aadlen;
531 ctx->op.iv = (u8 *) iv;
533 ctx->op.src = (u8 *) input;
534 ctx->op.dst = _output;
535 ctx->op.key_index = ctx->key_index;
536 ctx->op.len = inlen - ctx->super.algo->tag_size;
538 ctx->op.tag_len = ctx->super.algo->tag_size;
539 ctx->op.tag = ctx->op.src + ctx->op.len;
541 vnet_crypto_process_ops (vm, &ctx->op, 1);
543 if (ctx->op.status != VNET_CRYPTO_OP_STATUS_COMPLETED)
550 quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
551 ptls_aead_context_t * _ctx, void *_output,
552 const void *input, size_t inlen,
553 uint64_t decrypted_pn, const void *aad,
556 struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
557 vnet_crypto_op_id_t id;
558 if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
560 id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
562 else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
564 id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
571 quic_main_t *qm = &quic_main;
572 quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
573 &qm->wrk_ctx[qctx->c_thread_index].crypto_context_batch;
575 vnet_crypto_op_t *vnet_op =
576 &quic_crypto_batch_ctx->aead_crypto_rx_packets_ops
577 [quic_crypto_batch_ctx->nb_rx_packets];
579 vnet_crypto_op_init (vnet_op, id);
580 vnet_op->aad = (u8 *) aad;
581 vnet_op->aad_len = aadlen;
582 vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
583 build_iv (_ctx, vnet_op->iv, decrypted_pn);
584 vnet_op->src = (u8 *) input;
585 vnet_op->dst = _output;
586 vnet_op->key_index = ctx->key_index;
587 vnet_op->len = inlen - ctx->super.algo->tag_size;
588 vnet_op->tag_len = ctx->super.algo->tag_size;
589 vnet_op->tag = vnet_op->src + vnet_op->len;
590 quic_crypto_batch_ctx->nb_rx_packets++;
595 quic_crypto_aead_dispose_crypto (ptls_aead_context_t * _ctx)
601 quic_crypto_aead_setup_crypto (ptls_aead_context_t * _ctx, int is_enc,
602 const void *key, const EVP_CIPHER * cipher)
604 vlib_main_t *vm = vlib_get_main ();
605 struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
607 vnet_crypto_alg_t algo;
608 if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
610 algo = VNET_CRYPTO_ALG_AES_128_GCM;
612 else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
614 algo = VNET_CRYPTO_ALG_AES_256_GCM;
618 QUIC_DBG (1, "%s, invalied aead cipher %s", __FUNCTION__,
623 if (quic_main.vnet_crypto_enabled)
625 ctx->super.do_decrypt = quic_crypto_aead_decrypt;
627 ctx->super.do_encrypt_init = quic_crypto_aead_encrypt_init;
628 ctx->super.do_encrypt_update = quic_crypto_aead_encrypt_update;
629 ctx->super.do_encrypt_final = quic_crypto_aead_encrypt_final;
630 ctx->super.dispose_crypto = quic_crypto_aead_dispose_crypto;
632 clib_rwlock_writer_lock (&quic_main.crypto_keys_quic_rw_lock);
633 ctx->key_index = vnet_crypto_key_add (vm, algo,
634 (u8 *) key, _ctx->algo->key_size);
635 clib_rwlock_writer_unlock (&quic_main.crypto_keys_quic_rw_lock);
639 if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
640 ptls_openssl_aes128gcm.setup_crypto (_ctx, is_enc, key);
641 else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
642 ptls_openssl_aes256gcm.setup_crypto (_ctx, is_enc, key);
649 quic_crypto_aead_aes128gcm_setup_crypto (ptls_aead_context_t * ctx,
650 int is_enc, const void *key)
652 return quic_crypto_aead_setup_crypto (ctx, is_enc, key, EVP_aes_128_gcm ());
656 quic_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t * ctx,
657 int is_enc, const void *key)
659 return quic_crypto_aead_setup_crypto (ctx, is_enc, key, EVP_aes_256_gcm ());
#ifdef QUIC_HP_CRYPTO
/*
 * vnet-backed CTR ciphers for header protection.  NOTE: the original
 * initializers referenced aes128ctr_setup_crypto/aes256ctr_setup_crypto,
 * which do not exist; the defined quic_crypto_* setup functions are
 * used instead.
 */
ptls_cipher_algorithm_t quic_crypto_aes128ctr = {
  "AES128-CTR",
  PTLS_AES128_KEY_SIZE,
  1 /* block size */ ,
  PTLS_AES_IV_SIZE,
  sizeof (struct cipher_context_t),
  quic_crypto_aes128ctr_setup_crypto
};

ptls_cipher_algorithm_t quic_crypto_aes256ctr = {
  "AES256-CTR", PTLS_AES256_KEY_SIZE, 1 /* block size */ ,
  PTLS_AES_IV_SIZE, sizeof (struct cipher_context_t),
  quic_crypto_aes256ctr_setup_crypto
};
#endif
676 ptls_aead_algorithm_t quic_crypto_aes128gcm = {
678 #ifdef QUIC_HP_CRYPTO
679 &quic_crypto_aes128ctr,
681 &ptls_openssl_aes128ctr,
683 &ptls_openssl_aes128ecb,
684 PTLS_AES128_KEY_SIZE,
686 PTLS_AESGCM_TAG_SIZE,
687 sizeof (struct aead_crypto_context_t),
688 quic_crypto_aead_aes128gcm_setup_crypto
691 ptls_aead_algorithm_t quic_crypto_aes256gcm = {
693 #ifdef QUIC_HP_CRYPTO
694 &quic_crypto_aes256ctr,
696 &ptls_openssl_aes256ctr,
698 &ptls_openssl_aes256ecb,
699 PTLS_AES256_KEY_SIZE,
701 PTLS_AESGCM_TAG_SIZE,
702 sizeof (struct aead_crypto_context_t),
703 quic_crypto_aead_aes256gcm_setup_crypto
706 ptls_cipher_suite_t quic_crypto_aes128gcmsha256 = {
707 PTLS_CIPHER_SUITE_AES_128_GCM_SHA256,
708 &quic_crypto_aes128gcm, &ptls_openssl_sha256
711 ptls_cipher_suite_t quic_crypto_aes256gcmsha384 = {
712 PTLS_CIPHER_SUITE_AES_256_GCM_SHA384,
713 &quic_crypto_aes256gcm, &ptls_openssl_sha384
716 ptls_cipher_suite_t *quic_crypto_cipher_suites[] = {
717 &quic_crypto_aes256gcmsha384, &quic_crypto_aes128gcmsha256, NULL
720 quicly_crypto_engine_t quic_crypto_engine = {
721 quic_crypto_setup_cipher, quic_crypto_finalize_send_packet_cb
725 quic_encrypt_ticket_cb (ptls_encrypt_ticket_t * _self, ptls_t * tls,
726 int is_encrypt, ptls_buffer_t * dst, ptls_iovec_t src)
728 quic_session_cache_t *self = (void *) _self;
734 /* replace the cached entry along with a newly generated session id */
735 clib_mem_free (self->data.base);
736 if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
737 return PTLS_ERROR_NO_MEMORY;
739 ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
740 clib_memcpy (self->data.base, src.base, src.len);
741 self->data.len = src.len;
743 /* store the session id in buffer */
744 if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
746 clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
747 dst->off += sizeof (self->id);
753 /* check if session id is the one stored in cache */
754 if (src.len != sizeof (self->id))
755 return PTLS_ERROR_SESSION_NOT_FOUND;
756 if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
757 return PTLS_ERROR_SESSION_NOT_FOUND;
759 /* return the cached value */
760 if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
762 clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
763 dst->off += self->data.len;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */