/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/crypto/crypto.h>
#include <vppinfra/lock.h>

#include <quic/quic.h>
#include <quic/quic_crypto.h>

#include <quicly.h>
#include <picotls/openssl.h>

#define QUICLY_EPOCH_1RTT 3

extern quic_main_t quic_main;
extern quic_ctx_t *quic_get_conn_ctx (quicly_conn_t * conn);

typedef void (*quicly_do_transform_fn) (ptls_cipher_context_t *, void *,
					const void *, size_t);

struct cipher_context_t
{
  ptls_cipher_context_t super;
  vnet_crypto_op_t op;
  u32 key_index;
};

struct aead_crypto_context_t
{
  ptls_aead_context_t super;
  vnet_crypto_op_t op;
  u32 key_index;
};

static size_t
quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
				  ptls_aead_context_t * _ctx, void *_output,
				  const void *input, size_t inlen,
				  uint64_t decrypted_pn, const void *aad,
				  size_t aadlen);

vnet_crypto_main_t *cm = &crypto_main;

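/*
 * TX/RX batching: AEAD operations are accumulated per worker in a
 * quic_crypto_batch_ctx_t and submitted to the vnet crypto engine in a
 * single vnet_crypto_process_ops() call.  Submission runs under the reader
 * side of the QUIC key rwlock so that key add/remove cannot race it, and
 * the per-op IVs allocated at enqueue time are freed once processed.
 */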
void
quic_crypto_batch_tx_packets (quic_crypto_batch_ctx_t * batch_ctx)
{
  vlib_main_t *vm = vlib_get_main ();

  if (batch_ctx->nb_tx_packets <= 0)
    return;

  clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
  vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_tx_packets_ops,
			   batch_ctx->nb_tx_packets);
  clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);

  for (int i = 0; i < batch_ctx->nb_tx_packets; i++)
    clib_mem_free (batch_ctx->aead_crypto_tx_packets_ops[i].iv);

  batch_ctx->nb_tx_packets = 0;
}

void
quic_crypto_batch_rx_packets (quic_crypto_batch_ctx_t * batch_ctx)
{
  vlib_main_t *vm = vlib_get_main ();

  if (batch_ctx->nb_rx_packets <= 0)
    return;

  clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
  vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_rx_packets_ops,
			   batch_ctx->nb_rx_packets);
  clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);

  for (int i = 0; i < batch_ctx->nb_rx_packets; i++)
    clib_mem_free (batch_ctx->aead_crypto_rx_packets_ops[i].iv);

  batch_ctx->nb_rx_packets = 0;
}

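/*
 * Build the per-packet AEAD nonce as specified for QUIC packet protection:
 * the 64-bit packet number, left-padded to the IV size, is XORed into the
 * low-order bytes of the static IV.
 */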
void
build_iv (ptls_aead_context_t * ctx, uint8_t * iv, uint64_t seq)
{
  size_t iv_size = ctx->algo->iv_size, i;
  const uint8_t *s = ctx->static_iv;
  uint8_t *d = iv;

  /* copy the leading bytes of the static IV, then XOR in the sequence */
  for (i = iv_size - 8; i != 0; --i)
    *d++ = *s++;
  i = 64;
  do
    {
      i -= 8;
      *d++ = *s++ ^ (uint8_t) (seq >> i);
    }
  while (i != 0);
}

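/*
 * Apply QUIC header protection to an encrypted datagram: a mask is derived
 * from a ciphertext sample taken QUICLY_MAX_PN_SIZE bytes past the start of
 * the packet number field, then XORed over the first byte (low 4 or 5 bits,
 * long vs. short header) and the packet number bytes.
 */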
static void
do_finalize_send_packet (ptls_cipher_context_t * hp,
			 quicly_datagram_t * packet,
			 size_t first_byte_at, size_t payload_from)
{
  uint8_t hpmask[1 + QUICLY_SEND_PN_SIZE] = { 0 };
  size_t i;

  ptls_cipher_init (hp,
		    packet->data.base + payload_from - QUICLY_SEND_PN_SIZE +
		    QUICLY_MAX_PN_SIZE);
  ptls_cipher_encrypt (hp, hpmask, hpmask, sizeof (hpmask));

  packet->data.base[first_byte_at] ^=
    hpmask[0] &
    (QUICLY_PACKET_IS_LONG_HEADER (packet->data.base[first_byte_at]) ? 0xf :
     0x1f);

  for (i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
    packet->data.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
      hpmask[i + 1];
}

void
quic_crypto_finalize_send_packet (quicly_datagram_t * packet)
{
  quic_encrypt_cb_ctx *encrypt_cb_ctx =
    (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));

  for (int i = 0; i < encrypt_cb_ctx->snd_ctx_count; i++)
    {
      do_finalize_send_packet (encrypt_cb_ctx->snd_ctx[i].hp, packet,
			       encrypt_cb_ctx->snd_ctx[i].first_byte_at,
			       encrypt_cb_ctx->snd_ctx[i].payload_from);
    }
  encrypt_cb_ctx->snd_ctx_count = 0;
}

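/*
 * quicly crypto-engine hook: derive the header protection key ("quic hp")
 * and the AEAD context for a new epoch.  For 1-RTT ingress keys the
 * contexts are also cached on the quic_ctx_t, bumping key_phase_ingress
 * when an existing key is replaced (key update).
 */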
static int
quic_crypto_setup_cipher (quicly_crypto_engine_t * engine,
			  quicly_conn_t * conn, size_t epoch, int is_enc,
			  ptls_cipher_context_t ** hp_ctx,
			  ptls_aead_context_t ** aead_ctx,
			  ptls_aead_algorithm_t * aead,
			  ptls_hash_algorithm_t * hash, const void *secret)
{
  uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
  int ret;

  *hp_ctx = NULL;
  *aead_ctx = NULL;

  /* generate new header protection key */
  if ((ret =
       ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
			       ptls_iovec_init (secret, hash->digest_size),
			       "quic hp", ptls_iovec_init (NULL, 0),
			       NULL)) != 0)
    goto Exit;
  if ((*hp_ctx =
       ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey)) == NULL)
    {
      ret = PTLS_ERROR_NO_MEMORY;
      goto Exit;
    }

  /* generate new AEAD context */
  if ((*aead_ctx =
       ptls_aead_new (aead, hash, is_enc, secret,
		      QUICLY_AEAD_BASE_LABEL)) == NULL)
    {
      ret = PTLS_ERROR_NO_MEMORY;
      goto Exit;
    }

  if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
    {
      quic_ctx_t *qctx = quic_get_conn_ctx (conn);
      if (qctx->ingress_keys.aead_ctx != NULL)
	{
	  qctx->key_phase_ingress++;
	}

      qctx->ingress_keys.aead_ctx = *aead_ctx;
      qctx->ingress_keys.hp_ctx = *hp_ctx;
    }

  ret = 0;

Exit:
  if (ret != 0)
    {
      if (aead_ctx && *aead_ctx != NULL)
	{
	  ptls_aead_free (*aead_ctx);
	  *aead_ctx = NULL;
	}
      if (hp_ctx && *hp_ctx != NULL)
	{
	  ptls_cipher_free (*hp_ctx);
	  *hp_ctx = NULL;
	}
    }
  ptls_clear_memory (hpkey, sizeof (hpkey));
  return ret;
}

void
quic_crypto_finalize_send_packet_cb (struct st_quicly_crypto_engine_t
				     *engine, quicly_conn_t * conn,
				     ptls_cipher_context_t * hp,
				     ptls_aead_context_t * aead,
				     quicly_datagram_t * packet,
				     size_t first_byte_at,
				     size_t payload_from, int coalesced)
{
  quic_encrypt_cb_ctx *encrypt_cb_ctx =
    (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));

  encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].hp = hp;
  encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].first_byte_at =
    first_byte_at;
  encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].payload_from =
    payload_from;
  encrypt_cb_ctx->snd_ctx_count++;
}

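/*
 * Remove header protection and enqueue the AEAD decryption of a received
 * short-header (1-RTT) packet.  Long-header packets are left for quicly to
 * decrypt in the slow path.  If the packet's key phase bit does not match
 * the current ingress key phase, header protection is re-applied and the
 * packet is handed back untouched.
 */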
void
quic_crypto_decrypt_packet (quic_ctx_t * qctx, quic_rx_packet_ctx_t * pctx)
{
  ptls_cipher_context_t *header_protection = NULL;
  ptls_aead_context_t *aead = NULL;
  int pn;

  /* Long Header packets are not decrypted by vpp */
  if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
    return;

  uint64_t next_expected_packet_number =
    quicly_get_next_expected_packet_number (qctx->conn);
  if (next_expected_packet_number == UINT64_MAX)
    return;

  aead = qctx->ingress_keys.aead_ctx;
  header_protection = qctx->ingress_keys.hp_ctx;

  if (!aead || !header_protection)
    return;

  size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
  uint8_t hpmask[5] = { 0 };
  uint32_t pnbits = 0;
  size_t pnlen, ptlen, i;

  /* decipher the header protection, as well as obtaining pnbits, pnlen */
  if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
    return;
  ptls_cipher_init (header_protection,
		    pctx->packet.octets.base + pctx->packet.encrypted_off +
		    QUICLY_MAX_PN_SIZE);
  ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
  pctx->packet.octets.base[0] ^=
    hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ?
		 0xf : 0x1f);
  pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
  for (i = 0; i != pnlen; ++i)
    {
      pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
	hpmask[i + 1];
      pnbits =
	(pnbits << 8) | pctx->packet.octets.base[pctx->packet.encrypted_off +
						 i];
    }

  size_t aead_off = pctx->packet.encrypted_off + pnlen;

  pn = quicly_determine_packet_number (pnbits, pnlen * 8,
				       next_expected_packet_number);

  int key_phase_bit =
    (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;

  if (key_phase_bit != (qctx->key_phase_ingress & 1))
    {
      /* wrong key phase: undo header protection and bail out */
      pctx->packet.octets.base[0] ^=
	hpmask[0] &
	(QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
	 0x1f);
      for (i = 0; i != pnlen; ++i)
	pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
	  hpmask[i + 1];
      return;
    }

  if ((ptlen =
       quic_crypto_offload_aead_decrypt (qctx, aead,
					 pctx->packet.octets.base + aead_off,
					 pctx->packet.octets.base + aead_off,
					 pctx->packet.octets.len - aead_off,
					 pn, pctx->packet.octets.base,
					 aead_off)) == SIZE_MAX)
    {
      fprintf (stderr,
	       "%s: aead decryption failure (pn: %d)\n", __FUNCTION__, pn);
      return;
    }

  pctx->packet.encrypted_off = aead_off;
  pctx->packet.octets.len = ptlen + aead_off;

  pctx->packet.decrypted.pn = pn;
  pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
}

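/*
 * Optional vnet-crypto backed header protection ciphers (AES-CTR).  Off by
 * default; without QUIC_HP_CRYPTO, header protection is handled by the
 * picotls OpenSSL backend (see the algorithm tables below).
 */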
#ifdef QUIC_HP_CRYPTO
static void
quic_crypto_cipher_do_init (ptls_cipher_context_t * _ctx, const void *iv)
{
  struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
  vnet_crypto_op_id_t id;
  if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
    {
      id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
    {
      id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
    }
  else
    {
      QUIC_DBG (1, "%s, invalid crypto cipher %s", __FUNCTION__,
		_ctx->algo->name);
      assert (0);
    }
  vnet_crypto_op_init (&ctx->op, id);
  ctx->op.iv = (u8 *) iv;
  ctx->op.key_index = ctx->key_index;
}

static void
quic_crypto_cipher_dispose (ptls_cipher_context_t * _ctx)
{
  /* Do nothing */
}

static void
quic_crypto_cipher_encrypt (ptls_cipher_context_t * _ctx, void *output,
			    const void *input, size_t _len)
{
  vlib_main_t *vm = vlib_get_main ();
  struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;

  ctx->op.src = (u8 *) input;
  ctx->op.dst = output;
  ctx->op.len = _len;

  vnet_crypto_process_ops (vm, &ctx->op, 1);
}

static int
quic_crypto_cipher_setup_crypto (ptls_cipher_context_t * _ctx, int is_enc,
				 const void *key, const EVP_CIPHER * cipher,
				 quicly_do_transform_fn do_transform)
{
  struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;

  ctx->super.do_dispose = quic_crypto_cipher_dispose;
  ctx->super.do_init = quic_crypto_cipher_do_init;
  ctx->super.do_transform = do_transform;

  vlib_main_t *vm = vlib_get_main ();
  vnet_crypto_alg_t algo;
  if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
    {
      algo = VNET_CRYPTO_ALG_AES_128_CTR;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-CTR"))
    {
      algo = VNET_CRYPTO_ALG_AES_256_CTR;
    }
  else
    {
      QUIC_DBG (1, "%s, invalid crypto cipher %s", __FUNCTION__,
		_ctx->algo->name);
      assert (0);
    }

  ctx->key_index = vnet_crypto_key_add (vm, algo,
					(u8 *) key, _ctx->algo->key_size);

  return 0;
}

static int
quic_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
				    const void *key)
{
  /* header protection always runs the cipher in the encrypt direction */
  return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_128_ctr (),
					  quic_crypto_cipher_encrypt);
}

static int
quic_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
				    const void *key)
{
  return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_256_ctr (),
					  quic_crypto_cipher_encrypt);
}

#endif // QUIC_HP_CRYPTO

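/*
 * picotls drives AEAD encryption as init/update/final.  The three callbacks
 * below map that sequence onto a single vnet_crypto op slot in the per
 * worker tx batch: init fills aad/iv/key, update fills src/dst/len and
 * points the tag at the end of the payload, final commits the slot by
 * bumping nb_tx_packets.
 */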
void
quic_crypto_aead_encrypt_init (ptls_aead_context_t * _ctx, const void *iv,
			       const void *aad, size_t aadlen)
{
  quic_main_t *qm = &quic_main;
  u32 thread_index = vlib_get_thread_index ();

  struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;

  vnet_crypto_op_id_t id;
  if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
    }
  else
    {
      assert (0);
    }

  quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
    &qm->wrk_ctx[thread_index].crypto_context_batch;

  vnet_crypto_op_t *vnet_op =
    &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
    [quic_crypto_batch_ctx->nb_tx_packets];
  vnet_crypto_op_init (vnet_op, id);
  vnet_op->aad = (u8 *) aad;
  vnet_op->aad_len = aadlen;
  vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
  clib_memcpy (vnet_op->iv, iv, PTLS_MAX_IV_SIZE);
  vnet_op->key_index = ctx->key_index;
}

size_t
quic_crypto_aead_encrypt_update (ptls_aead_context_t * _ctx, void *output,
				 const void *input, size_t inlen)
{
  struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;

  quic_main_t *qm = &quic_main;
  u32 thread_index = vlib_get_thread_index ();
  quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
    &qm->wrk_ctx[thread_index].crypto_context_batch;

  vnet_crypto_op_t *vnet_op =
    &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
    [quic_crypto_batch_ctx->nb_tx_packets];
  vnet_op->src = (u8 *) input;
  vnet_op->dst = output;
  vnet_op->len = inlen;
  vnet_op->tag_len = ctx->super.algo->tag_size;

  vnet_op->tag = vnet_op->src + inlen;

  return 0;
}

size_t
quic_crypto_aead_encrypt_final (ptls_aead_context_t * _ctx, void *output)
{
  quic_main_t *qm = &quic_main;
  u32 thread_index = vlib_get_thread_index ();
  quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
    &qm->wrk_ctx[thread_index].crypto_context_batch;

  vnet_crypto_op_t *vnet_op =
    &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
    [quic_crypto_batch_ctx->nb_tx_packets];
  quic_crypto_batch_ctx->nb_tx_packets++;
  return vnet_op->len + vnet_op->tag_len;
}

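/*
 * Synchronous AEAD decrypt, used outside the batched rx path (e.g. when
 * quicly itself decrypts through the regular ptls do_decrypt callback).
 * Returns SIZE_MAX on failure, like the other ptls AEAD backends.
 */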
size_t
quic_crypto_aead_decrypt (ptls_aead_context_t * _ctx, void *_output,
			  const void *input, size_t inlen, const void *iv,
			  const void *aad, size_t aadlen)
{
  vlib_main_t *vm = vlib_get_main ();
  struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;

  vnet_crypto_op_id_t id;
  if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
    }
  else
    {
      assert (0);
    }

  vnet_crypto_op_init (&ctx->op, id);
  ctx->op.aad = (u8 *) aad;
  ctx->op.aad_len = aadlen;
  ctx->op.iv = (u8 *) iv;

  ctx->op.src = (u8 *) input;
  ctx->op.dst = _output;
  ctx->op.key_index = ctx->key_index;
  ctx->op.len = inlen - ctx->super.algo->tag_size;

  ctx->op.tag_len = ctx->super.algo->tag_size;
  ctx->op.tag = ctx->op.src + ctx->op.len;

  vnet_crypto_process_ops (vm, &ctx->op, 1);

  if (ctx->op.status != VNET_CRYPTO_OP_STATUS_COMPLETED)
    return SIZE_MAX;

  return ctx->op.len;
}

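/*
 * Batched rx decrypt: instead of running the op inline, build the nonce
 * from the already-decoded packet number and queue the op in the per
 * worker rx batch, to be executed by quic_crypto_batch_rx_packets().
 * Returns the expected plaintext length, or SIZE_MAX for an unsupported
 * algorithm; authentication status is only known once the batch runs.
 */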
static size_t
quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
				  ptls_aead_context_t * _ctx, void *_output,
				  const void *input, size_t inlen,
				  uint64_t decrypted_pn, const void *aad,
				  size_t aadlen)
{
  struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
  vnet_crypto_op_id_t id;
  if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
    {
      id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
    }
  else
    {
      return SIZE_MAX;
    }

  quic_main_t *qm = &quic_main;
  quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
    &qm->wrk_ctx[qctx->c_thread_index].crypto_context_batch;

  vnet_crypto_op_t *vnet_op =
    &quic_crypto_batch_ctx->aead_crypto_rx_packets_ops
    [quic_crypto_batch_ctx->nb_rx_packets];

  vnet_crypto_op_init (vnet_op, id);
  vnet_op->aad = (u8 *) aad;
  vnet_op->aad_len = aadlen;
  vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
  build_iv (_ctx, vnet_op->iv, decrypted_pn);
  vnet_op->src = (u8 *) input;
  vnet_op->dst = _output;
  vnet_op->key_index = ctx->key_index;
  vnet_op->len = inlen - ctx->super.algo->tag_size;
  vnet_op->tag_len = ctx->super.algo->tag_size;
  vnet_op->tag = vnet_op->src + vnet_op->len;
  quic_crypto_batch_ctx->nb_rx_packets++;

  return vnet_op->len;
}

static void
quic_crypto_aead_dispose_crypto (ptls_aead_context_t * _ctx)
{
}

static int
quic_crypto_aead_setup_crypto (ptls_aead_context_t * _ctx, int is_enc,
			       const void *key, const EVP_CIPHER * cipher)
{
  vlib_main_t *vm = vlib_get_main ();
  struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;

  vnet_crypto_alg_t algo;
  if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
    {
      algo = VNET_CRYPTO_ALG_AES_128_GCM;
    }
  else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
    {
      algo = VNET_CRYPTO_ALG_AES_256_GCM;
    }
  else
    {
      QUIC_DBG (1, "%s, invalid aead cipher %s", __FUNCTION__,
		_ctx->algo->name);
      assert (0);
    }

  ctx->super.do_decrypt = quic_crypto_aead_decrypt;

  ctx->super.do_encrypt_init = quic_crypto_aead_encrypt_init;
  ctx->super.do_encrypt_update = quic_crypto_aead_encrypt_update;
  ctx->super.do_encrypt_final = quic_crypto_aead_encrypt_final;
  ctx->super.dispose_crypto = quic_crypto_aead_dispose_crypto;

  clib_rwlock_writer_lock (&quic_main.crypto_keys_quic_rw_lock);
  ctx->key_index = vnet_crypto_key_add (vm, algo,
					(u8 *) key, _ctx->algo->key_size);
  clib_rwlock_writer_unlock (&quic_main.crypto_keys_quic_rw_lock);

  return 0;
}

static int
quic_crypto_aead_aes128gcm_setup_crypto (ptls_aead_context_t * ctx,
					 int is_enc, const void *key)
{
  return quic_crypto_aead_setup_crypto (ctx, is_enc, key, EVP_aes_128_gcm ());
}

static int
quic_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t * ctx,
					 int is_enc, const void *key)
{
  return quic_crypto_aead_setup_crypto (ctx, is_enc, key, EVP_aes_256_gcm ());
}

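/*
 * Algorithm descriptors wiring the picotls cipher suites to the callbacks
 * above.  AEAD always goes through vnet crypto; header protection uses the
 * vnet-backed CTR ciphers only when QUIC_HP_CRYPTO is defined, and the
 * picotls OpenSSL backend otherwise.
 */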
#ifdef QUIC_HP_CRYPTO
ptls_cipher_algorithm_t quic_crypto_aes128ctr = {
  "AES128-CTR",
  PTLS_AES128_KEY_SIZE,
  1 /* block size */ , PTLS_AES_IV_SIZE,
  sizeof (struct cipher_context_t), quic_crypto_aes128ctr_setup_crypto
};

ptls_cipher_algorithm_t quic_crypto_aes256ctr = {
  "AES256-CTR", PTLS_AES256_KEY_SIZE, 1 /* block size */ ,
  PTLS_AES_IV_SIZE, sizeof (struct cipher_context_t),
  quic_crypto_aes256ctr_setup_crypto
};
#endif

ptls_aead_algorithm_t quic_crypto_aes128gcm = {
  "AES128-GCM",
#ifdef QUIC_HP_CRYPTO
  &quic_crypto_aes128ctr,
#else
  &ptls_openssl_aes128ctr,
#endif
  &ptls_openssl_aes128ecb,
  PTLS_AES128_KEY_SIZE,
  PTLS_AESGCM_IV_SIZE,
  PTLS_AESGCM_TAG_SIZE,
  sizeof (struct aead_crypto_context_t),
  quic_crypto_aead_aes128gcm_setup_crypto
};

ptls_aead_algorithm_t quic_crypto_aes256gcm = {
  "AES256-GCM",
#ifdef QUIC_HP_CRYPTO
  &quic_crypto_aes256ctr,
#else
  &ptls_openssl_aes256ctr,
#endif
  &ptls_openssl_aes256ecb,
  PTLS_AES256_KEY_SIZE,
  PTLS_AESGCM_IV_SIZE,
  PTLS_AESGCM_TAG_SIZE,
  sizeof (struct aead_crypto_context_t),
  quic_crypto_aead_aes256gcm_setup_crypto
};

ptls_cipher_suite_t quic_crypto_aes128gcmsha256 = {
  PTLS_CIPHER_SUITE_AES_128_GCM_SHA256,
  &quic_crypto_aes128gcm, &ptls_openssl_sha256
};

ptls_cipher_suite_t quic_crypto_aes256gcmsha384 = {
  PTLS_CIPHER_SUITE_AES_256_GCM_SHA384,
  &quic_crypto_aes256gcm, &ptls_openssl_sha384
};

ptls_cipher_suite_t *quic_crypto_cipher_suites[] = {
  &quic_crypto_aes256gcmsha384, &quic_crypto_aes128gcmsha256, NULL
};

quicly_crypto_engine_t quic_crypto_engine = {
  quic_crypto_setup_cipher, quic_crypto_finalize_send_packet_cb
};

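/*
 * Session ticket callback backing 0-RTT resumption: a single-entry cache
 * keyed by a random session id.  On encrypt, the ticket is stored and the
 * id written out; on decrypt, the id is matched and the cached ticket
 * returned.
 */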
int
quic_encrypt_ticket_cb (ptls_encrypt_ticket_t * _self, ptls_t * tls,
			int is_encrypt, ptls_buffer_t * dst, ptls_iovec_t src)
{
  quic_session_cache_t *self = (void *) _self;
  int ret;

  if (is_encrypt)
    {
      /* replace the cached entry along with a newly generated session id */
      clib_mem_free (self->data.base);
      if ((self->data.base = clib_mem_alloc (src.len)) == NULL)
	return PTLS_ERROR_NO_MEMORY;

      ptls_get_context (tls)->random_bytes (self->id, sizeof (self->id));
      clib_memcpy (self->data.base, src.base, src.len);
      self->data.len = src.len;

      /* store the session id in buffer */
      if ((ret = ptls_buffer_reserve (dst, sizeof (self->id))) != 0)
	return ret;
      clib_memcpy (dst->base + dst->off, self->id, sizeof (self->id));
      dst->off += sizeof (self->id);
    }
  else
    {
      /* check if session id is the one stored in cache */
      if (src.len != sizeof (self->id))
	return PTLS_ERROR_SESSION_NOT_FOUND;
      if (clib_memcmp (self->id, src.base, sizeof (self->id)) != 0)
	return PTLS_ERROR_SESSION_NOT_FOUND;

      /* return the cached value */
      if ((ret = ptls_buffer_reserve (dst, self->data.len)) != 0)
	return ret;
      clib_memcpy (dst->base + dst->off, self->data.base, self->data.len);
      dst->off += self->data.len;
    }

  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */