#include <openssl/ssl.h>
#include <openssl/conf.h>
#include <openssl/err.h>
+
#ifdef HAVE_OPENSSL_ASYNC
#include <openssl/async.h>
#endif
#include <vnet/tls/tls.h>
#include <ctype.h>
#include <tlsopenssl/tls_openssl.h>
+#include <tlsopenssl/tls_bios.h>
+
+#define MAX_CRYPTO_LEN 64
-#define MAX_CRYPTO_LEN 16
+openssl_main_t openssl_main;
-static openssl_main_t openssl_main;
static u32
-openssl_ctx_alloc (void)
+openssl_ctx_alloc_w_thread (u32 thread_index)
{
- u8 thread_index = vlib_get_thread_index ();
- openssl_main_t *tm = &openssl_main;
+ openssl_main_t *om = &openssl_main;
openssl_ctx_t **ctx;
- pool_get (tm->ctx_pool[thread_index], ctx);
+ pool_get (om->ctx_pool[thread_index], ctx);
if (!(*ctx))
*ctx = clib_mem_alloc (sizeof (openssl_ctx_t));
clib_memset (*ctx, 0, sizeof (openssl_ctx_t));
(*ctx)->ctx.c_thread_index = thread_index;
- (*ctx)->ctx.tls_ctx_engine = TLS_ENGINE_OPENSSL;
+ (*ctx)->ctx.tls_ctx_engine = CRYPTO_ENGINE_OPENSSL;
(*ctx)->ctx.app_session_handle = SESSION_INVALID_HANDLE;
- (*ctx)->openssl_ctx_index = ctx - tm->ctx_pool[thread_index];
+ (*ctx)->openssl_ctx_index = ctx - om->ctx_pool[thread_index];
return ((*ctx)->openssl_ctx_index);
}
+static u32
+openssl_ctx_alloc (void)
+{
+ return openssl_ctx_alloc_w_thread (vlib_get_thread_index ());
+}
+
static void
openssl_ctx_free (tls_ctx_t * ctx)
{
  openssl_ctx_t *oc = (openssl_ctx_t *) ctx;

  /* A migrated ctx handed its ssl state to another thread, so only the
   * pool slot is released here. */
  if (!ctx->is_migrated)
    {
      /* Send close-notify only for fully established, locally closed
       * sessions. */
      if (!ctx->is_passive_close && SSL_is_init_finished (oc->ssl))
	SSL_shutdown (oc->ssl);

      SSL_free (oc->ssl);
      vec_free (ctx->srv_hostname);

#ifdef HAVE_OPENSSL_ASYNC
      openssl_evt_free (ctx->evt_index, ctx->c_thread_index);
#endif
    }

  pool_put_index (openssl_main.ctx_pool[ctx->c_thread_index],
		  oc->openssl_ctx_index);
}
+static void *
+openssl_ctx_detach (tls_ctx_t *ctx)
+{
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx, *oc_copy;
+
+ oc_copy = clib_mem_alloc (sizeof (*oc_copy));
+ clib_memcpy (oc_copy, oc, sizeof (*oc));
+
+ return oc_copy;
+}
+
+static u32
+openssl_ctx_attach (u32 thread_index, void *ctx_ptr)
+{
+ openssl_main_t *om = &openssl_main;
+ session_handle_t sh;
+ openssl_ctx_t **oc;
+
+ pool_get (om->ctx_pool[thread_index], oc);
+ /* Free the old instance instead of looking for an empty spot */
+ if (*oc)
+ clib_mem_free (*oc);
+
+ *oc = ctx_ptr;
+ (*oc)->openssl_ctx_index = oc - om->ctx_pool[thread_index];
+ (*oc)->ctx.c_thread_index = thread_index;
+
+ sh = (*oc)->ctx.tls_session_handle;
+ BIO_set_data ((*oc)->rbio, uword_to_pointer (sh, void *));
+ BIO_set_data ((*oc)->wbio, uword_to_pointer (sh, void *));
+
+ return ((*oc)->openssl_ctx_index);
+}
+
tls_ctx_t *
openssl_ctx_get (u32 ctx_index)
{
}
static int
-openssl_try_handshake_read (openssl_ctx_t * oc, session_t * tls_session)
+openssl_read_from_ssl_into_fifo (svm_fifo_t * f, SSL * ssl)
{
- u32 deq_max, deq_now;
- svm_fifo_t *f;
- int wrote, rv;
+ int read, rv, n_fs, i;
+ const int n_segs = 2;
+ svm_fifo_seg_t fs[n_segs];
+ u32 max_enq;
- f = tls_session->rx_fifo;
- deq_max = svm_fifo_max_dequeue_cons (f);
- if (!deq_max)
+ max_enq = svm_fifo_max_enqueue_prod (f);
+ if (!max_enq)
+ return 0;
+
+ n_fs = svm_fifo_provision_chunks (f, fs, n_segs, max_enq);
+ if (n_fs < 0)
return 0;
- deq_now = clib_min (svm_fifo_max_read_chunk (f), deq_max);
- wrote = BIO_write (oc->wbio, svm_fifo_head (f), deq_now);
- if (wrote <= 0)
+ /* Return early if we can't read anything */
+ read = SSL_read (ssl, fs[0].data, fs[0].len);
+ if (read <= 0)
return 0;
- svm_fifo_dequeue_drop (f, wrote);
- if (wrote < deq_max)
+ for (i = 1; i < n_fs; i++)
{
- deq_now = clib_min (svm_fifo_max_read_chunk (f), deq_max - wrote);
- rv = BIO_write (oc->wbio, svm_fifo_head (f), deq_now);
- if (rv > 0)
- {
- svm_fifo_dequeue_drop (f, rv);
- wrote += rv;
- }
+ rv = SSL_read (ssl, fs[i].data, fs[i].len);
+ read += rv > 0 ? rv : 0;
+
+ if (rv < (int) fs[i].len)
+ break;
}
- return wrote;
+
+ svm_fifo_enqueue_nocopy (f, read);
+
+ return read;
}
static int
-openssl_try_handshake_write (openssl_ctx_t * oc, session_t * tls_session)
+openssl_write_from_fifo_into_ssl (svm_fifo_t *f, SSL *ssl, u32 max_len)
{
- u32 enq_max, deq_now;
- svm_fifo_t *f;
- int read, rv;
-
- if (BIO_ctrl_pending (oc->rbio) <= 0)
- return 0;
+ int wrote = 0, rv, i = 0, len;
+ const int n_segs = 2;
+ svm_fifo_seg_t fs[n_segs];
- f = tls_session->tx_fifo;
- enq_max = svm_fifo_max_enqueue_prod (f);
- if (!enq_max)
+ len = svm_fifo_segments (f, 0, fs, n_segs, max_len);
+ if (len <= 0)
return 0;
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max);
- read = BIO_read (oc->rbio, svm_fifo_tail (f), deq_now);
- if (read <= 0)
- return 0;
-
- svm_fifo_enqueue_nocopy (f, read);
- tls_add_vpp_q_tx_evt (tls_session);
-
- if (read < enq_max)
+ while (wrote < len && i < n_segs)
{
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max - read);
- rv = BIO_read (oc->rbio, svm_fifo_tail (f), deq_now);
- if (rv > 0)
- {
- svm_fifo_enqueue_nocopy (f, rv);
- read += rv;
- }
+ rv = SSL_write (ssl, fs[i].data, fs[i].len);
+ wrote += (rv > 0) ? rv : 0;
+ if (rv < (int) fs[i].len)
+ break;
+ i++;
}
- return read;
+ if (wrote)
+ svm_fifo_dequeue_drop (f, wrote);
+
+ return wrote;
}
#ifdef HAVE_OPENSSL_ASYNC
static int
openssl_check_async_status (tls_ctx_t * ctx, openssl_resume_handler * handler,
			    session_t * session)
{
  openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
  int estatus;

  SSL_get_async_status (oc->ssl, &estatus);
  /* EAGAIN means the engine was busy and vpp must retry; otherwise we
   * just wait for the async completion event. */
  vpp_tls_async_update_event (ctx, estatus == ASYNC_STATUS_EAGAIN);

  return 1;
}
#endif
+static void
+openssl_handle_handshake_failure (tls_ctx_t * ctx)
+{
+ session_t *app_session;
+ if (SSL_is_server (((openssl_ctx_t *) ctx)->ssl))
+ {
+ /*
+ * Cleanup pre-allocated app session and close transport
+ */
+ app_session =
+ session_get_if_valid (ctx->c_s_index, ctx->c_thread_index);
+ if (app_session)
+ {
+ session_free (app_session);
+ ctx->no_app_session = 1;
+ ctx->c_s_index = SESSION_INVALID_INDEX;
+ tls_disconnect_transport (ctx);
+ }
+ }
+ else
+ {
+ /*
+ * Also handles cleanup of the pre-allocated session
+ */
+ tls_notify_app_connected (ctx, SESSION_E_TLS_HANDSHAKE);
+ }
}
-#endif
-
int
openssl_ctx_handshake_rx (tls_ctx_t * ctx, session_t * tls_session)
{
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
int rv = 0, err;
-#ifdef HAVE_OPENSSL_ASYNC
- int estatus;
- openssl_resume_handler *myself;
-#endif
while (SSL_in_init (oc->ssl))
{
{
ctx->resume = 0;
}
- else if (!openssl_try_handshake_read (oc, tls_session))
- {
- break;
- }
-
-#ifdef HAVE_OPENSSL_ASYNC
- myself = openssl_ctx_handshake_rx;
- vpp_ssl_async_process_event (ctx, myself);
-#endif
+ else if (!svm_fifo_max_dequeue_cons (tls_session->rx_fifo))
+ break;
rv = SSL_do_handshake (oc->ssl);
err = SSL_get_error (oc->ssl, rv);
- openssl_try_handshake_write (oc, tls_session);
+
#ifdef HAVE_OPENSSL_ASYNC
if (err == SSL_ERROR_WANT_ASYNC)
{
- SSL_get_async_status (oc->ssl, &estatus);
-
- if (estatus == ASYNC_STATUS_EAGAIN)
- {
- vpp_ssl_async_retry_func (ctx, myself);
- }
+ openssl_check_async_status (ctx, openssl_ctx_handshake_rx,
+ tls_session);
}
#endif
-
- if (err != SSL_ERROR_WANT_WRITE)
+ if (err == SSL_ERROR_SSL)
{
- if (err == SSL_ERROR_SSL)
- {
- char buf[512];
- ERR_error_string (ERR_get_error (), buf);
- clib_warning ("Err: %s", buf);
- }
- break;
+ char buf[512];
+ ERR_error_string (ERR_get_error (), buf);
+ clib_warning ("Err: %s", buf);
+
+ openssl_handle_handshake_failure (ctx);
+ return -1;
}
+
+ if (err != SSL_ERROR_WANT_WRITE && err != SSL_ERROR_WANT_READ)
+ break;
}
TLS_DBG (2, "tls state for %u is %s", oc->openssl_ctx_index,
SSL_state_string_long (oc->ssl));
if (SSL_in_init (oc->ssl))
- return 0;
+ return -1;
/*
* Handshake complete
*/
if (ctx->srv_hostname)
{
- tls_notify_app_connected (ctx, /* is failed */ 0);
+ tls_notify_app_connected (ctx, SESSION_E_TLS_HANDSHAKE);
return -1;
}
}
- tls_notify_app_connected (ctx, /* is failed */ 0);
+ tls_notify_app_connected (ctx, SESSION_E_NONE);
}
else
{
- tls_notify_app_accept (ctx);
+ /* Need to check transport status */
+ if (ctx->is_passive_close)
+ {
+ openssl_handle_handshake_failure (ctx);
+ return -1;
+ }
+
+ /* Accept failed, cleanup */
+ if (tls_notify_app_accept (ctx))
+ {
+ ctx->c_s_index = SESSION_INVALID_INDEX;
+ tls_disconnect_transport (ctx);
+ return -1;
+ }
}
TLS_DBG (1, "Handshake for %u complete. TLS cipher is %s",
return rv;
}
-static inline int
-openssl_ctx_write (tls_ctx_t * ctx, session_t * app_session)
+static void
+openssl_confirm_app_close (tls_ctx_t * ctx)
+{
+ tls_disconnect_transport (ctx);
+ session_transport_closed_notify (&ctx->connection);
+}
+
+static int
+openssl_ctx_write_tls (tls_ctx_t *ctx, session_t *app_session,
+ transport_send_params_t *sp)
{
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
- int wrote = 0, rv, read, max_buf = 100 * TLS_CHUNK_SIZE, max_space;
- u32 enq_max, deq_max, deq_now, to_write;
- session_t *tls_session;
+ u32 deq_max, space, enq_buf;
+ session_t *ts;
+ int wrote = 0;
svm_fifo_t *f;
+ ts = session_get_from_handle (ctx->tls_session_handle);
+ space = svm_fifo_max_enqueue_prod (ts->tx_fifo);
+ /* Leave a bit of extra space for tls ctrl data, if any needed */
+ space = clib_max ((int) space - TLSO_CTRL_BYTES, 0);
+
f = app_session->tx_fifo;
+
deq_max = svm_fifo_max_dequeue_cons (f);
+ deq_max = clib_min (deq_max, space);
if (!deq_max)
goto check_tls_fifo;
- max_space = max_buf - BIO_ctrl_pending (oc->rbio);
- max_space = (max_space < 0) ? 0 : max_space;
- deq_now = clib_min (deq_max, (u32) max_space);
- to_write = clib_min (svm_fifo_max_read_chunk (f), deq_now);
- wrote = SSL_write (oc->ssl, svm_fifo_head (f), to_write);
- if (wrote <= 0)
- {
- tls_add_vpp_q_builtin_tx_evt (app_session);
- goto check_tls_fifo;
- }
- svm_fifo_dequeue_drop (app_session->tx_fifo, wrote);
- if (wrote < deq_now)
- {
- to_write = clib_min (svm_fifo_max_read_chunk (f), deq_now - wrote);
- rv = SSL_write (oc->ssl, svm_fifo_head (f), to_write);
- if (rv > 0)
- {
- svm_fifo_dequeue_drop (app_session->tx_fifo, rv);
- wrote += rv;
- }
- }
+ deq_max = clib_min (deq_max, sp->max_burst_size);
+
+ /* Make sure tcp's tx fifo can actually buffer all bytes to be dequeued.
+ * If under memory pressure, tls's fifo segment might not be able to
+ * allocate the chunks needed. This also avoids errors from the underlying
+ * custom bio to the ssl infra which at times can get stuck. */
+ if (svm_fifo_provision_chunks (ts->tx_fifo, 0, 0, deq_max + TLSO_CTRL_BYTES))
+ goto check_tls_fifo;
+
+ wrote = openssl_write_from_fifo_into_ssl (f, oc->ssl, deq_max);
+ if (!wrote)
+ goto check_tls_fifo;
- if (wrote < deq_max)
- tls_add_vpp_q_builtin_tx_evt (app_session);
+ if (svm_fifo_needs_deq_ntf (f, wrote))
+ session_dequeue_notify (app_session);
check_tls_fifo:
- if (BIO_ctrl_pending (oc->rbio) <= 0)
- return wrote;
+ if (PREDICT_FALSE (ctx->app_closed && BIO_ctrl_pending (oc->rbio) <= 0))
+ openssl_confirm_app_close (ctx);
- tls_session = session_get_from_handle (ctx->tls_session_handle);
- f = tls_session->tx_fifo;
- enq_max = svm_fifo_max_enqueue_prod (f);
- if (!enq_max)
+ /* Deschedule and wait for deq notification if fifo is almost full */
+ enq_buf = clib_min (svm_fifo_size (ts->tx_fifo) / 2, TLSO_MIN_ENQ_SPACE);
+ if (space < wrote + enq_buf)
{
- tls_add_vpp_q_builtin_tx_evt (app_session);
- return wrote;
+ svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+ transport_connection_deschedule (&ctx->connection);
+ sp->flags |= TRANSPORT_SND_F_DESCHED;
}
+ else
+ /* Request tx reschedule of the app session */
+ app_session->flags |= SESSION_F_CUSTOM_TX;
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max);
- read = BIO_read (oc->rbio, svm_fifo_tail (f), deq_now);
- if (read <= 0)
- {
- tls_add_vpp_q_builtin_tx_evt (app_session);
- return wrote;
- }
+ return wrote;
+}
- svm_fifo_enqueue_nocopy (f, read);
- tls_add_vpp_q_tx_evt (tls_session);
+static int
+openssl_ctx_write_dtls (tls_ctx_t *ctx, session_t *app_session,
+ transport_send_params_t *sp)
+{
+ openssl_main_t *om = &openssl_main;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+ u32 read = 0, to_deq, dgram_sz, enq_max;
+ session_dgram_pre_hdr_t hdr;
+ session_t *us;
+ int wrote, rv;
+ u8 *buf;
- if (read < enq_max && BIO_ctrl_pending (oc->rbio) > 0)
+ us = session_get_from_handle (ctx->tls_session_handle);
+ to_deq = svm_fifo_max_dequeue_cons (app_session->tx_fifo);
+ buf = om->tx_bufs[ctx->c_thread_index];
+
+ while (to_deq > 0)
{
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max - read);
- read = BIO_read (oc->rbio, svm_fifo_tail (f), deq_now);
- if (read > 0)
- svm_fifo_enqueue_nocopy (f, read);
+ /* Peeking only pre-header dgram because the session is connected */
+ rv = svm_fifo_peek (app_session->tx_fifo, 0, sizeof (hdr), (u8 *) &hdr);
+ ASSERT (rv == sizeof (hdr) && hdr.data_length < vec_len (buf));
+ ASSERT (to_deq >= hdr.data_length + SESSION_CONN_HDR_LEN);
+
+ dgram_sz = hdr.data_length + SESSION_CONN_HDR_LEN;
+ enq_max = dgram_sz + TLSO_CTRL_BYTES;
+ if (svm_fifo_max_enqueue_prod (us->tx_fifo) < enq_max ||
+ svm_fifo_provision_chunks (us->tx_fifo, 0, 0, enq_max))
+ {
+ svm_fifo_add_want_deq_ntf (us->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+ transport_connection_deschedule (&ctx->connection);
+ sp->flags |= TRANSPORT_SND_F_DESCHED;
+ goto done;
+ }
+
+ rv = svm_fifo_peek (app_session->tx_fifo, SESSION_CONN_HDR_LEN,
+ hdr.data_length, buf);
+ ASSERT (rv == hdr.data_length);
+ svm_fifo_dequeue_drop (app_session->tx_fifo, dgram_sz);
+
+ wrote = SSL_write (oc->ssl, buf, rv);
+ ASSERT (wrote > 0);
+
+ read += rv;
+ to_deq -= dgram_sz;
}
- if (BIO_ctrl_pending (oc->rbio) > 0)
- tls_add_vpp_q_builtin_tx_evt (app_session);
+done:
- return wrote;
+ if (svm_fifo_needs_deq_ntf (app_session->tx_fifo, read))
+ session_dequeue_notify (app_session);
+
+ if (read)
+ tls_add_vpp_q_tx_evt (us);
+
+ if (PREDICT_FALSE (ctx->app_closed &&
+ !svm_fifo_max_enqueue_prod (us->rx_fifo)))
+ openssl_confirm_app_close (ctx);
+
+ return read;
}
static inline int
-openssl_ctx_read (tls_ctx_t * ctx, session_t * tls_session)
+openssl_ctx_write (tls_ctx_t *ctx, session_t *app_session,
+ transport_send_params_t *sp)
+{
+ if (ctx->tls_type == TRANSPORT_PROTO_TLS)
+ return openssl_ctx_write_tls (ctx, app_session, sp);
+ else
+ return openssl_ctx_write_dtls (ctx, app_session, sp);
+}
+
+static inline int
+openssl_ctx_read_tls (tls_ctx_t *ctx, session_t *tls_session)
{
- int read, wrote = 0, max_space, max_buf = 100 * TLS_CHUNK_SIZE, rv;
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
- u32 deq_max, enq_max, deq_now, to_read;
session_t *app_session;
+ int read;
svm_fifo_t *f;
if (PREDICT_FALSE (SSL_in_init (oc->ssl)))
{
- openssl_ctx_handshake_rx (ctx, tls_session);
- return 0;
+ if (openssl_ctx_handshake_rx (ctx, tls_session) < 0)
+ return 0;
}
- f = tls_session->rx_fifo;
- deq_max = svm_fifo_max_dequeue_cons (f);
- max_space = max_buf - BIO_ctrl_pending (oc->wbio);
- max_space = max_space < 0 ? 0 : max_space;
- deq_now = clib_min (deq_max, max_space);
- if (!deq_now)
- goto check_app_fifo;
+ app_session = session_get_from_handle (ctx->app_session_handle);
+ f = app_session->rx_fifo;
+
+ read = openssl_read_from_ssl_into_fifo (f, oc->ssl);
- to_read = clib_min (svm_fifo_max_read_chunk (f), deq_now);
- wrote = BIO_write (oc->wbio, svm_fifo_head (f), to_read);
- if (wrote <= 0)
+ /* If handshake just completed, session may still be in accepting state */
+ if (read && app_session->session_state >= SESSION_STATE_READY)
+ tls_notify_app_enqueue (ctx, app_session);
+
+ if ((SSL_pending (oc->ssl) > 0) ||
+ svm_fifo_max_dequeue_cons (tls_session->rx_fifo))
+ tls_add_vpp_q_builtin_rx_evt (tls_session);
+
+ return read;
+}
+
+static inline int
+openssl_ctx_read_dtls (tls_ctx_t *ctx, session_t *us)
+{
+ openssl_main_t *om = &openssl_main;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+ session_dgram_hdr_t hdr;
+ session_t *app_session;
+ u32 wrote = 0;
+ int read, rv;
+ u8 *buf;
+
+ if (PREDICT_FALSE (SSL_in_init (oc->ssl)))
{
- tls_add_vpp_q_builtin_rx_evt (tls_session);
- goto check_app_fifo;
+ u32 us_index = us->session_index;
+ if (openssl_ctx_handshake_rx (ctx, us) < 0)
+ return 0;
+ /* Session pool might grow when allocating the app's session */
+ us = session_get (us_index, ctx->c_thread_index);
}
- svm_fifo_dequeue_drop (f, wrote);
- if (wrote < deq_now)
+
+ buf = om->rx_bufs[ctx->c_thread_index];
+ app_session = session_get_from_handle (ctx->app_session_handle);
+ svm_fifo_fill_chunk_list (app_session->rx_fifo);
+
+ while (svm_fifo_max_dequeue_cons (us->rx_fifo) > 0)
{
- to_read = clib_min (svm_fifo_max_read_chunk (f), deq_now - wrote);
- rv = BIO_write (oc->wbio, svm_fifo_head (f), to_read);
- if (rv > 0)
+ if (svm_fifo_max_enqueue_prod (app_session->rx_fifo) < DTLSO_MAX_DGRAM)
{
- svm_fifo_dequeue_drop (f, rv);
- wrote += rv;
+ tls_add_vpp_q_builtin_rx_evt (us);
+ goto done;
}
- }
- if (svm_fifo_max_dequeue_cons (f))
- tls_add_vpp_q_builtin_rx_evt (tls_session);
-check_app_fifo:
+ read = SSL_read (oc->ssl, buf, vec_len (buf));
+ if (PREDICT_FALSE (read <= 0))
+ {
+ if (read < 0)
+ tls_add_vpp_q_builtin_rx_evt (us);
+ goto done;
+ }
+ wrote += read;
- if (BIO_ctrl_pending (oc->wbio) <= 0)
- return wrote;
+ hdr.data_length = read;
+ hdr.data_offset = 0;
- app_session = session_get_from_handle (ctx->app_session_handle);
- f = app_session->rx_fifo;
- enq_max = svm_fifo_max_enqueue_prod (f);
- if (!enq_max)
- {
- tls_add_vpp_q_builtin_rx_evt (tls_session);
- return wrote;
- }
+ svm_fifo_seg_t segs[2] = { { (u8 *) &hdr, sizeof (hdr) },
+ { buf, read } };
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max);
- read = SSL_read (oc->ssl, svm_fifo_tail (f), deq_now);
- if (read <= 0)
- {
- tls_add_vpp_q_builtin_rx_evt (tls_session);
- return wrote;
- }
- svm_fifo_enqueue_nocopy (f, read);
- if (read < enq_max && BIO_ctrl_pending (oc->wbio) > 0)
- {
- deq_now = clib_min (svm_fifo_max_write_chunk (f), enq_max - read);
- read = SSL_read (oc->ssl, svm_fifo_tail (f), deq_now);
- if (read > 0)
- svm_fifo_enqueue_nocopy (f, read);
+ rv = svm_fifo_enqueue_segments (app_session->rx_fifo, segs, 2,
+ 0 /* allow partial */);
+ ASSERT (rv > 0);
}
- tls_notify_app_enqueue (ctx, app_session);
- if (BIO_ctrl_pending (oc->wbio) > 0)
- tls_add_vpp_q_builtin_rx_evt (tls_session);
+done:
+
+ /* If handshake just completed, session may still be in accepting state */
+ if (app_session->session_state >= SESSION_STATE_READY)
+ tls_notify_app_enqueue (ctx, app_session);
return wrote;
}
+static inline int
+openssl_ctx_read (tls_ctx_t *ctx, session_t *ts)
+{
+ if (ctx->tls_type == TRANSPORT_PROTO_TLS)
+ return openssl_ctx_read_tls (ctx, ts);
+ else
+ return openssl_ctx_read_dtls (ctx, ts);
+}
+
static int
openssl_ctx_init_client (tls_ctx_t * ctx)
{
long flags = SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_COMPRESSION;
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
openssl_main_t *om = &openssl_main;
- session_t *tls_session;
const SSL_METHOD *method;
int rv, err;
-#ifdef HAVE_OPENSSL_ASYNC
- openssl_resume_handler *handler;
-#endif
- method = SSLv23_client_method ();
+ method = ctx->tls_type == TRANSPORT_PROTO_TLS ? SSLv23_client_method () :
+ DTLS_client_method ();
if (method == NULL)
{
- TLS_DBG (1, "SSLv23_method returned null");
+ TLS_DBG (1, "(D)TLS_method returned null");
return -1;
}
return -1;
}
- oc->rbio = BIO_new (BIO_s_mem ());
- oc->wbio = BIO_new (BIO_s_mem ());
-
- BIO_set_mem_eof_return (oc->rbio, -1);
- BIO_set_mem_eof_return (oc->wbio, -1);
+ if (ctx->tls_type == TRANSPORT_PROTO_TLS)
+ {
+ oc->rbio = BIO_new_tls (ctx->tls_session_handle);
+ oc->wbio = BIO_new_tls (ctx->tls_session_handle);
+ }
+ else
+ {
+ oc->rbio = BIO_new_dtls (ctx->tls_session_handle);
+ oc->wbio = BIO_new_dtls (ctx->tls_session_handle);
+ }
SSL_set_bio (oc->ssl, oc->wbio, oc->rbio);
SSL_set_connect_state (oc->ssl);
TLS_DBG (1, "Initiating handshake for [%u]%u", ctx->c_thread_index,
oc->openssl_ctx_index);
- tls_session = session_get_from_handle (ctx->tls_session_handle);
+#ifdef HAVE_OPENSSL_ASYNC
+ session_t *tls_session = session_get_from_handle (ctx->tls_session_handle);
+ vpp_tls_async_init_event (ctx, openssl_ctx_handshake_rx, tls_session);
+#endif
while (1)
{
rv = SSL_do_handshake (oc->ssl);
err = SSL_get_error (oc->ssl, rv);
- openssl_try_handshake_write (oc, tls_session);
#ifdef HAVE_OPENSSL_ASYNC
if (err == SSL_ERROR_WANT_ASYNC)
{
- handler = (openssl_resume_handler *) openssl_ctx_handshake_rx;
- vpp_ssl_async_process_event (ctx, handler);
+ openssl_check_async_status (ctx, openssl_ctx_handshake_rx,
+ tls_session);
break;
}
#endif
static int
openssl_start_listen (tls_ctx_t * lctx)
{
- application_t *app;
const SSL_METHOD *method;
SSL_CTX *ssl_ctx;
int rv;
EVP_PKEY *pkey;
u32 olc_index;
openssl_listen_ctx_t *olc;
- app_worker_t *app_wrk;
+ app_cert_key_pair_t *ckpair;
long flags = SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_COMPRESSION;
openssl_main_t *om = &openssl_main;
- app_wrk = app_worker_get (lctx->parent_app_wrk_index);
- if (!app_wrk)
+ ckpair = app_cert_key_pair_get_if_valid (lctx->ckpair_index);
+ if (!ckpair)
return -1;
- app = application_get (app_wrk->app_index);
- if (!app->tls_cert || !app->tls_key)
+ if (!ckpair->cert || !ckpair->key)
{
TLS_DBG (1, "tls cert and/or key not configured %d",
lctx->parent_app_wrk_index);
return -1;
}
- method = SSLv23_method ();
+ method = lctx->tls_type == TRANSPORT_PROTO_TLS ? SSLv23_server_method () :
+ DTLS_server_method ();
ssl_ctx = SSL_CTX_new (method);
if (!ssl_ctx)
{
SSL_CTX_set_mode (ssl_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
#ifdef HAVE_OPENSSL_ASYNC
if (om->async)
- SSL_CTX_set_mode (ssl_ctx, SSL_MODE_ASYNC);
- SSL_CTX_set_async_callback (ssl_ctx, tls_async_openssl_callback);
+ {
+ SSL_CTX_set_mode (ssl_ctx, SSL_MODE_ASYNC);
+ SSL_CTX_set_async_callback (ssl_ctx, tls_async_openssl_callback);
+ }
#endif
SSL_CTX_set_options (ssl_ctx, flags);
SSL_CTX_set_ecdh_auto (ssl_ctx, 1);
* Set the key and cert
*/
cert_bio = BIO_new (BIO_s_mem ());
- BIO_write (cert_bio, app->tls_cert, vec_len (app->tls_cert));
+ if (!cert_bio)
+ {
+ clib_warning ("unable to allocate memory");
+ return -1;
+ }
+ BIO_write (cert_bio, ckpair->cert, vec_len (ckpair->cert));
srvcert = PEM_read_bio_X509 (cert_bio, NULL, NULL, NULL);
if (!srvcert)
{
clib_warning ("unable to parse certificate");
- return -1;
+ goto err;
}
- SSL_CTX_use_certificate (ssl_ctx, srvcert);
+ rv = SSL_CTX_use_certificate (ssl_ctx, srvcert);
+ if (rv != 1)
+ {
+ clib_warning ("unable to use SSL certificate");
+ goto err;
+ }
+
BIO_free (cert_bio);
cert_bio = BIO_new (BIO_s_mem ());
- BIO_write (cert_bio, app->tls_key, vec_len (app->tls_key));
+ if (!cert_bio)
+ {
+ clib_warning ("unable to allocate memory");
+ return -1;
+ }
+ BIO_write (cert_bio, ckpair->key, vec_len (ckpair->key));
pkey = PEM_read_bio_PrivateKey (cert_bio, NULL, NULL, NULL);
if (!pkey)
{
clib_warning ("unable to parse pkey");
- return -1;
+ goto err;
}
- SSL_CTX_use_PrivateKey (ssl_ctx, pkey);
+ rv = SSL_CTX_use_PrivateKey (ssl_ctx, pkey);
+ if (rv != 1)
+ {
+ clib_warning ("unable to use SSL PrivateKey");
+ goto err;
+ }
+
BIO_free (cert_bio);
olc_index = openssl_listen_ctx_alloc ();
return 0;
+err:
+ if (cert_bio)
+ BIO_free (cert_bio);
+ return -1;
}
static int
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
u32 olc_index = ctx->tls_ssl_ctx;
openssl_listen_ctx_t *olc;
- session_t *tls_session;
int rv, err;
-#ifdef HAVE_OPENSSL_ASYNC
- openssl_resume_handler *handler;
-#endif
/* Start a new connection */
return -1;
}
- oc->rbio = BIO_new (BIO_s_mem ());
- oc->wbio = BIO_new (BIO_s_mem ());
-
- BIO_set_mem_eof_return (oc->rbio, -1);
- BIO_set_mem_eof_return (oc->wbio, -1);
+ if (ctx->tls_type == TRANSPORT_PROTO_TLS)
+ {
+ oc->rbio = BIO_new_tls (ctx->tls_session_handle);
+ oc->wbio = BIO_new_tls (ctx->tls_session_handle);
+ }
+ else
+ {
+ oc->rbio = BIO_new_dtls (ctx->tls_session_handle);
+ oc->wbio = BIO_new_dtls (ctx->tls_session_handle);
+ }
SSL_set_bio (oc->ssl, oc->wbio, oc->rbio);
SSL_set_accept_state (oc->ssl);
TLS_DBG (1, "Initiating handshake for [%u]%u", ctx->c_thread_index,
oc->openssl_ctx_index);
- tls_session = session_get_from_handle (ctx->tls_session_handle);
+#ifdef HAVE_OPENSSL_ASYNC
+ session_t *tls_session = session_get_from_handle (ctx->tls_session_handle);
+ vpp_tls_async_init_event (ctx, openssl_ctx_handshake_rx, tls_session);
+#endif
while (1)
{
rv = SSL_do_handshake (oc->ssl);
err = SSL_get_error (oc->ssl, rv);
- openssl_try_handshake_write (oc, tls_session);
#ifdef HAVE_OPENSSL_ASYNC
if (err == SSL_ERROR_WANT_ASYNC)
{
- handler = (openssl_resume_handler *) openssl_ctx_handshake_rx;
- vpp_ssl_async_process_event (ctx, handler);
+ openssl_check_async_status (ctx, openssl_ctx_handshake_rx,
+ tls_session);
break;
}
#endif
static int
openssl_transport_close (tls_ctx_t * ctx)
{
+#ifdef HAVE_OPENSSL_ASYNC
+ if (vpp_openssl_is_inflight (ctx))
+ return 0;
+#endif
+
if (!openssl_handshake_is_over (ctx))
{
- session_close (session_get_from_handle (ctx->tls_session_handle));
+ openssl_handle_handshake_failure (ctx);
return 0;
}
session_transport_closing_notify (&ctx->connection);
static int
openssl_app_close (tls_ctx_t * ctx)
{
  openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
  session_t *app_session;

  /* Wait for all data to be written to tcp */
  app_session = session_get_from_handle (ctx->app_session_handle);
  if (BIO_ctrl_pending (oc->rbio) > 0
      || svm_fifo_max_dequeue_cons (app_session->tx_fifo))
    ctx->app_closed = 1;
  else
    openssl_confirm_app_close (ctx);
  return 0;
}
const static tls_engine_vft_t openssl_engine = {
.ctx_alloc = openssl_ctx_alloc,
+ .ctx_alloc_w_thread = openssl_ctx_alloc_w_thread,
.ctx_free = openssl_ctx_free,
+ .ctx_attach = openssl_ctx_attach,
+ .ctx_detach = openssl_ctx_detach,
.ctx_get = openssl_ctx_get,
.ctx_get_w_thread = openssl_ctx_get_w_thread,
.ctx_init_server = openssl_ctx_init_server,
return -1;
}
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+ rv = X509_STORE_load_file (om->cert_store, tm->ca_cert_path);
+#else
rv = X509_STORE_load_locations (om->cert_store, tm->ca_cert_path, 0);
+#endif
+
if (rv < 0)
{
clib_warning ("failed to load ca certificate");
return (rv < 0 ? -1 : 0);
}
-static int
+int
tls_openssl_set_ciphers (char *ciphers)
{
openssl_main_t *om = &openssl_main;
{
vlib_thread_main_t *vtm = vlib_get_thread_main ();
openssl_main_t *om = &openssl_main;
- clib_error_t *error;
- u32 num_threads;
+ clib_error_t *error = 0;
+ u32 num_threads, i;
+ error = tls_openssl_api_init (vm);
num_threads = 1 /* main thread */ + vtm->n_threads;
- if ((error = vlib_call_init_function (vm, tls_init)))
- return error;
-
SSL_library_init ();
SSL_load_error_strings ();
}
vec_validate (om->ctx_pool, num_threads - 1);
-
- tls_register_engine (&openssl_engine, TLS_ENGINE_OPENSSL);
+ vec_validate (om->rx_bufs, num_threads - 1);
+ vec_validate (om->tx_bufs, num_threads - 1);
+ for (i = 0; i < num_threads; i++)
+ {
+ vec_validate (om->rx_bufs[i], DTLSO_MAX_DGRAM);
+ vec_validate (om->tx_bufs[i], DTLSO_MAX_DGRAM);
+ }
+ tls_register_engine (&openssl_engine, CRYPTO_ENGINE_OPENSSL);
om->engine_init = 0;
tls_openssl_set_ciphers
("ALL:!ADH:!LOW:!EXP:!MD5:!RC4-SHA:!DES-CBC3-SHA:@STRENGTH");
- return 0;
+ return error;
}
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (tls_openssl_init) =
+{
+ .runs_after = VLIB_INITS("tls_init"),
+};
+/* *INDENT-ON* */
#ifdef HAVE_OPENSSL_ASYNC
static clib_error_t *
char *engine_alg = NULL;
char *ciphers = NULL;
u8 engine_name_set = 0;
- int i;
+ int i, async = 0;
/* By present, it is not allowed to configure engine again after running */
if (om->engine_init)
}
else if (unformat (input, "async"))
{
- om->async = 1;
- openssl_async_node_enable_disable (1);
+ async = 1;
}
else if (unformat (input, "alg %s", &engine_alg))
{
if (!engine_name_set)
{
clib_warning ("No engine provided! \n");
- om->async = 0;
+ async = 0;
}
else
{
- if (!openssl_engine_register (engine_name, engine_alg))
+ vnet_session_enable_disable (vm, 1);
+ if (openssl_engine_register (engine_name, engine_alg, async) < 0)
{
- return clib_error_return (0, "failed to register %s polling",
+ return clib_error_return (0, "Failed to register %s polling",
engine_name);
}
+ else
+ {
+ vlib_cli_output (vm, "Successfully register engine %s\n",
+ engine_name);
+ }
}
+ om->async = async;
return 0;
}
/* *INDENT-ON* */
#endif
-
-VLIB_INIT_FUNCTION (tls_openssl_init);
-
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
- .description = "openssl based TLS Engine",
+ .description = "Transport Layer Security (TLS) Engine, OpenSSL Based",
};
/* *INDENT-ON* */