From 2179513b71963a0b42ca6f1f2b641e79ce89447b Mon Sep 17 00:00:00 2001
From: Florin Coras
Date: Sun, 9 Sep 2018 09:40:51 -0700
Subject: [PATCH] session: lock app worker mq for io events

Also fixes vcl client/server stats and closing procedure.

Change-Id: I7d5a274ea0a3c8ea13062bf61bf402248dfe1a19
Signed-off-by: Florin Coras
---
 src/vcl/vcl_test_client.c                | 150 +++++++++++++++++--------
 src/vcl/vcl_test_server.c                |  12 ++-
 src/vcl/vppcom.c                         |  14 ++-
 src/vnet/session/application.c           |  11 ++-
 src/vnet/session/application_interface.c |   7 +-
 src/vnet/session/session.c               |  21 ++---
 src/vnet/session/session_node.c          |  15 ++--
 src/vnet/tls/tls.c                       |   2 +-
 8 files changed, 130 insertions(+), 102 deletions(-)

diff --git a/src/vcl/vcl_test_client.c b/src/vcl/vcl_test_client.c
index b2431df80fa..899d729c47d 100644
--- a/src/vcl/vcl_test_client.c
+++ b/src/vcl/vcl_test_client.c
@@ -233,7 +233,6 @@ vtc_worker_init (vcl_test_client_worker_t * wrk)
 	  vtwrn ("failed to register worker");
 	  return -1;
 	}
-      vt_atomic_add (&vcm->active_workers, 1);
     }

   rv = vtc_connect_test_sessions (wrk);
@@ -259,6 +258,64 @@ vtc_worker_init (vcl_test_client_worker_t * wrk)
   return 0;
 }

+static int stats_lock = 0;
+
+static void
+vtc_accumulate_stats (vcl_test_client_worker_t * wrk,
+		      sock_test_socket_t * ctrl)
+{
+  sock_test_socket_t *tsock;
+  static char buf[64];
+  int i, show_rx = 0;
+
+  while (__sync_lock_test_and_set (&stats_lock, 1))
+    ;
+
+  if (ctrl->cfg.test == SOCK_TEST_TYPE_BI
+      || ctrl->cfg.test == SOCK_TEST_TYPE_ECHO)
+    show_rx = 1;
+
+  for (i = 0; i < wrk->cfg.num_test_sockets; i++)
+    {
+      tsock = &wrk->sessions[i];
+      tsock->stats.start = ctrl->stats.start;
+
+      if (ctrl->cfg.verbose > 1)
+	{
+	  sprintf (buf, "CLIENT (fd %d) RESULTS", tsock->fd);
+	  sock_test_stats_dump (buf, &tsock->stats, show_rx, 1 /* show tx */ ,
+				ctrl->cfg.verbose);
+	}
+
+      sock_test_stats_accumulate (&ctrl->stats, &tsock->stats);
+    }
+
+  __sync_lock_release (&stats_lock);
+}
+
+static void
+vtc_worker_sessions_exit (vcl_test_client_worker_t * wrk)
+{
+  vcl_test_client_main_t *vcm = &vcl_client_main;
+  sock_test_socket_t *ctrl = &vcm->ctrl_socket;
+  sock_test_socket_t *tsock;
+  int i, verbose = ctrl->cfg.verbose;
+
+  for (i = 0; i < wrk->cfg.num_test_sockets; i++)
+    {
+      tsock = &wrk->sessions[i];
+      tsock->cfg.test = SOCK_TEST_TYPE_EXIT;
+
+      if (verbose)
+	{
+	  vtinf ("(fd %d): Sending exit cfg to server...", tsock->fd);
+	  sock_test_cfg_dump (&tsock->cfg, 1 /* is_client */ );
+	}
+      (void) vcl_test_write (tsock->fd, (uint8_t *) & tsock->cfg,
+			     sizeof (tsock->cfg), &tsock->stats, verbose);
+    }
+}
+
 static void *
 vtc_worker_loop (void *arg)
 {
@@ -338,39 +395,15 @@ vtc_worker_loop (void *arg)
 	    }
 	}
     }
 exit:
+  vtinf ("Worker %d done ...", wrk->wrk_index);
+  vtc_accumulate_stats (wrk, ctrl);
+  sleep (1);
+  vtc_worker_sessions_exit (wrk);
   if (wrk->wrk_index)
     vt_atomic_add (&vcm->active_workers, -1);
   return 0;
 }

-static void
-vtc_accumulate_stats (vcl_test_client_worker_t * wrk,
-		      sock_test_socket_t * ctrl)
-{
-  sock_test_socket_t *tsock;
-  static char buf[64];
-  int i, show_rx = 0;
-
-  if (ctrl->cfg.test == SOCK_TEST_TYPE_BI
-      || ctrl->cfg.test == SOCK_TEST_TYPE_ECHO)
-    show_rx = 1;
-
-  for (i = 0; i < wrk->cfg.num_test_sockets; i++)
-    {
-      tsock = &wrk->sessions[i];
-      tsock->stats.start = ctrl->stats.start;
-
-      if (ctrl->cfg.verbose > 1)
-	{
-	  sprintf (buf, "CLIENT (fd %d) RESULTS", tsock->fd);
-	  sock_test_stats_dump (buf, &tsock->stats, show_rx, 1 /* show tx */ ,
-				ctrl->cfg.verbose);
-	}
-
-      sock_test_stats_accumulate (&ctrl->stats, &tsock->stats);
-    }
-}
-
 static void
 vtc_print_stats (sock_test_socket_t * ctrl)
 {
@@ -488,8 +521,6 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
       return;
     }

-  for (i = 0; i < vcm->n_workers; i++)
-    vtc_accumulate_stats (&vcm->workers[i], ctrl);
   vtc_print_stats (ctrl);

   ctrl->cfg.test = SOCK_TEST_TYPE_ECHO;
@@ -498,42 +529,6 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
     vtwrn ("post-test cfg sync failed!");
 }

-static void
-vtc_client_exit (void)
-{
-  vcl_test_client_main_t *vcm = &vcl_client_main;
-  vcl_test_client_worker_t *wrk = &vcm->workers[0];
-  sock_test_socket_t *ctrl = &vcm->ctrl_socket;
-  sock_test_socket_t *tsock;
-  int i, verbose;
-
-  verbose = ctrl->cfg.verbose;
-  for (i = 0; i < wrk->cfg.num_test_sockets; i++)
-    {
-      tsock = &wrk->sessions[i];
-      tsock->cfg.test = SOCK_TEST_TYPE_EXIT;
-
-      if (verbose)
-	{
-	  vtinf ("(fd %d): Sending exit cfg to server...", tsock->fd);
-	  sock_test_cfg_dump (&tsock->cfg, 1 /* is_client */ );
-	}
-      (void) vcl_test_write (tsock->fd, (uint8_t *) & tsock->cfg,
-			     sizeof (tsock->cfg), &tsock->stats, verbose);
-    }
-
-  ctrl->cfg.test = SOCK_TEST_TYPE_EXIT;
-  if (verbose)
-    {
-      vtinf ("(fd %d): Sending exit cfg to server...", ctrl->fd);
-      sock_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
-    }
-  (void) vcl_test_write (ctrl->fd, (uint8_t *) & ctrl->cfg,
-			 sizeof (ctrl->cfg), &ctrl->stats, verbose);
-  vtinf ("So long and thanks for all the fish!\n\n");
-  sleep (1);
-}
-
 static void
 dump_help (void)
 {
@@ -954,6 +949,25 @@ vtc_read_user_input (sock_test_socket_t * ctrl)
     }
 }

+static void
+vtc_ctrl_session_exit (void)
+{
+  vcl_test_client_main_t *vcm = &vcl_client_main;
+  sock_test_socket_t *ctrl = &vcm->ctrl_socket;
+  int verbose = ctrl->cfg.verbose;
+
+  ctrl->cfg.test = SOCK_TEST_TYPE_EXIT;
+  if (verbose)
+    {
+      vtinf ("(fd %d): Sending exit cfg to server...", ctrl->fd);
+      sock_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+    }
+  (void) vcl_test_write (ctrl->fd, (uint8_t *) & ctrl->cfg,
+			 sizeof (ctrl->cfg), &ctrl->stats, verbose);
+  vtinf ("So long and thanks for all the fish!\n\n");
+  sleep (1);
+}
+
 int
 main (int argc, char **argv)
 {
@@ -1046,7 +1060,7 @@ main (int argc, char **argv)
       vtc_read_user_input (ctrl);
     }

-  vtc_client_exit ();
+  vtc_ctrl_session_exit ();
   vppcom_session_close (ctrl->fd);
   vppcom_app_destroy ();
   free (vcm->workers);
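Note on the stats path above: vtc_accumulate_stats () now runs from every
client worker thread at the end of its loop rather than once from the main
thread, so accumulation into the shared ctrl socket is serialized with a GCC
test-and-set spinlock. Below is a minimal, self-contained sketch of that
pattern; acc_lock, total_bytes and N_WORKERS are illustrative names, not part
of VCL. Build with -pthread.

/* Sketch of the __sync_lock_test_and_set spinlock used by
 * vtc_accumulate_stats (). */
#include <pthread.h>
#include <stdio.h>

#define N_WORKERS 4

static int acc_lock = 0;	/* 0 = free, 1 = held */
static long total_bytes = 0;

static void
accumulate (long worker_bytes)
{
  /* __sync_lock_test_and_set returns the previous value, so we spin
   * while another thread still holds the lock. */
  while (__sync_lock_test_and_set (&acc_lock, 1))
    ;
  total_bytes += worker_bytes;	/* critical section */
  __sync_lock_release (&acc_lock);
}

static void *
worker (void *arg)
{
  int i;
  for (i = 0; i < 100000; i++)
    accumulate (1);
  return 0;
}

int
main (void)
{
  pthread_t threads[N_WORKERS];
  int i;

  for (i = 0; i < N_WORKERS; i++)
    pthread_create (&threads[i], 0, worker, 0);
  for (i = 0; i < N_WORKERS; i++)
    pthread_join (threads[i], 0);
  printf ("total: %ld (expect %d)\n", total_bytes, N_WORKERS * 100000);
  return 0;
}

A busy-wait is a reasonable choice here because the critical section is short
and contention happens only once per test run.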
diff --git a/src/vcl/vcl_test_server.c b/src/vcl/vcl_test_server.c
index 2fdd7ec157d..5c8656c9603 100644
--- a/src/vcl/vcl_test_server.c
+++ b/src/vcl/vcl_test_server.c
@@ -497,6 +497,7 @@ vts_worker_init (vcl_test_server_worker_t * wrk)
   if (rv < 0)
     vtfail ("vppcom_epoll_ctl", rv);

+  ssm->active_workers += 1;
   vtinf ("Waiting for a client to connect on port %d ...", ssm->cfg.port);
 }

@@ -532,6 +533,12 @@ vts_worker_loop (void *arg)
 	  if (wrk->wait_events[i].events & (EPOLLHUP | EPOLLRDHUP))
 	    {
 	      vppcom_session_close (conn->fd);
+	      wrk->nfds -= 1;
+	      if (!wrk->nfds)
+		{
+		  vtinf ("All client connections closed\n");
+		  goto done;
+		}
 	      continue;
 	    }
 	  if (wrk->wait_events[i].data.u32 == ~0)
@@ -542,6 +549,7 @@ vts_worker_loop (void *arg)
 	    }
 	  if (EPOLLIN & wrk->wait_events[i].events)
 	    {
+	    read_again:
 	      rx_bytes = vcl_test_read (conn->fd, conn->buf,
 					conn->buf_size, &conn->stats);
@@ -563,7 +571,6 @@ vts_worker_loop (void *arg)
 	      if (!wrk->nfds)
 		{
 		  vtinf ("All client connections closed\n");
-		  vtinf ("May the force be with you!\n");
 		  goto done;
 		}
 	      continue;
@@ -572,6 +579,9 @@ vts_worker_loop (void *arg)
 	      || (conn->cfg.test == SOCK_TEST_TYPE_BI))
 	    {
 	      vts_server_rx (conn, rx_bytes);
+	      if (vppcom_session_attr (conn->fd, VPPCOM_ATTR_GET_NREAD, 0,
+				       0) > 0)
+		goto read_again;
 	      continue;
 	    }
 	  else if (isascii (conn->buf[0]))
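Note on the read_again label above: after vts_server_rx () consumes one
batch, the worker re-reads while VPPCOM_ATTR_GET_NREAD reports queued bytes,
instead of going back to epoll and hoping for another wakeup. A rough
analogue with plain POSIX sockets, where ioctl (FIONREAD) plays the role of
the NREAD attribute; handle_readable () is an illustrative name, not VCL API:

/* Drain pending bytes after one readable event, POSIX-socket analogue of
 * the patch's vppcom_session_attr (fd, VPPCOM_ATTR_GET_NREAD, 0, 0) check. */
#include <sys/ioctl.h>
#include <unistd.h>

static int
handle_readable (int fd, char *buf, int buf_size)
{
  int n_pending, n_read;

read_again:
  n_read = (int) read (fd, buf, buf_size);
  if (n_read <= 0)
    return n_read;		/* 0: peer closed, <0: error */

  /* ... process n_read bytes ... */

  /* More data already queued? Drain it now rather than waiting for the
   * next epoll wakeup. */
  if (ioctl (fd, FIONREAD, &n_pending) == 0 && n_pending > 0)
    goto read_again;
  return n_read;
}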
diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 645550812b1..3acd1c41e70 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -285,6 +285,8 @@ vcl_session_accepted_handler (vcl_worker_t * wrk, session_accepted_msg_t * mp)
       vcl_wait_for_memory (session->vpp_evt_q);
       rx_fifo->master_session_index = session->session_index;
       tx_fifo->master_session_index = session->session_index;
+      rx_fifo->master_thread_index = vcl_get_worker_index ();
+      tx_fifo->master_thread_index = vcl_get_worker_index ();
       vec_validate (wrk->vpp_event_queues, 0);
       evt_q = uword_to_pointer (mp->vpp_event_queue_address, svm_msg_q_t *);
       wrk->vpp_event_queues[0] = evt_q;
@@ -295,7 +297,8 @@ vcl_session_accepted_handler (vcl_worker_t * wrk, session_accepted_msg_t * mp)
				     svm_msg_q_t *);
       rx_fifo->client_session_index = session->session_index;
       tx_fifo->client_session_index = session->session_index;
-
+      rx_fifo->client_thread_index = vcl_get_worker_index ();
+      tx_fifo->client_thread_index = vcl_get_worker_index ();
       vpp_wrk_index = tx_fifo->master_thread_index;
       vec_validate (wrk->vpp_event_queues, vpp_wrk_index);
       wrk->vpp_event_queues[vpp_wrk_index] = session->vpp_evt_q;
@@ -349,8 +352,7 @@ vcl_session_connected_handler (vcl_worker_t * wrk,
   if (mp->retval)
     {
       clib_warning ("VCL<%d>: ERROR: sid %u: connect failed! %U", getpid (),
-		    mp->handle, session_index, format_api_error,
-		    ntohl (mp->retval));
+		    session_index, format_api_error, ntohl (mp->retval));
       session->session_state = STATE_FAILED;
       session->vpp_handle = mp->handle;
       return session_index;
@@ -361,6 +363,8 @@ vcl_session_connected_handler (vcl_worker_t * wrk,
   vcl_wait_for_memory (rx_fifo);
   rx_fifo->client_session_index = session_index;
   tx_fifo->client_session_index = session_index;
+  rx_fifo->client_thread_index = vcl_get_worker_index ();
+  tx_fifo->client_thread_index = vcl_get_worker_index ();

   if (mp->client_event_queue_address)
     {
@@ -648,6 +652,9 @@ vppcom_session_disconnect (u32 session_handle)
   u64 vpp_handle;

   session = vcl_session_get_w_handle (wrk, session_handle);
+  if (!session)
+    return VPPCOM_EBADFD;
+
   vpp_handle = session->vpp_handle;
   state = session->session_state;

@@ -2233,6 +2240,7 @@ vcl_epoll_wait_handle_mq (vcl_worker_t * wrk, svm_msg_q_t * mq,
       switch (e->event_type)
	{
	case FIFO_EVENT_APP_RX:
+	  ASSERT (e->fifo->client_thread_index == vcl_get_worker_index ());
	  vcl_fifo_rx_evt_valid_or_break (e->fifo);
	  sid = e->fifo->client_session_index;
	  session = vcl_session_get (wrk, sid);
diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c
index 3811091f05c..b58e73eb4f8 100644
--- a/src/vnet/session/application.c
+++ b/src/vnet/session/application.c
@@ -1319,9 +1319,8 @@ static app_send_evt_handler_fn * const app_send_evt_handler_fns[3] = {
 /**
  * Send event to application
  *
- * Logic from queue perspective is non-blocking. That is, if there's
- * not enough space to enqueue a message, we return. However, if the lock
- * flag is set, we do wait for queue mutex.
+ * Logic from queue perspective is non-blocking: if there's not enough
+ * space to enqueue a message, we return.
  */
 int
 app_worker_send_event (app_worker_t * app, stream_session_t * s, u8 evt_type)
@@ -1330,6 +1329,12 @@ app_worker_send_event (app_worker_t * app, stream_session_t * s, u8 evt_type)
   return app_send_evt_handler_fns[evt_type] (app, s, 0 /* lock */ );
 }

+/**
+ * Send event to application
+ *
+ * Logic from queue perspective is blocking: we wait for the queue mutex.
+ * However, if the queue is full, we return without enqueueing the event.
+ */
 int
 app_worker_lock_and_send_event (app_worker_t * app, stream_session_t * s,
				u8 evt_type)
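Note on the two send paths documented above: app_worker_send_event () stays
non-blocking, while app_worker_lock_and_send_event () waits for the queue
mutex but still gives up when the queue is full. A toy rendering of those two
disciplines over a mutex-protected ring; mq_t, mq_try_send and
mq_lock_and_send are illustrative stand-ins for VPP's svm_msg_q_t and its
wrappers, not the actual implementation:

/* Two enqueue disciplines over one bounded queue. */
#include <pthread.h>

#define MQ_SLOTS 32

typedef struct
{
  pthread_mutex_t mutex;
  int tail, n_used;
  int slots[MQ_SLOTS];
} mq_t;

/* Non-blocking discipline (app_worker_send_event style): never wait,
 * neither for the mutex nor for space. */
static int
mq_try_send (mq_t * mq, int evt)
{
  if (pthread_mutex_trylock (&mq->mutex))
    return -1;			/* contended: caller retries later */
  if (mq->n_used == MQ_SLOTS)
    {
      pthread_mutex_unlock (&mq->mutex);
      return -1;		/* full: drop the notification */
    }
  mq->slots[mq->tail] = evt;
  mq->tail = (mq->tail + 1) % MQ_SLOTS;
  mq->n_used++;
  pthread_mutex_unlock (&mq->mutex);
  return 0;
}

/* Blocking discipline (app_worker_lock_and_send_event style): wait for the
 * mutex so producers on different threads are serialized, but return
 * instead of sleeping when the queue is full. */
static int
mq_lock_and_send (mq_t * mq, int evt)
{
  pthread_mutex_lock (&mq->mutex);
  if (mq->n_used == MQ_SLOTS)
    {
      pthread_mutex_unlock (&mq->mutex);
      return -1;
    }
  mq->slots[mq->tail] = evt;
  mq->tail = (mq->tail + 1) % MQ_SLOTS;
  mq->n_used++;
  pthread_mutex_unlock (&mq->mutex);
  return 0;
}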
diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c
index 6c517e8f92b..6b012bb845d 100644
--- a/src/vnet/session/application_interface.c
+++ b/src/vnet/session/application_interface.c
@@ -612,9 +612,14 @@ vnet_disconnect_session (vnet_disconnect_args_t * a)
     }
   else
     {
+      app_worker_t *app_wrk;
       stream_session_t *s;
+
       s = session_get_from_handle_if_valid (a->handle);
-      if (!s || s->app_wrk_index != a->app_index)
+      if (!s)
+	return VNET_API_ERROR_INVALID_VALUE;
+      app_wrk = app_worker_get (s->app_wrk_index);
+      if (app_wrk->app_index != a->app_index)
	return VNET_API_ERROR_INVALID_VALUE;

       /* We're peeking into another's thread pool. Make sure */
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index c5b2124acbd..472b38baae1 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -486,7 +486,7 @@ stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
  * @return 0 on success or negative number if failed to send notification.
  */
 static inline int
-session_enqueue_notify (stream_session_t * s, u8 lock)
+session_enqueue_notify (stream_session_t * s)
 {
   app_worker_t *app;

@@ -504,10 +504,7 @@ session_enqueue_notify (stream_session_t * s)
   }));
   /* *INDENT-ON* */

-  if (lock)
-    return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
-
-  return app_worker_send_event (app, s, FIFO_EVENT_APP_RX);
+  return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
 }

 int
@@ -519,10 +516,7 @@ session_dequeue_notify (stream_session_t * s)
   if (PREDICT_FALSE (!app))
     return -1;

-  if (session_transport_service_type (s) == TRANSPORT_SERVICE_CL)
-    return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_RX);
-
-  return app_worker_send_event (app, s, FIFO_EVENT_APP_TX);
+  return app_worker_lock_and_send_event (app, s, FIFO_EVENT_APP_TX);
 }

 /**
@@ -537,14 +531,11 @@ int
 session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
 {
   session_manager_main_t *smm = &session_manager_main;
-  transport_service_type_t tp_service;
-  int i, errors = 0, lock;
   stream_session_t *s;
+  int i, errors = 0;
   u32 *indices;

   indices = smm->session_to_enqueue[transport_proto][thread_index];
-  tp_service = transport_protocol_service_type (transport_proto);
-  lock = tp_service == TRANSPORT_SERVICE_CL;

   for (i = 0; i < vec_len (indices); i++)
     {
@@ -554,7 +545,7 @@ session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
	  errors++;
	  continue;
	}
-      if (PREDICT_FALSE (session_enqueue_notify (s, lock)))
+      if (PREDICT_FALSE (session_enqueue_notify (s)))
	errors++;
     }

@@ -838,7 +829,7 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,

   /* Find the server */
   listener = listen_session_get (listener_index);
-  app_wrk = app_worker_get (listener->app_wrk_index);
+  app_wrk = application_listener_select_worker (listener, 0);

   sm = app_worker_get_listen_segment_manager (app_wrk, listener);
   if ((rv = session_alloc_and_init (sm, tc, 1, &s)))
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index 119cdd812ad..f5aed7490ea 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -29,6 +29,7 @@ session_mq_accepted_reply_handler (void *data)
 {
   session_accepted_reply_msg_t *mp = (session_accepted_reply_msg_t *) data;
   vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  app_worker_t *app_wrk;
   local_session_t *ls;
   stream_session_t *s;

@@ -43,8 +44,6 @@ session_mq_accepted_reply_handler (void *data)

   if (session_handle_is_local (mp->handle))
     {
-      app_worker_t *app_wrk;
-      application_t *app;
       ls = application_get_local_session_from_handle (mp->handle);
       if (!ls)
	{
@@ -52,8 +51,7 @@ session_mq_accepted_reply_handler (void *data)
	  return;
	}
       app_wrk = app_worker_get (ls->app_wrk_index);
-      app = application_get (app_wrk->app_index);
-      if (app->app_index != mp->context)
+      if (app_wrk->app_index != mp->context)
	{
	  clib_warning ("server %u doesn't own local handle 0x%lx",
			mp->context, mp->handle);
@@ -71,18 +69,15 @@ session_mq_accepted_reply_handler (void *data)
	  clib_warning ("session doesn't exist");
	  return;
	}
-      if (s->app_wrk_index != mp->context)
+      app_wrk = app_worker_get (s->app_wrk_index);
+      if (app_wrk->app_index != mp->context)
	{
	  clib_warning ("app doesn't own session");
	  return;
	}
       s->session_state = SESSION_STATE_READY;
       if (!svm_fifo_is_empty (s->server_rx_fifo))
-	{
-	  app_worker_t *app;
-	  app = app_worker_get (s->app_wrk_index);
-	  app_worker_send_event (app, s, FIFO_EVENT_APP_RX);
-	}
+	app_worker_lock_and_send_event (app_wrk, s, FIFO_EVENT_APP_RX);
     }
 }
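Note on the ownership checks fixed above: a stream session stores an app
*worker* index, not an app index, so validating a message's context against
s->app_wrk_index was only correct for single-worker apps. The fixed checks
dereference the worker first. Reduced model of that check; the two structs
and the app_worker_get () declaration are stand-ins for the vnet/session
types, keeping only the fields the check needs:

/* App / app-worker indirection behind the ownership check. */
#include <stdint.h>

typedef struct { uint32_t app_index; } app_worker_t;
typedef struct { uint32_t app_wrk_index; } stream_session_t;

extern app_worker_t *app_worker_get (uint32_t app_wrk_index);

static int
session_owned_by_app (stream_session_t * s, uint32_t app_index)
{
  /* Pre-patch the code compared s->app_wrk_index against app_index
   * directly, which only holds for apps with a single worker. */
  app_worker_t *app_wrk = app_worker_get (s->app_wrk_index);
  return app_wrk->app_index == app_index;
}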
diff --git a/src/vnet/tls/tls.c b/src/vnet/tls/tls.c
index a31466baf29..a34d0db6c88 100644
--- a/src/vnet/tls/tls.c
+++ b/src/vnet/tls/tls.c
@@ -49,7 +49,7 @@ tls_add_vpp_q_evt (svm_fifo_t * f, u8 evt_type)
 static inline int
 tls_add_app_q_evt (app_worker_t * app, stream_session_t * app_session)
 {
-  return app_worker_send_event (app, app_session, FIFO_EVENT_APP_RX);
+  return app_worker_lock_and_send_event (app, app_session, FIFO_EVENT_APP_RX);
 }

 u32
--
2.16.6
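Closing note on the fifo tagging in vppcom.c: each fifo now records the VCL
worker that owns it, which lets the epoll dispatch loop assert that an rx
event arrived on the owning worker's message queue, the invariant the locking
in this patch is meant to preserve. A stand-in sketch; fifo_t models just the
two fifo fields involved, and my_worker_index () is an assumed per-thread id
helper in the spirit of vcl_get_worker_index ():

/* Owner-worker tagging and the dispatch-time assertion. */
#include <assert.h>
#include <stdint.h>

typedef struct
{
  uint32_t client_session_index;	/* session the fifo belongs to */
  uint32_t client_thread_index;	/* owning worker, set at accept/connect */
} fifo_t;

extern uint32_t my_worker_index (void);

static void
handle_rx_event (fifo_t * f)
{
  /* Mirrors the new ASSERT in vcl_epoll_wait_handle_mq (): an event for a
   * fifo owned by another worker means it was queued on the wrong mq. */
  assert (f->client_thread_index == my_worker_index ());
  /* ... look up the session via f->client_session_index and read ... */
}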