X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvcl%2Fvcl_private.c;h=b14fdeea454212f4a2c71a0943dbe9e605cf56ea;hb=39d69112fcec114fde34955ceb41555221d3ba11;hp=32664312f01517fed448e04ebd493fb896543365;hpb=30e79c2e388a98160a3660f4f03103890c9b1b7c;p=vpp.git
diff --git a/src/vcl/vcl_private.c b/src/vcl/vcl_private.c
index 32664312f01..b14fdeea454 100644
--- a/src/vcl/vcl_private.c
+++ b/src/vcl/vcl_private.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018-2019 Cisco and/or its affiliates.
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
@@ -61,82 +61,13 @@ vcl_wait_for_app_state_change (app_state_t app_state)
       if (vcm->app_state == STATE_APP_FAILED)
 	return VPPCOM_ECONNABORTED;
     }
-  VDBG (0, "VCL<%d>: timeout waiting for state %s (%d)", getpid (),
+  VDBG (0, "timeout waiting for state %s (%d)",
 	vppcom_app_state_str (app_state), app_state);
 
   vcl_evt (VCL_EVT_SESSION_TIMEOUT, vcm, app_state);
 
   return VPPCOM_ETIMEDOUT;
 }
 
-vcl_cut_through_registration_t *
-vcl_ct_registration_lock_and_alloc (vcl_worker_t * wrk)
-{
-  vcl_cut_through_registration_t *cr;
-  clib_spinlock_lock (&wrk->ct_registration_lock);
-  pool_get (wrk->cut_through_registrations, cr);
-  memset (cr, 0, sizeof (*cr));
-  cr->epoll_evt_conn_index = -1;
-  return cr;
-}
-
-u32
-vcl_ct_registration_index (vcl_worker_t * wrk,
-			   vcl_cut_through_registration_t * ctr)
-{
-  return (ctr - wrk->cut_through_registrations);
-}
-
-void
-vcl_ct_registration_lock (vcl_worker_t * wrk)
-{
-  clib_spinlock_lock (&wrk->ct_registration_lock);
-}
-
-void
-vcl_ct_registration_unlock (vcl_worker_t * wrk)
-{
-  clib_spinlock_unlock (&wrk->ct_registration_lock);
-}
-
-vcl_cut_through_registration_t *
-vcl_ct_registration_get (vcl_worker_t * wrk, u32 ctr_index)
-{
-  if (pool_is_free_index (wrk->cut_through_registrations, ctr_index))
-    return 0;
-  return pool_elt_at_index (wrk->cut_through_registrations, ctr_index);
-}
-
-vcl_cut_through_registration_t *
-vcl_ct_registration_lock_and_lookup (vcl_worker_t * wrk, uword mq_addr)
-{
-  uword *p;
-  clib_spinlock_lock (&wrk->ct_registration_lock);
-  p = hash_get (wrk->ct_registration_by_mq, mq_addr);
-  if (!p)
-    return 0;
-  return vcl_ct_registration_get (wrk, p[0]);
-}
-
-void
-vcl_ct_registration_lookup_add (vcl_worker_t * wrk, uword mq_addr,
-				u32 ctr_index)
-{
-  hash_set (wrk->ct_registration_by_mq, mq_addr, ctr_index);
-}
-
-void
-vcl_ct_registration_lookup_del (vcl_worker_t * wrk, uword mq_addr)
-{
-  hash_unset (wrk->ct_registration_by_mq, mq_addr);
-}
-
-void
-vcl_ct_registration_del (vcl_worker_t * wrk,
-			 vcl_cut_through_registration_t * ctr)
-{
-  pool_put (wrk->cut_through_registrations, ctr);
-}
-
 vcl_mq_evt_conn_t *
 vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
 {
@@ -180,7 +111,7 @@ vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
   e.data.u32 = mqc_index;
   if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
     {
-      clib_warning ("failed to add mq eventfd to mq epoll fd");
+      VDBG (0, "failed to add mq eventfd to mq epoll fd");
       return -1;
     }
 
@@ -198,7 +129,7 @@ vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index)
   mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
   if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
     {
-      clib_warning ("failed to del mq eventfd to mq epoll fd");
+      VDBG (0, "failed to del mq eventfd to mq epoll fd");
      return -1;
    }
  return 0;
@@ -235,8 +166,6 @@ vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
   if (wrk->mqs_epfd > 0)
    close 
(wrk->mqs_epfd); hash_free (wrk->session_index_by_vpp_handles); - hash_free (wrk->ct_registration_by_mq); - clib_spinlock_free (&wrk->ct_registration_lock); vec_free (wrk->mq_events); vec_free (wrk->mq_msg_vector); vcl_worker_free (wrk); @@ -262,6 +191,9 @@ vcl_worker_alloc_and_init () if (vcl_get_worker_index () != ~0) return 0; + /* Use separate heap map entry for worker */ + clib_mem_set_thread_index (); + if (pool_elts (vcm->workers) == vcm->cfg.max_workers) { VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers); @@ -286,8 +218,6 @@ vcl_worker_alloc_and_init () } wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - wrk->ct_registration_by_mq = hash_create (0, sizeof (uword)); - clib_spinlock_init (&wrk->ct_registration_lock); clib_time_init (&wrk->clib_time); vec_validate (wrk->mq_events, 64); vec_validate (wrk->mq_msg_vector, 128); @@ -311,13 +241,13 @@ vcl_worker_register_with_vpp (void) vcl_send_app_worker_add_del (1 /* is_add */ ); if (vcl_wait_for_app_state_change (STATE_APP_READY)) { - clib_warning ("failed to add worker to vpp"); + VDBG (0, "failed to add worker to vpp"); return -1; } if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb)) - clib_warning ("failed to add pthread cleanup function"); + VDBG (0, "failed to add pthread cleanup function"); if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id)) - clib_warning ("failed to setup key value"); + VDBG (0, "failed to setup key value"); clib_spinlock_unlock (&vcm->workers_lock); @@ -346,144 +276,125 @@ vcl_worker_set_bapi (void) return -1; } -vcl_shared_session_t * -vcl_shared_session_alloc (void) +svm_msg_q_t * +vcl_worker_ctrl_mq (vcl_worker_t * wrk) { - vcl_shared_session_t *ss; - pool_get (vcm->shared_sessions, ss); - memset (ss, 0, sizeof (*ss)); - ss->ss_index = ss - vcm->shared_sessions; - return ss; + return wrk->ctrl_mq; } -vcl_shared_session_t * -vcl_shared_session_get (u32 ss_index) +void +vcl_segment_table_add (u64 segment_handle, u32 svm_segment_index) { - if (pool_is_free_index (vcm->shared_sessions, ss_index)) - return 0; - return pool_elt_at_index (vcm->shared_sessions, ss_index); + clib_rwlock_writer_lock (&vcm->segment_table_lock); + hash_set (vcm->segment_table, segment_handle, svm_segment_index); + clib_rwlock_writer_unlock (&vcm->segment_table_lock); +} + +u32 +vcl_segment_table_lookup (u64 segment_handle) +{ + uword *seg_indexp; + + clib_rwlock_reader_lock (&vcm->segment_table_lock); + seg_indexp = hash_get (vcm->segment_table, segment_handle); + clib_rwlock_reader_unlock (&vcm->segment_table_lock); + + if (!seg_indexp) + return VCL_INVALID_SEGMENT_INDEX; + return ((u32) * seg_indexp); } void -vcl_shared_session_free (vcl_shared_session_t * ss) +vcl_segment_table_del (u64 segment_handle) { - pool_put (vcm->shared_sessions, ss); + clib_rwlock_writer_lock (&vcm->segment_table_lock); + hash_unset (vcm->segment_table, segment_handle); + clib_rwlock_writer_unlock (&vcm->segment_table_lock); } void -vcl_worker_share_session (vcl_worker_t * parent, vcl_worker_t * wrk, - vcl_session_t * new_s) +vcl_cleanup_bapi (void) { - vcl_shared_session_t *ss; - vcl_session_t *old_s; + socket_client_main_t *scm = &socket_client_main; + api_main_t *am = vlibapi_get_main (); - if (new_s->shared_index == ~0) - { - ss = vcl_shared_session_alloc (); - ss->session_index = new_s->session_index; - vec_add1 (ss->workers, parent->wrk_index); - vec_add1 (ss->workers, wrk->wrk_index); - new_s->shared_index = ss->ss_index; - old_s = vcl_session_get (parent, new_s->session_index); - 
old_s->shared_index = ss->ss_index; - } - else - { - ss = vcl_shared_session_get (new_s->shared_index); - vec_add1 (ss->workers, wrk->wrk_index); - } + am->my_client_index = ~0; + am->my_registration = 0; + am->vl_input_queue = 0; + am->msg_index_by_name_and_crc = 0; + scm->socket_fd = 0; + + vl_client_api_unmap (); } int -vcl_worker_unshare_session (vcl_worker_t * wrk, vcl_session_t * s) +vcl_session_read_ready (vcl_session_t * session) { - vcl_shared_session_t *ss; - int i; - - ss = vcl_shared_session_get (s->shared_index); - for (i = 0; i < vec_len (ss->workers); i++) + /* Assumes caller has acquired spinlock: vcm->sessions_lockp */ + if (PREDICT_FALSE (session->is_vep)) { - if (ss->workers[i] == wrk->wrk_index) - { - vec_del1 (ss->workers, i); - break; - } + VDBG (0, "ERROR: session %u: cannot read from an epoll session!", + session->session_index); + return VPPCOM_EBADFD; } - if (vec_len (ss->workers) == 0) + if (PREDICT_FALSE (!(session->session_state & (STATE_OPEN | STATE_LISTEN)))) { - vcl_shared_session_free (ss); - return 1; - } + vcl_session_state_t state = session->session_state; + int rv; - /* If the first removed and not last, start session worker change. - * First request goes to vpp and vpp reflects it back to the right - * worker */ - if (i == 0) - vcl_send_session_worker_update (wrk, s, ss->workers[0]); + rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN); - return 0; -} - -void -vcl_worker_share_sessions (vcl_worker_t * parent_wrk) -{ - vcl_session_t *new_s; - vcl_worker_t *wrk; + VDBG (1, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)", + session->session_index, session->vpp_handle, state, + vppcom_session_state_str (state), rv, vppcom_retval_str (rv)); + return rv; + } - if (!parent_wrk->sessions) - return; + if (session->session_state & STATE_LISTEN) + return clib_fifo_elts (session->accept_evts_fifo); - wrk = vcl_worker_get_current (); - wrk->sessions = pool_dup (parent_wrk->sessions); - wrk->session_index_by_vpp_handles = - hash_dup (parent_wrk->session_index_by_vpp_handles); + if (vcl_session_is_ct (session)) + return svm_fifo_max_dequeue_cons (session->ct_rx_fifo); - /* *INDENT-OFF* */ - pool_foreach (new_s, wrk->sessions, ({ - vcl_worker_share_session (parent_wrk, wrk, new_s); - })); - /* *INDENT-ON* */ + return svm_fifo_max_dequeue_cons (session->rx_fifo); } int -vcl_session_get_refcnt (vcl_session_t * s) +vcl_session_write_ready (vcl_session_t * session) { - vcl_shared_session_t *ss; - ss = vcl_shared_session_get (s->shared_index); - if (ss) - return vec_len (ss->workers); - return 0; -} - -void -vcl_segment_table_add (u64 segment_handle, u32 svm_segment_index) -{ - clib_rwlock_writer_lock (&vcm->segment_table_lock); - hash_set (vcm->segment_table, segment_handle, svm_segment_index); - clib_rwlock_writer_unlock (&vcm->segment_table_lock); -} + /* Assumes caller has acquired spinlock: vcm->sessions_lockp */ + if (PREDICT_FALSE (session->is_vep)) + { + VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!", + session->session_index, session->vpp_handle); + return VPPCOM_EBADFD; + } -u32 -vcl_segment_table_lookup (u64 segment_handle) -{ - uword *seg_indexp; + if (PREDICT_FALSE (session->session_state & STATE_LISTEN)) + { + if (session->tx_fifo) + return svm_fifo_max_enqueue_prod (session->tx_fifo); + else + return VPPCOM_EBADFD; + } - clib_rwlock_reader_lock (&vcm->segment_table_lock); - seg_indexp = hash_get (vcm->segment_table, segment_handle); - clib_rwlock_reader_unlock (&vcm->segment_table_lock); + if 
(PREDICT_FALSE (!(session->session_state & STATE_OPEN))) + { + vcl_session_state_t state = session->session_state; + int rv; + + rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN); + VDBG (0, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)", + session->session_index, session->vpp_handle, state, + vppcom_session_state_str (state), rv, vppcom_retval_str (rv)); + return rv; + } - if (!seg_indexp) - return VCL_INVALID_SEGMENT_INDEX; - return ((u32) * seg_indexp); -} + if (vcl_session_is_ct (session)) + return svm_fifo_max_enqueue_prod (session->ct_tx_fifo); -void -vcl_segment_table_del (u64 segment_handle) -{ - clib_rwlock_writer_lock (&vcm->segment_table_lock); - hash_unset (vcm->segment_table, segment_handle); - clib_rwlock_writer_unlock (&vcm->segment_table_lock); + return svm_fifo_max_enqueue_prod (session->tx_fifo); } /*
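
A minimal sketch (not part of the patch): the patch keeps the process-wide segment table helpers, vcl_segment_table_add(), vcl_segment_table_lookup() and vcl_segment_table_del(), a hash keyed by the 64-bit segment handle and protected by vcm->segment_table_lock. Add and delete take the writer lock; lookup takes the reader lock and returns VCL_INVALID_SEGMENT_INDEX on a miss. The standalone C program below illustrates only that reader/writer locking discipline; it uses pthreads and a fixed-size array instead of the vppinfra primitives, and every name in it (seg_table_add/lookup/del, TABLE_SLOTS, ...) is made up for illustration, not VCL API.

/*
 * Standalone sketch only -- NOT the VPP implementation.  The real helpers
 * use clib_rwlock_t and a vppinfra hash keyed by the u64 segment handle.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_SEGMENT_INDEX ((uint32_t) ~0)
#define TABLE_SLOTS 64

typedef struct
{
  uint64_t handle;		/* segment handle (key) */
  uint32_t index;		/* svm segment index (value) */
  int in_use;
} seg_entry_t;

static seg_entry_t seg_table[TABLE_SLOTS];
static pthread_rwlock_t seg_table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Writers (add/del) take the exclusive lock so readers never observe a
 * half-updated entry. */
static void
seg_table_add (uint64_t handle, uint32_t index)
{
  pthread_rwlock_wrlock (&seg_table_lock);
  for (int i = 0; i < TABLE_SLOTS; i++)
    if (!seg_table[i].in_use)
      {
	seg_table[i].handle = handle;
	seg_table[i].index = index;
	seg_table[i].in_use = 1;
	break;
      }
  pthread_rwlock_unlock (&seg_table_lock);
}

static void
seg_table_del (uint64_t handle)
{
  pthread_rwlock_wrlock (&seg_table_lock);
  for (int i = 0; i < TABLE_SLOTS; i++)
    if (seg_table[i].in_use && seg_table[i].handle == handle)
      {
	seg_table[i].in_use = 0;
	break;
      }
  pthread_rwlock_unlock (&seg_table_lock);
}

/* Lookups take the shared lock, so many workers can resolve handles in
 * parallel; a miss returns the invalid-index sentinel. */
static uint32_t
seg_table_lookup (uint64_t handle)
{
  uint32_t rv = INVALID_SEGMENT_INDEX;
  pthread_rwlock_rdlock (&seg_table_lock);
  for (int i = 0; i < TABLE_SLOTS; i++)
    if (seg_table[i].in_use && seg_table[i].handle == handle)
      {
	rv = seg_table[i].index;
	break;
      }
  pthread_rwlock_unlock (&seg_table_lock);
  return rv;
}

int
main (void)
{
  seg_table_add (0x1234abcdULL, 7);
  printf ("lookup: %u\n", (unsigned) seg_table_lookup (0x1234abcdULL));
  seg_table_del (0x1234abcdULL);
  printf ("after del: %u\n", (unsigned) seg_table_lookup (0x1234abcdULL));
  return 0;
}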
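A second sketch (also not part of the patch): the vcl_session_read_ready() and vcl_session_write_ready() helpers added above share one convention. They return a negative VPPCOM error when a session cannot be polled (VPPCOM_EBADFD for epoll sessions, VPPCOM_ECONNRESET or VPPCOM_ENOTCONN when the session is neither open nor listening), and otherwise a non-negative amount of readiness: for read, the queued accept events on a listener or the bytes that can be dequeued from the rx fifo; for write, the space that can be enqueued on the tx fifo, with the ct_rx_fifo/ct_tx_fifo used when the session is cut-through. The toy program below mirrors only the read-side dispatch; its types, states and error values are invented stand-ins, not the VCL definitions.

#include <stdio.h>

enum { ERR_EBADFD = -1, ERR_ENOTCONN = -2, ERR_ECONNRESET = -3 };
enum { S_LISTEN = 1 << 0, S_OPEN = 1 << 1, S_DISCONNECT = 1 << 2 };

typedef struct
{
  int is_vep;			/* epoll "session", cannot carry data */
  int state;			/* bitmask of S_* flags */
  int accept_evts;		/* queued connections (listeners) */
  int rx_bytes;			/* bytes waiting in the rx fifo */
} toy_session_t;

/* Negative result: not pollable; non-negative: how much is ready. */
static int
toy_read_ready (const toy_session_t * s)
{
  if (s->is_vep)
    return ERR_EBADFD;
  if (!(s->state & (S_OPEN | S_LISTEN)))
    return (s->state & S_DISCONNECT) ? ERR_ECONNRESET : ERR_ENOTCONN;
  if (s->state & S_LISTEN)
    return s->accept_evts;	/* readiness == pending accepts */
  return s->rx_bytes;		/* readiness == dequeueable bytes */
}

int
main (void)
{
  toy_session_t listener = { 0, S_LISTEN, 3, 0 };
  toy_session_t stream = { 0, S_OPEN, 0, 1460 };
  toy_session_t closed = { 0, S_DISCONNECT, 0, 0 };
  printf ("listener=%d stream=%d closed=%d\n",
	  toy_read_ready (&listener), toy_read_ready (&stream),
	  toy_read_ready (&closed));
  return 0;
}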