/*
- * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
#include <vcl/vcl_private.h>
-pthread_key_t vcl_worker_stop_key;
+static pthread_key_t vcl_worker_stop_key;
static const char *
vppcom_app_state_str (app_state_t state)
if (vcm->app_state == STATE_APP_FAILED)
return VPPCOM_ECONNABORTED;
}
- VDBG (0, "VCL<%d>: timeout waiting for state %s (%d)", getpid (),
+ VDBG (0, "timeout waiting for state %s (%d)",
vppcom_app_state_str (app_state), app_state);
vcl_evt (VCL_EVT_SESSION_TIMEOUT, vcm, app_state);
return VPPCOM_ETIMEDOUT;
}
-vcl_cut_through_registration_t *
-vcl_ct_registration_lock_and_alloc (vcl_worker_t * wrk)
-{
- vcl_cut_through_registration_t *cr;
- clib_spinlock_lock (&wrk->ct_registration_lock);
- pool_get (wrk->cut_through_registrations, cr);
- memset (cr, 0, sizeof (*cr));
- cr->epoll_evt_conn_index = -1;
- return cr;
-}
-
-u32
-vcl_ct_registration_index (vcl_worker_t * wrk,
- vcl_cut_through_registration_t * ctr)
-{
- return (ctr - wrk->cut_through_registrations);
-}
-
-void
-vcl_ct_registration_lock (vcl_worker_t * wrk)
-{
- clib_spinlock_lock (&wrk->ct_registration_lock);
-}
-
-void
-vcl_ct_registration_unlock (vcl_worker_t * wrk)
-{
- clib_spinlock_unlock (&wrk->ct_registration_lock);
-}
-
-vcl_cut_through_registration_t *
-vcl_ct_registration_get (vcl_worker_t * wrk, u32 ctr_index)
-{
- if (pool_is_free_index (wrk->cut_through_registrations, ctr_index))
- return 0;
- return pool_elt_at_index (wrk->cut_through_registrations, ctr_index);
-}
-
-vcl_cut_through_registration_t *
-vcl_ct_registration_lock_and_lookup (vcl_worker_t * wrk, uword mq_addr)
-{
- uword *p;
- clib_spinlock_lock (&wrk->ct_registration_lock);
- p = hash_get (wrk->ct_registration_by_mq, mq_addr);
- if (!p)
- return 0;
- return vcl_ct_registration_get (wrk, p[0]);
-}
-
-void
-vcl_ct_registration_lookup_add (vcl_worker_t * wrk, uword mq_addr,
- u32 ctr_index)
-{
- hash_set (wrk->ct_registration_by_mq, mq_addr, ctr_index);
-}
-
-void
-vcl_ct_registration_lookup_del (vcl_worker_t * wrk, uword mq_addr)
-{
- hash_unset (wrk->ct_registration_by_mq, mq_addr);
-}
-
-void
-vcl_ct_registration_del (vcl_worker_t * wrk,
- vcl_cut_through_registration_t * ctr)
-{
- pool_put (wrk->cut_through_registrations, ctr);
-}
-
vcl_mq_evt_conn_t *
vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
{
e.data.u32 = mqc_index;
if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
{
- clib_warning ("failed to add mq eventfd to mq epoll fd");
+ VDBG (0, "failed to add mq eventfd to mq epoll fd");
return -1;
}
mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
{
- clib_warning ("failed to del mq eventfd to mq epoll fd");
+ VDBG (0, "failed to del mq eventfd to mq epoll fd");
return -1;
}
return 0;
pool_get (vcm->workers, wrk);
memset (wrk, 0, sizeof (*wrk));
wrk->wrk_index = wrk - vcm->workers;
+ wrk->forked_child = ~0;
return wrk;
}
pool_put (vcm->workers, wrk);
}
-static void
-vcl_worker_cleanup (void *arg)
+void
+vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
{
- vcl_worker_t *wrk = vcl_worker_get_current ();
+ clib_spinlock_lock (&vcm->workers_lock);
+ if (notify_vpp)
+ {
+ /* Notify vpp that the worker is going away */
+ if (wrk->wrk_index == vcl_get_worker_index ())
+ vcl_send_app_worker_add_del (0 /* is_add */ );
+ else
+ vcl_send_child_worker_del (wrk);
+
+ /* Disconnect the binary api */
+ if (vec_len (vcm->workers) == 1)
+ vppcom_disconnect_from_vpp ();
+ else
+ vl_client_send_disconnect (1 /* vpp should cleanup */ );
+ }
- VDBG (0, "cleaning up worker %u", wrk->wrk_index);
- vcl_send_app_worker_add_del (0 /* is_add */ );
- close (wrk->mqs_epfd);
+ if (wrk->mqs_epfd > 0)
+ close (wrk->mqs_epfd);
hash_free (wrk->session_index_by_vpp_handles);
- hash_free (wrk->ct_registration_by_mq);
- clib_spinlock_free (&wrk->ct_registration_lock);
vec_free (wrk->mq_events);
vec_free (wrk->mq_msg_vector);
- vcl_set_worker_index (~0);
vcl_worker_free (wrk);
+ clib_spinlock_unlock (&vcm->workers_lock);
+}
+
+/*
+ * pthread TSD destructor installed via pthread_key_create in
+ * vcl_worker_register_with_vpp; runs when a worker thread exits.
+ * Tears down the current worker's state and notifies vpp, then
+ * invalidates the thread-local worker index so stale lookups fail fast.
+ */
+static void
+vcl_worker_cleanup_cb (void *arg)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  /* Save the index before cleanup frees the worker pool element */
+  u32 wrk_index = wrk->wrk_index;
+  vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
+  vcl_set_worker_index (~0);
+  VDBG (0, "cleaned up worker %u", wrk_index);
+}
vcl_worker_t *
if (vcl_get_worker_index () != ~0)
return 0;
+ /* Use separate heap map entry for worker */
+ clib_mem_set_thread_index ();
+
+ if (pool_elts (vcm->workers) == vcm->cfg.max_workers)
+ {
+ VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers);
+ return 0;
+ }
+
+ clib_spinlock_lock (&vcm->workers_lock);
wrk = vcl_worker_alloc ();
vcl_set_worker_index (wrk->wrk_index);
+ wrk->thread_id = pthread_self ();
+ wrk->current_pid = getpid ();
wrk->mqs_epfd = -1;
if (vcm->cfg.use_mq_eventfd)
if (wrk->mqs_epfd < 0)
{
clib_unix_warning ("epoll_create() returned");
- return 0;
+ goto done;
}
}
wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
- wrk->ct_registration_by_mq = hash_create (0, sizeof (uword));
- clib_spinlock_init (&wrk->ct_registration_lock);
clib_time_init (&wrk->clib_time);
vec_validate (wrk->mq_events, 64);
vec_validate (wrk->mq_msg_vector, 128);
vec_reset_length (wrk->mq_msg_vector);
+ vec_validate (wrk->unhandled_evts_vector, 128);
+ vec_reset_length (wrk->unhandled_evts_vector);
+ clib_spinlock_unlock (&vcm->workers_lock);
- if (wrk->wrk_index == 0)
- return wrk;
+done:
+ return wrk;
+}
+
+/*
+ * Register the current worker with vpp under vcm->workers_lock and
+ * install the per-thread cleanup destructor.
+ *
+ * Returns 0 on success, -1 if vpp does not acknowledge the worker add.
+ */
+int
+vcl_worker_register_with_vpp (void)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+
+  clib_spinlock_lock (&vcm->workers_lock);
-  while (vcm->app_state == STATE_APP_ADDING_WORKER)
-    ;
vcm->app_state = STATE_APP_ADDING_WORKER;
vcl_send_app_worker_add_del (1 /* is_add */ );
if (vcl_wait_for_app_state_change (STATE_APP_READY))
{
-      clib_warning ("failed to add worker to vpp");
-      return 0;
+      VDBG (0, "failed to add worker to vpp");
+      /* Must drop workers_lock before bailing out, otherwise every
+       * subsequent worker registration deadlocks on it. */
+      clib_spinlock_unlock (&vcm->workers_lock);
+      return -1;
}
+  if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
+    VDBG (0, "failed to add pthread cleanup function");
+  if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
+    VDBG (0, "failed to setup key value");
-  if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup))
-    clib_warning ("failed to add pthread cleanup function");
+  clib_spinlock_unlock (&vcm->workers_lock);
VDBG (0, "added worker %u", wrk->wrk_index);
+  return 0;
+}
- return wrk;
+/*
+ * Share the binary-api connection of a sibling worker in the same
+ * process: copy its input queue and client index into the current
+ * worker. Returns 0 if a sibling with matching pid is found, -1
+ * otherwise.
+ *
+ * NOTE(review): iterates vcm->workers without visibly holding
+ * vcm->workers_lock — presumably callers serialize this; confirm.
+ */
+int
+vcl_worker_set_bapi (void)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  int i;
+
+  /* Find the first worker with the same pid */
+  for (i = 0; i < vec_len (vcm->workers); i++)
+    {
+      if (i == wrk->wrk_index)
+	continue;
+      if (vcm->workers[i].current_pid == wrk->current_pid)
+	{
+	  wrk->vl_input_queue = vcm->workers[i].vl_input_queue;
+	  wrk->my_client_index = vcm->workers[i].my_client_index;
+	  return 0;
+	}
+    }
+  return -1;
+}
+
+/* Accessor for the worker's vpp control message queue. */
+svm_msg_q_t *
+vcl_worker_ctrl_mq (vcl_worker_t * wrk)
+{
+  return wrk->ctrl_mq;
+}
+
+/*
+ * Reset inherited binary-api client state (api_main fields and the
+ * socket-client fd) and unmap the shared-memory api segment, so a new
+ * api connection can be established from scratch — presumably used
+ * after fork; confirm against callers.
+ */
+void
+vcl_cleanup_bapi (void)
+{
+  socket_client_main_t *scm = &socket_client_main;
+  api_main_t *am = vlibapi_get_main ();
+
+  am->my_client_index = ~0;
+  am->my_registration = 0;
+  am->vl_input_queue = 0;
+  am->msg_index_by_name_and_crc = 0;
+  scm->socket_fd = 0;
+
+  vl_client_api_unmap ();
+}
+
+/*
+ * Number of bytes (or, for listeners, pending accept events) available
+ * to read on a session.
+ *
+ * Returns:
+ *  >= 0            bytes readable / queued accept events
+ *  VPPCOM_EBADFD   session is an epoll vep, not a data session
+ *  VPPCOM_ECONNRESET / VPPCOM_ENOTCONN  session not open
+ */
+int
+vcl_session_read_ready (vcl_session_t * session)
+{
+  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
+  if (PREDICT_FALSE (session->is_vep))
+    {
+      VDBG (0, "ERROR: session %u: cannot read from an epoll session!",
+	    session->session_index);
+      return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (!(session->session_state & (STATE_OPEN | STATE_LISTEN))))
+    {
+      vcl_session_state_t state = session->session_state;
+      int rv;
+
+      /* Disconnected sessions report reset; everything else not-connected */
+      rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
+
+      VDBG (1, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
+	    session->session_index, session->vpp_handle, state,
+	    vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
+      return rv;
+    }
+
+  /* Listeners report readiness as the number of queued accept events */
+  if (session->session_state & STATE_LISTEN)
+    return clib_fifo_elts (session->accept_evts_fifo);
+
+  /* Cut-through sessions read from the ct fifo, others from the vpp fifo */
+  if (vcl_session_is_ct (session))
+    return svm_fifo_max_dequeue_cons (session->ct_rx_fifo);
+
+  return svm_fifo_max_dequeue_cons (session->rx_fifo);
+}
+
+/*
+ * Number of bytes that can be written to a session's tx fifo.
+ *
+ * Returns:
+ *  >= 0            enqueueable bytes
+ *  VPPCOM_EBADFD   epoll vep session, or listener without a tx fifo
+ *  VPPCOM_ECONNRESET / VPPCOM_ENOTCONN  session not open
+ */
+int
+vcl_session_write_ready (vcl_session_t * session)
+{
+  /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
+  if (PREDICT_FALSE (session->is_vep))
+    {
+      VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!",
+	    session->session_index, session->vpp_handle);
+      return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (session->session_state & STATE_LISTEN))
+    {
+      /* A listener with a tx fifo is still writable (e.g. udp) */
+      if (session->tx_fifo)
+	return svm_fifo_max_enqueue_prod (session->tx_fifo);
+      else
+	return VPPCOM_EBADFD;
+    }
+
+  if (PREDICT_FALSE (!(session->session_state & STATE_OPEN)))
+    {
+      vcl_session_state_t state = session->session_state;
+      int rv;
+
+      rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
+      VDBG (0, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
+	    session->session_index, session->vpp_handle, state,
+	    vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
+      return rv;
+    }
+
+  /* Cut-through sessions write to the ct fifo, others to the vpp fifo */
+  if (vcl_session_is_ct (session))
+    return svm_fifo_max_enqueue_prod (session->ct_tx_fifo);
+
+  return svm_fifo_max_enqueue_prod (session->tx_fifo);
}
+/*
+ * Attach to a fifo segment identified by segment_handle and record the
+ * resulting segment index in vcm->segment_table under the writer lock.
+ *
+ * Returns 0 on success, the fifo_segment_attach error code otherwise.
+ */
+int
+vcl_segment_attach (u64 segment_handle, char *name, ssvm_segment_type_t type,
+		    int fd)
+{
+  fifo_segment_create_args_t _a, *a = &_a;
+  int rv;
+
+  memset (a, 0, sizeof (*a));
+  a->segment_name = (char *) name;
+  a->segment_type = type;
+
+  if (type == SSVM_SEGMENT_MEMFD)
+    a->memfd_fd = fd;
+
+  clib_rwlock_writer_lock (&vcm->segment_table_lock);
+
+  if ((rv = fifo_segment_attach (&vcm->segment_main, a)))
+    {
+      clib_warning ("svm_fifo_segment_attach ('%s') failed", name);
+      /* Must release the writer lock on the error path too, otherwise
+       * the rwlock is leaked and all later attach/lookup calls hang. */
+      clib_rwlock_writer_unlock (&vcm->segment_table_lock);
+      return rv;
+    }
+  hash_set (vcm->segment_table, segment_handle, a->new_segment_indices[0]);
+
+  clib_rwlock_writer_unlock (&vcm->segment_table_lock);
+
+  vec_reset_length (a->new_segment_indices);
+  return 0;
+}
+
+/*
+ * Map a segment handle to its local segment index, taking the table
+ * reader lock. Returns VCL_INVALID_SEGMENT_INDEX if not found.
+ */
+u32
+vcl_segment_table_lookup (u64 segment_handle)
+{
+  uword *seg_indexp;
+
+  clib_rwlock_reader_lock (&vcm->segment_table_lock);
+  seg_indexp = hash_get (vcm->segment_table, segment_handle);
+  clib_rwlock_reader_unlock (&vcm->segment_table_lock);
+
+  if (!seg_indexp)
+    return VCL_INVALID_SEGMENT_INDEX;
+  return ((u32) * seg_indexp);
+}
+
+/*
+ * Detach and delete the fifo segment associated with segment_handle,
+ * removing it from the segment table under the writer lock. No-op if
+ * the handle is unknown.
+ */
+void
+vcl_segment_detach (u64 segment_handle)
+{
+  fifo_segment_main_t *sm = &vcm->segment_main;
+  fifo_segment_t *segment;
+  u32 segment_index;
+
+  segment_index = vcl_segment_table_lookup (segment_handle);
+  /* NOTE(review): assumes VCL_INVALID_SEGMENT_INDEX == (u32) ~0 — confirm */
+  if (segment_index == (u32) ~ 0)
+    return;
+
+  clib_rwlock_writer_lock (&vcm->segment_table_lock);
+
+  segment = fifo_segment_get_segment (sm, segment_index);
+  fifo_segment_delete (sm, segment);
+  hash_unset (vcm->segment_table, segment_handle);
+
+  clib_rwlock_writer_unlock (&vcm->segment_table_lock);
+
+  /* Fix: segment_handle is u64; printing it with %u is a printf
+   * format/argument mismatch (UB) and truncates the value on LP64. */
+  VDBG (0, "detached segment %u handle %lu", segment_index, segment_handle);
+}
+
+
/*
* fd.io coding-style-patch-verification: ON
*