/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vcl/vcl_private.h>
/* Thread-local-storage key whose destructor (vcl_worker_cleanup_cb, see
 * vcl_worker_register_with_vpp) runs when a worker thread exits, so
 * per-worker VCL state is reclaimed automatically. */
static pthread_key_t vcl_worker_stop_key;
21 vppcom_app_state_str (app_state_t state)
28 st = "STATE_APP_START";
31 case STATE_APP_CONN_VPP:
32 st = "STATE_APP_CONN_VPP";
35 case STATE_APP_ENABLED:
36 st = "STATE_APP_ENABLED";
39 case STATE_APP_ATTACHED:
40 st = "STATE_APP_ATTACHED";
44 st = "UNKNOWN_APP_STATE";
52 vcl_wait_for_app_state_change (app_state_t app_state)
54 vcl_worker_t *wrk = vcl_worker_get_current ();
55 f64 timeout = clib_time_now (&wrk->clib_time) + vcm->cfg.app_timeout;
57 while (clib_time_now (&wrk->clib_time) < timeout)
59 if (vcm->app_state == app_state)
61 if (vcm->app_state == STATE_APP_FAILED)
62 return VPPCOM_ECONNABORTED;
64 VDBG (0, "timeout waiting for state %s (%d)",
65 vppcom_app_state_str (app_state), app_state);
66 vcl_evt (VCL_EVT_SESSION_TIMEOUT, vcm, app_state);
68 return VPPCOM_ETIMEDOUT;
72 vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
74 vcl_mq_evt_conn_t *mqc;
75 pool_get (wrk->mq_evt_conns, mqc);
76 memset (mqc, 0, sizeof (*mqc));
81 vcl_mq_evt_conn_index (vcl_worker_t * wrk, vcl_mq_evt_conn_t * mqc)
83 return (mqc - wrk->mq_evt_conns);
87 vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx)
89 return pool_elt_at_index (wrk->mq_evt_conns, mq_conn_idx);
93 vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
95 struct epoll_event e = { 0 };
96 vcl_mq_evt_conn_t *mqc;
100 mq_fd = svm_msg_q_get_consumer_eventfd (mq);
102 if (wrk->mqs_epfd < 0 || mq_fd == -1)
105 mqc = vcl_mq_evt_conn_alloc (wrk);
106 mqc_index = vcl_mq_evt_conn_index (wrk, mqc);
111 e.data.u32 = mqc_index;
112 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
114 VDBG (0, "failed to add mq eventfd to mq epoll fd");
122 vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index)
124 vcl_mq_evt_conn_t *mqc;
126 if (wrk->mqs_epfd || mqc_index == ~0)
129 mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
130 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
132 VDBG (0, "failed to del mq eventfd to mq epoll fd");
138 static vcl_worker_t *
139 vcl_worker_alloc (void)
142 pool_get (vcm->workers, wrk);
143 memset (wrk, 0, sizeof (*wrk));
144 wrk->wrk_index = wrk - vcm->workers;
145 wrk->forked_child = ~0;
150 vcl_worker_free (vcl_worker_t * wrk)
152 pool_put (vcm->workers, wrk);
156 vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
158 clib_spinlock_lock (&vcm->workers_lock);
161 /* Notify vpp that the worker is going away */
162 if (wrk->wrk_index == vcl_get_worker_index ())
163 vcl_send_app_worker_add_del (0 /* is_add */ );
165 vcl_send_child_worker_del (wrk);
167 /* Disconnect the binary api */
168 if (vec_len (vcm->workers) == 1)
169 vppcom_disconnect_from_vpp ();
171 vl_client_send_disconnect (1 /* vpp should cleanup */ );
174 if (wrk->mqs_epfd > 0)
175 close (wrk->mqs_epfd);
176 hash_free (wrk->session_index_by_vpp_handles);
177 vec_free (wrk->mq_events);
178 vec_free (wrk->mq_msg_vector);
179 vcl_worker_free (wrk);
180 clib_spinlock_unlock (&vcm->workers_lock);
184 vcl_worker_cleanup_cb (void *arg)
186 vcl_worker_t *wrk = vcl_worker_get_current ();
187 u32 wrk_index = wrk->wrk_index;
188 vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
189 vcl_set_worker_index (~0);
190 VDBG (0, "cleaned up worker %u", wrk_index);
194 vcl_worker_alloc_and_init ()
198 /* This was initialized already */
199 if (vcl_get_worker_index () != ~0)
202 /* Use separate heap map entry for worker */
203 clib_mem_set_thread_index ();
205 if (pool_elts (vcm->workers) == vcm->cfg.max_workers)
207 VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers);
211 clib_spinlock_lock (&vcm->workers_lock);
212 wrk = vcl_worker_alloc ();
213 vcl_set_worker_index (wrk->wrk_index);
214 wrk->thread_id = pthread_self ();
215 wrk->current_pid = getpid ();
218 if (vcm->cfg.use_mq_eventfd)
220 wrk->mqs_epfd = epoll_create (1);
221 if (wrk->mqs_epfd < 0)
223 clib_unix_warning ("epoll_create() returned");
228 wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
229 clib_time_init (&wrk->clib_time);
230 vec_validate (wrk->mq_events, 64);
231 vec_validate (wrk->mq_msg_vector, 128);
232 vec_reset_length (wrk->mq_msg_vector);
233 vec_validate (wrk->unhandled_evts_vector, 128);
234 vec_reset_length (wrk->unhandled_evts_vector);
235 clib_spinlock_unlock (&vcm->workers_lock);
242 vcl_worker_register_with_vpp (void)
244 vcl_worker_t *wrk = vcl_worker_get_current ();
246 clib_spinlock_lock (&vcm->workers_lock);
248 vcm->app_state = STATE_APP_ADDING_WORKER;
249 vcl_send_app_worker_add_del (1 /* is_add */ );
250 if (vcl_wait_for_app_state_change (STATE_APP_READY))
252 VDBG (0, "failed to add worker to vpp");
255 if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
256 VDBG (0, "failed to add pthread cleanup function");
257 if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
258 VDBG (0, "failed to setup key value");
260 clib_spinlock_unlock (&vcm->workers_lock);
262 VDBG (0, "added worker %u", wrk->wrk_index);
267 vcl_worker_set_bapi (void)
269 vcl_worker_t *wrk = vcl_worker_get_current ();
272 /* Find the first worker with the same pid */
273 for (i = 0; i < vec_len (vcm->workers); i++)
275 if (i == wrk->wrk_index)
277 if (vcm->workers[i].current_pid == wrk->current_pid)
279 wrk->vl_input_queue = vcm->workers[i].vl_input_queue;
280 wrk->my_client_index = vcm->workers[i].my_client_index;
288 vcl_worker_ctrl_mq (vcl_worker_t * wrk)
294 vcl_cleanup_bapi (void)
296 socket_client_main_t *scm = &socket_client_main;
297 api_main_t *am = vlibapi_get_main ();
299 am->my_client_index = ~0;
300 am->my_registration = 0;
301 am->vl_input_queue = 0;
302 am->msg_index_by_name_and_crc = 0;
305 vl_client_api_unmap ();
309 vcl_session_read_ready (vcl_session_t * session)
311 /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
312 if (PREDICT_FALSE (session->is_vep))
314 VDBG (0, "ERROR: session %u: cannot read from an epoll session!",
315 session->session_index);
316 return VPPCOM_EBADFD;
319 if (PREDICT_FALSE (!(session->session_state & (STATE_OPEN | STATE_LISTEN))))
321 vcl_session_state_t state = session->session_state;
324 rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
326 VDBG (1, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
327 session->session_index, session->vpp_handle, state,
328 vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
332 if (session->session_state & STATE_LISTEN)
333 return clib_fifo_elts (session->accept_evts_fifo);
335 if (vcl_session_is_ct (session))
336 return svm_fifo_max_dequeue_cons (session->ct_rx_fifo);
338 return svm_fifo_max_dequeue_cons (session->rx_fifo);
342 vcl_session_write_ready (vcl_session_t * session)
344 /* Assumes caller has acquired spinlock: vcm->sessions_lockp */
345 if (PREDICT_FALSE (session->is_vep))
347 VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!",
348 session->session_index, session->vpp_handle);
349 return VPPCOM_EBADFD;
352 if (PREDICT_FALSE (session->session_state & STATE_LISTEN))
354 if (session->tx_fifo)
355 return svm_fifo_max_enqueue_prod (session->tx_fifo);
357 return VPPCOM_EBADFD;
360 if (PREDICT_FALSE (!(session->session_state & STATE_OPEN)))
362 vcl_session_state_t state = session->session_state;
365 rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
366 VDBG (0, "session %u [0x%llx]: not open! state 0x%x (%s), ret %d (%s)",
367 session->session_index, session->vpp_handle, state,
368 vppcom_session_state_str (state), rv, vppcom_retval_str (rv));
372 if (vcl_session_is_ct (session))
373 return svm_fifo_max_enqueue_prod (session->ct_tx_fifo);
375 return svm_fifo_max_enqueue_prod (session->tx_fifo);
379 vcl_segment_attach (u64 segment_handle, char *name, ssvm_segment_type_t type,
382 fifo_segment_create_args_t _a, *a = &_a;
385 memset (a, 0, sizeof (*a));
386 a->segment_name = name;
387 a->segment_type = type;
389 if (type == SSVM_SEGMENT_MEMFD)
392 clib_rwlock_writer_lock (&vcm->segment_table_lock);
394 if ((rv = fifo_segment_attach (&vcm->segment_main, a)))
396 clib_warning ("svm_fifo_segment_attach ('%s') failed", name);
399 hash_set (vcm->segment_table, segment_handle, a->new_segment_indices[0]);
401 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
403 vec_reset_length (a->new_segment_indices);
408 vcl_segment_table_lookup (u64 segment_handle)
412 clib_rwlock_reader_lock (&vcm->segment_table_lock);
413 seg_indexp = hash_get (vcm->segment_table, segment_handle);
414 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
417 return VCL_INVALID_SEGMENT_INDEX;
418 return ((u32) * seg_indexp);
422 vcl_segment_detach (u64 segment_handle)
424 fifo_segment_main_t *sm = &vcm->segment_main;
425 fifo_segment_t *segment;
428 segment_index = vcl_segment_table_lookup (segment_handle);
429 if (segment_index == (u32) ~ 0)
432 clib_rwlock_writer_lock (&vcm->segment_table_lock);
434 segment = fifo_segment_get_segment (sm, segment_index);
435 fifo_segment_delete (sm, segment);
436 hash_unset (vcm->segment_table, segment_handle);
438 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
440 VDBG (0, "detached segment %u handle %u", segment_index, segment_handle);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */