/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vcl/vcl_private.h>
18 static pthread_key_t vcl_worker_stop_key;
21 vppcom_app_state_str (app_state_t state)
28 st = "STATE_APP_START";
31 case STATE_APP_CONN_VPP:
32 st = "STATE_APP_CONN_VPP";
35 case STATE_APP_ENABLED:
36 st = "STATE_APP_ENABLED";
39 case STATE_APP_ATTACHED:
40 st = "STATE_APP_ATTACHED";
44 st = "UNKNOWN_APP_STATE";
52 vcl_wait_for_app_state_change (app_state_t app_state)
54 vcl_worker_t *wrk = vcl_worker_get_current ();
55 f64 timeout = clib_time_now (&wrk->clib_time) + vcm->cfg.app_timeout;
57 while (clib_time_now (&wrk->clib_time) < timeout)
59 if (vcm->app_state == app_state)
61 if (vcm->app_state == STATE_APP_FAILED)
62 return VPPCOM_ECONNABORTED;
64 VDBG (0, "VCL<%d>: timeout waiting for state %s (%d)", getpid (),
65 vppcom_app_state_str (app_state), app_state);
66 vcl_evt (VCL_EVT_SESSION_TIMEOUT, vcm, app_state);
68 return VPPCOM_ETIMEDOUT;
71 vcl_cut_through_registration_t *
72 vcl_ct_registration_lock_and_alloc (vcl_worker_t * wrk)
74 vcl_cut_through_registration_t *cr;
75 clib_spinlock_lock (&wrk->ct_registration_lock);
76 pool_get (wrk->cut_through_registrations, cr);
77 memset (cr, 0, sizeof (*cr));
78 cr->epoll_evt_conn_index = -1;
83 vcl_ct_registration_index (vcl_worker_t * wrk,
84 vcl_cut_through_registration_t * ctr)
86 return (ctr - wrk->cut_through_registrations);
90 vcl_ct_registration_lock (vcl_worker_t * wrk)
92 clib_spinlock_lock (&wrk->ct_registration_lock);
96 vcl_ct_registration_unlock (vcl_worker_t * wrk)
98 clib_spinlock_unlock (&wrk->ct_registration_lock);
101 vcl_cut_through_registration_t *
102 vcl_ct_registration_get (vcl_worker_t * wrk, u32 ctr_index)
104 if (pool_is_free_index (wrk->cut_through_registrations, ctr_index))
106 return pool_elt_at_index (wrk->cut_through_registrations, ctr_index);
109 vcl_cut_through_registration_t *
110 vcl_ct_registration_lock_and_lookup (vcl_worker_t * wrk, uword mq_addr)
113 clib_spinlock_lock (&wrk->ct_registration_lock);
114 p = hash_get (wrk->ct_registration_by_mq, mq_addr);
117 return vcl_ct_registration_get (wrk, p[0]);
121 vcl_ct_registration_lookup_add (vcl_worker_t * wrk, uword mq_addr,
124 hash_set (wrk->ct_registration_by_mq, mq_addr, ctr_index);
128 vcl_ct_registration_lookup_del (vcl_worker_t * wrk, uword mq_addr)
130 hash_unset (wrk->ct_registration_by_mq, mq_addr);
134 vcl_ct_registration_del (vcl_worker_t * wrk,
135 vcl_cut_through_registration_t * ctr)
137 pool_put (wrk->cut_through_registrations, ctr);
141 vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
143 vcl_mq_evt_conn_t *mqc;
144 pool_get (wrk->mq_evt_conns, mqc);
145 memset (mqc, 0, sizeof (*mqc));
150 vcl_mq_evt_conn_index (vcl_worker_t * wrk, vcl_mq_evt_conn_t * mqc)
152 return (mqc - wrk->mq_evt_conns);
156 vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx)
158 return pool_elt_at_index (wrk->mq_evt_conns, mq_conn_idx);
162 vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
164 struct epoll_event e = { 0 };
165 vcl_mq_evt_conn_t *mqc;
169 mq_fd = svm_msg_q_get_consumer_eventfd (mq);
171 if (wrk->mqs_epfd < 0 || mq_fd == -1)
174 mqc = vcl_mq_evt_conn_alloc (wrk);
175 mqc_index = vcl_mq_evt_conn_index (wrk, mqc);
180 e.data.u32 = mqc_index;
181 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
183 clib_warning ("failed to add mq eventfd to mq epoll fd");
191 vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index)
193 vcl_mq_evt_conn_t *mqc;
195 if (wrk->mqs_epfd || mqc_index == ~0)
198 mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
199 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
201 clib_warning ("failed to del mq eventfd to mq epoll fd");
207 static vcl_worker_t *
208 vcl_worker_alloc (void)
211 pool_get (vcm->workers, wrk);
212 memset (wrk, 0, sizeof (*wrk));
213 wrk->wrk_index = wrk - vcm->workers;
218 vcl_worker_free (vcl_worker_t * wrk)
220 pool_put (vcm->workers, wrk);
224 vcl_worker_cleanup (u8 notify_vpp)
226 vcl_worker_t *wrk = vcl_worker_get_current ();
228 clib_spinlock_lock (&vcm->workers_lock);
230 vcl_send_app_worker_add_del (0 /* is_add */ );
231 if (wrk->mqs_epfd > 0)
232 close (wrk->mqs_epfd);
233 hash_free (wrk->session_index_by_vpp_handles);
234 hash_free (wrk->ct_registration_by_mq);
235 clib_spinlock_free (&wrk->ct_registration_lock);
236 vec_free (wrk->mq_events);
237 vec_free (wrk->mq_msg_vector);
238 vcl_set_worker_index (~0);
239 vcl_worker_free (wrk);
240 clib_spinlock_unlock (&vcm->workers_lock);
244 vcl_worker_cleanup_cb (void *arg)
246 u32 wrk_index = vcl_get_worker_index ();
247 vcl_worker_cleanup (1 /* notify vpp */ );
248 VDBG (0, "cleaned up worker %u", wrk_index);
252 vcl_worker_alloc_and_init ()
256 /* This was initialized already */
257 if (vcl_get_worker_index () != ~0)
260 if (pool_elts (vcm->workers) == vcm->cfg.max_workers)
262 VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers);
266 clib_spinlock_lock (&vcm->workers_lock);
267 wrk = vcl_worker_alloc ();
268 vcl_set_worker_index (wrk->wrk_index);
269 wrk->thread_id = pthread_self ();
270 wrk->current_pid = getpid ();
273 if (vcm->cfg.use_mq_eventfd)
275 wrk->mqs_epfd = epoll_create (1);
276 if (wrk->mqs_epfd < 0)
278 clib_unix_warning ("epoll_create() returned");
283 wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
284 wrk->ct_registration_by_mq = hash_create (0, sizeof (uword));
285 clib_spinlock_init (&wrk->ct_registration_lock);
286 clib_time_init (&wrk->clib_time);
287 vec_validate (wrk->mq_events, 64);
288 vec_validate (wrk->mq_msg_vector, 128);
289 vec_reset_length (wrk->mq_msg_vector);
290 vec_validate (wrk->unhandled_evts_vector, 128);
291 vec_reset_length (wrk->unhandled_evts_vector);
292 clib_spinlock_unlock (&vcm->workers_lock);
299 vcl_worker_register_with_vpp (void)
301 vcl_worker_t *wrk = vcl_worker_get_current ();
303 clib_spinlock_lock (&vcm->workers_lock);
305 vcm->app_state = STATE_APP_ADDING_WORKER;
306 vcl_send_app_worker_add_del (1 /* is_add */ );
307 if (vcl_wait_for_app_state_change (STATE_APP_READY))
309 clib_warning ("failed to add worker to vpp");
313 if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
314 clib_warning ("failed to add pthread cleanup function");
315 if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
316 clib_warning ("failed to setup key value");
318 clib_spinlock_unlock (&vcm->workers_lock);
320 VDBG (0, "added worker %u", wrk->wrk_index);
325 vcl_worker_set_bapi (void)
327 vcl_worker_t *wrk = vcl_worker_get_current ();
330 /* Find the first worker with the same pid */
331 for (i = 0; i < vec_len (vcm->workers); i++)
333 if (i == wrk->wrk_index)
335 if (vcm->workers[i].current_pid == wrk->current_pid)
337 wrk->vl_input_queue = vcm->workers[i].vl_input_queue;
338 wrk->my_client_index = vcm->workers[i].my_client_index;
345 vcl_shared_session_t *
346 vcl_shared_session_alloc (void)
348 vcl_shared_session_t *ss;
349 pool_get (vcm->shared_sessions, ss);
350 memset (ss, 0, sizeof (*ss));
351 ss->ss_index = ss - vcm->shared_sessions;
355 vcl_shared_session_t *
356 vcl_shared_session_get (u32 ss_index)
358 if (pool_is_free_index (vcm->shared_sessions, ss_index))
360 return pool_elt_at_index (vcm->shared_sessions, ss_index);
364 vcl_shared_session_free (vcl_shared_session_t * ss)
366 pool_put (vcm->shared_sessions, ss);
370 vcl_worker_share_session (vcl_worker_t * parent, vcl_worker_t * wrk,
371 vcl_session_t * new_s)
373 vcl_shared_session_t *ss;
376 s = vcl_session_get (parent, new_s->session_index);
377 if (s->shared_index == ~0)
379 ss = vcl_shared_session_alloc ();
380 vec_add1 (ss->workers, parent->wrk_index);
381 s->shared_index = ss->ss_index;
385 ss = vcl_shared_session_get (s->shared_index);
387 new_s->shared_index = ss->ss_index;
388 vec_add1 (ss->workers, wrk->wrk_index);
392 vcl_worker_unshare_session (vcl_worker_t * wrk, vcl_session_t * s)
394 vcl_shared_session_t *ss;
397 ss = vcl_shared_session_get (s->shared_index);
398 for (i = 0; i < vec_len (ss->workers); i++)
400 if (ss->workers[i] == wrk->wrk_index)
402 vec_del1 (ss->workers, i);
407 if (vec_len (ss->workers) == 0)
409 vcl_shared_session_free (ss);
417 vcl_worker_share_sessions (u32 parent_wrk_index)
419 vcl_worker_t *parent_wrk, *wrk;
420 vcl_session_t *new_s;
422 parent_wrk = vcl_worker_get (parent_wrk_index);
423 if (!parent_wrk->sessions)
426 wrk = vcl_worker_get_current ();
427 wrk->sessions = pool_dup (parent_wrk->sessions);
428 wrk->session_index_by_vpp_handles =
429 hash_dup (parent_wrk->session_index_by_vpp_handles);
432 pool_foreach (new_s, wrk->sessions, ({
433 vcl_worker_share_session (parent_wrk, wrk, new_s);
439 vcl_session_get_refcnt (vcl_session_t * s)
441 vcl_shared_session_t *ss;
442 ss = vcl_shared_session_get (s->shared_index);
444 return vec_len (ss->workers);
449 vcl_segment_table_add (u64 segment_handle, u32 svm_segment_index)
451 clib_rwlock_writer_lock (&vcm->segment_table_lock);
452 hash_set (vcm->segment_table, segment_handle, svm_segment_index);
453 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
457 vcl_segment_table_lookup (u64 segment_handle)
461 clib_rwlock_reader_lock (&vcm->segment_table_lock);
462 seg_indexp = hash_get (vcm->segment_table, segment_handle);
463 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
466 return VCL_INVALID_SEGMENT_INDEX;
467 return ((u32) * seg_indexp);
471 vcl_segment_table_del (u64 segment_handle)
473 clib_rwlock_writer_lock (&vcm->segment_table_lock);
474 hash_unset (vcm->segment_table, segment_handle);
475 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */