/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vcl/vcl_locked.h>
#include <vcl/vcl_private.h>
19 typedef struct vls_shared_data_
23 u32 *workers_subscribed;
24 clib_bitmap_t *listeners;
27 typedef struct vcl_locked_session_
33 u32 shared_data_index;
34 /** VCL session owned by different workers because of migration */
35 u32 owner_vcl_wrk_index;
36 uword *vcl_wrk_index_to_session_index;
37 } vcl_locked_session_t;
39 typedef struct vls_worker_
41 clib_rwlock_t sh_to_vlsh_table_lock; /** valid for multithread workers */
42 vcl_locked_session_t *vls_pool;
43 uword *session_handle_to_vlsh_table;
47 typedef struct vls_local_
50 volatile int vls_mt_n_threads;
51 pthread_mutex_t vls_mt_mq_mlock;
52 pthread_mutex_t vls_mt_spool_mlock;
53 volatile u8 select_mp_check;
54 volatile u8 epoll_mp_check;
55 } vls_process_local_t;
57 static vls_process_local_t vls_local;
58 static vls_process_local_t *vlsl = &vls_local;
60 typedef struct vls_main_
62 vls_worker_t *workers;
63 clib_rwlock_t vls_table_lock;
64 /** Pool of data shared by sessions owned by different workers */
65 vls_shared_data_t *shared_data_pool;
66 clib_rwlock_t shared_data_lock;
67 /** Lock to protect rpc among workers */
68 clib_spinlock_t worker_rpc_lock;
76 VLS_RPC_STATE_SUCCESS,
77 VLS_RPC_STATE_SESSION_NOT_EXIST,
80 typedef enum vls_rpc_msg_type_
82 VLS_RPC_CLONE_AND_SHARE,
86 typedef struct vls_rpc_msg_
92 typedef struct vls_clone_and_share_msg_
94 u32 vls_index; /**< vls to be shared */
95 u32 session_index; /**< vcl session to be shared */
96 u32 origin_vls_wrk; /**< vls worker that initiated the rpc */
97 u32 origin_vls_index; /**< vls session of the originator */
98 u32 origin_vcl_wrk; /**< vcl worker that initiated the rpc */
99 u32 origin_session_index; /**< vcl session of the originator */
100 } vls_clone_and_share_msg_t;
102 typedef struct vls_sess_cleanup_msg_
104 u32 session_index; /**< vcl session to be cleaned */
105 u32 origin_vcl_wrk; /**< worker that initiated the rpc */
106 } vls_sess_cleanup_msg_t;
108 void vls_send_session_cleanup_rpc (vcl_worker_t * wrk,
109 u32 dst_wrk_index, u32 dst_session_index);
110 void vls_send_clone_and_share_rpc (vcl_worker_t *wrk, u32 origin_vls_index,
111 u32 session_index, u32 vls_wrk_index,
112 u32 dst_wrk_index, u32 dst_vls_index,
113 u32 dst_session_index);
116 vls_get_worker_index (void)
118 if (vls_mt_wrk_supported ())
119 return vlsl->vls_wrk_index;
121 return vcl_get_worker_index ();
125 vls_shared_data_alloc (void)
127 vls_shared_data_t *vls_shd;
130 clib_rwlock_writer_lock (&vlsm->shared_data_lock);
131 pool_get_zero (vlsm->shared_data_pool, vls_shd);
132 clib_spinlock_init (&vls_shd->lock);
133 shd_index = vls_shd - vlsm->shared_data_pool;
134 clib_rwlock_writer_unlock (&vlsm->shared_data_lock);
140 vls_shared_data_index (vls_shared_data_t * vls_shd)
142 return vls_shd - vlsm->shared_data_pool;
146 vls_shared_data_get (u32 shd_index)
148 if (pool_is_free_index (vlsm->shared_data_pool, shd_index))
150 return pool_elt_at_index (vlsm->shared_data_pool, shd_index);
154 vls_shared_data_free (u32 shd_index)
156 vls_shared_data_t *vls_shd;
158 clib_rwlock_writer_lock (&vlsm->shared_data_lock);
159 vls_shd = vls_shared_data_get (shd_index);
160 clib_spinlock_free (&vls_shd->lock);
161 clib_bitmap_free (vls_shd->listeners);
162 vec_free (vls_shd->workers_subscribed);
163 pool_put (vlsm->shared_data_pool, vls_shd);
164 clib_rwlock_writer_unlock (&vlsm->shared_data_lock);
168 vls_shared_data_pool_rlock (void)
170 clib_rwlock_reader_lock (&vlsm->shared_data_lock);
174 vls_shared_data_pool_runlock (void)
176 clib_rwlock_reader_unlock (&vlsm->shared_data_lock);
180 vls_mt_table_rlock (void)
182 if (vlsl->vls_mt_n_threads > 1)
183 clib_rwlock_reader_lock (&vlsm->vls_table_lock);
187 vls_mt_table_runlock (void)
189 if (vlsl->vls_mt_n_threads > 1)
190 clib_rwlock_reader_unlock (&vlsm->vls_table_lock);
194 vls_mt_table_wlock (void)
196 if (vlsl->vls_mt_n_threads > 1)
197 clib_rwlock_writer_lock (&vlsm->vls_table_lock);
201 vls_mt_table_wunlock (void)
203 if (vlsl->vls_mt_n_threads > 1)
204 clib_rwlock_writer_unlock (&vlsm->vls_table_lock);
217 VLS_MT_LOCK_MQ = 1 << 0,
218 VLS_MT_LOCK_SPOOL = 1 << 1
219 } vls_mt_lock_type_t;
224 vlsl->vls_mt_n_threads += 1;
226 /* If multi-thread workers are supported, for each new thread register a new
227 * vcl worker with vpp. Otherwise, all threads use the same vcl worker, so
228 * update the vcl worker's thread local worker index variable */
229 if (vls_mt_wrk_supported ())
231 if (vppcom_worker_register () != VPPCOM_OK)
232 VERR ("failed to register worker");
235 vcl_set_worker_index (vlsl->vls_wrk_index);
239 vls_mt_mq_lock (void)
241 pthread_mutex_lock (&vlsl->vls_mt_mq_mlock);
245 vls_mt_mq_unlock (void)
247 pthread_mutex_unlock (&vlsl->vls_mt_mq_mlock);
251 vls_mt_spool_lock (void)
253 pthread_mutex_lock (&vlsl->vls_mt_spool_mlock);
257 vls_mt_create_unlock (void)
259 pthread_mutex_unlock (&vlsl->vls_mt_spool_mlock);
263 vls_mt_locks_init (void)
265 pthread_mutex_init (&vlsl->vls_mt_mq_mlock, NULL);
266 pthread_mutex_init (&vlsl->vls_mt_spool_mlock, NULL);
270 vls_is_shared (vcl_locked_session_t * vls)
272 return (vls->shared_data_index != ~0);
276 vls_lock (vcl_locked_session_t * vls)
278 if ((vlsl->vls_mt_n_threads > 1) || vls_is_shared (vls))
279 clib_spinlock_lock (&vls->lock);
283 vls_unlock (vcl_locked_session_t * vls)
285 if ((vlsl->vls_mt_n_threads > 1) || vls_is_shared (vls))
286 clib_spinlock_unlock (&vls->lock);
289 static inline vcl_session_handle_t
290 vls_to_sh (vcl_locked_session_t * vls)
292 return vcl_session_handle_from_index (vls->session_index);
295 static inline vcl_session_handle_t
296 vls_to_sh_tu (vcl_locked_session_t * vls)
298 vcl_session_handle_t sh;
299 sh = vls_to_sh (vls);
300 vls_mt_table_runlock ();
304 static vls_worker_t *
305 vls_worker_get_current (void)
307 return pool_elt_at_index (vlsm->workers, vls_get_worker_index ());
311 vls_worker_alloc (void)
315 pool_get_zero (vlsm->workers, wrk);
316 if (vls_mt_wrk_supported ())
317 clib_rwlock_init (&wrk->sh_to_vlsh_table_lock);
318 wrk->wrk_index = vcl_get_worker_index ();
322 vls_worker_free (vls_worker_t * wrk)
324 hash_free (wrk->session_handle_to_vlsh_table);
325 if (vls_mt_wrk_supported ())
326 clib_rwlock_free (&wrk->sh_to_vlsh_table_lock);
327 pool_free (wrk->vls_pool);
328 pool_put (vlsm->workers, wrk);
331 static vls_worker_t *
332 vls_worker_get (u32 wrk_index)
334 if (pool_is_free_index (vlsm->workers, wrk_index))
336 return pool_elt_at_index (vlsm->workers, wrk_index);
340 vls_sh_to_vlsh_table_add (vls_worker_t *wrk, vcl_session_handle_t sh, u32 vlsh)
342 if (vls_mt_wrk_supported ())
343 clib_rwlock_writer_lock (&wrk->sh_to_vlsh_table_lock);
344 hash_set (wrk->session_handle_to_vlsh_table, sh, vlsh);
345 if (vls_mt_wrk_supported ())
346 clib_rwlock_writer_unlock (&wrk->sh_to_vlsh_table_lock);
350 vls_sh_to_vlsh_table_del (vls_worker_t *wrk, vcl_session_handle_t sh)
352 if (vls_mt_wrk_supported ())
353 clib_rwlock_writer_lock (&wrk->sh_to_vlsh_table_lock);
354 hash_unset (wrk->session_handle_to_vlsh_table, sh);
355 if (vls_mt_wrk_supported ())
356 clib_rwlock_writer_unlock (&wrk->sh_to_vlsh_table_lock);
360 vls_sh_to_vlsh_table_get (vls_worker_t *wrk, vcl_session_handle_t sh)
362 if (vls_mt_wrk_supported ())
363 clib_rwlock_reader_lock (&wrk->sh_to_vlsh_table_lock);
364 uword *vlshp = hash_get (wrk->session_handle_to_vlsh_table, sh);
365 if (vls_mt_wrk_supported ())
366 clib_rwlock_reader_unlock (&wrk->sh_to_vlsh_table_lock);
371 vls_alloc (vcl_session_handle_t sh)
373 vls_worker_t *wrk = vls_worker_get_current ();
374 vcl_locked_session_t *vls;
376 vls_mt_table_wlock ();
378 pool_get_zero (wrk->vls_pool, vls);
379 vls->session_index = vppcom_session_index (sh);
380 vls->worker_index = vppcom_session_worker (sh);
381 vls->vls_index = vls - wrk->vls_pool;
382 vls->shared_data_index = ~0;
383 vls_sh_to_vlsh_table_add (wrk, sh, vls->vls_index);
384 if (vls_mt_wrk_supported ())
386 hash_set (vls->vcl_wrk_index_to_session_index, vls->worker_index,
388 vls->owner_vcl_wrk_index = vls->worker_index;
390 clib_spinlock_init (&vls->lock);
392 vls_mt_table_wunlock ();
393 return vls->vls_index;
396 static vcl_locked_session_t *
397 vls_get (vls_handle_t vlsh)
399 vls_worker_t *wrk = vls_worker_get_current ();
400 if (pool_is_free_index (wrk->vls_pool, vlsh))
402 return pool_elt_at_index (wrk->vls_pool, vlsh);
406 vls_free (vcl_locked_session_t * vls)
408 vls_worker_t *wrk = vls_worker_get_current ();
411 vls_sh_to_vlsh_table_del (
412 wrk, vcl_session_handle_from_index (vls->session_index));
413 clib_spinlock_free (&vls->lock);
414 pool_put (wrk->vls_pool, vls);
417 static vcl_locked_session_t *
418 vls_get_and_lock (vls_handle_t vlsh)
420 vls_worker_t *wrk = vls_worker_get_current ();
421 vcl_locked_session_t *vls;
422 if (pool_is_free_index (wrk->vls_pool, vlsh))
424 vls = pool_elt_at_index (wrk->vls_pool, vlsh);
429 static vcl_locked_session_t *
430 vls_get_w_dlock (vls_handle_t vlsh)
432 vcl_locked_session_t *vls;
433 vls_mt_table_rlock ();
434 vls = vls_get_and_lock (vlsh);
436 vls_mt_table_runlock ();
441 vls_get_and_unlock (vls_handle_t vlsh)
443 vcl_locked_session_t *vls;
444 vls_mt_table_rlock ();
445 vls = vls_get (vlsh);
447 vls_mt_table_runlock ();
451 vls_dunlock (vcl_locked_session_t * vls)
454 vls_mt_table_runlock ();
457 static vcl_locked_session_t *
458 vls_session_get (vls_worker_t * wrk, u32 vls_index)
460 if (pool_is_free_index (wrk->vls_pool, vls_index))
462 return pool_elt_at_index (wrk->vls_pool, vls_index);
466 vlsh_to_sh (vls_handle_t vlsh)
468 vcl_locked_session_t *vls;
471 vls = vls_get_w_dlock (vlsh);
473 return INVALID_SESSION_ID;
474 rv = vls_to_sh (vls);
480 vlsh_to_session_index (vls_handle_t vlsh)
482 vcl_session_handle_t sh;
483 sh = vlsh_to_sh (vlsh);
484 return vppcom_session_index (sh);
488 vls_si_wi_to_vlsh (u32 session_index, u32 vcl_wrk_index)
490 vls_worker_t *wrk = vls_worker_get_current ();
491 uword *vlshp = vls_sh_to_vlsh_table_get (
493 vcl_session_handle_from_wrk_session_index (session_index, vcl_wrk_index));
495 return vlshp ? *vlshp : VLS_INVALID_HANDLE;
499 vls_session_index_to_vlsh (uint32_t session_index)
503 vls_mt_table_rlock ();
504 vlsh = vls_si_wi_to_vlsh (session_index, vcl_get_worker_index ());
505 vls_mt_table_runlock ();
511 vls_is_shared_by_wrk (vcl_locked_session_t * vls, u32 wrk_index)
513 vls_shared_data_t *vls_shd;
516 if (vls->shared_data_index == ~0)
519 vls_shared_data_pool_rlock ();
521 vls_shd = vls_shared_data_get (vls->shared_data_index);
522 clib_spinlock_lock (&vls_shd->lock);
524 for (i = 0; i < vec_len (vls_shd->workers_subscribed); i++)
525 if (vls_shd->workers_subscribed[i] == wrk_index)
527 clib_spinlock_unlock (&vls_shd->lock);
528 vls_shared_data_pool_runlock ();
531 clib_spinlock_unlock (&vls_shd->lock);
533 vls_shared_data_pool_runlock ();
538 vls_listener_wrk_set (vcl_locked_session_t * vls, u32 wrk_index, u8 is_active)
540 vls_shared_data_t *vls_shd;
542 if (vls->shared_data_index == ~0)
544 clib_warning ("not a shared session");
548 vls_shared_data_pool_rlock ();
550 vls_shd = vls_shared_data_get (vls->shared_data_index);
552 clib_spinlock_lock (&vls_shd->lock);
553 clib_bitmap_set (vls_shd->listeners, wrk_index, is_active);
554 clib_spinlock_unlock (&vls_shd->lock);
556 vls_shared_data_pool_runlock ();
560 vls_shared_get_owner (vcl_locked_session_t * vls)
562 vls_shared_data_t *vls_shd;
565 vls_shared_data_pool_rlock ();
567 vls_shd = vls_shared_data_get (vls->shared_data_index);
568 owner_wrk = vls_shd->owner_wrk_index;
570 vls_shared_data_pool_runlock ();
576 vls_listener_wrk_is_active (vcl_locked_session_t * vls, u32 wrk_index)
578 vls_shared_data_t *vls_shd;
581 if (vls->shared_data_index == ~0)
583 clib_warning ("not a shared session");
587 vls_shared_data_pool_rlock ();
589 vls_shd = vls_shared_data_get (vls->shared_data_index);
591 clib_spinlock_lock (&vls_shd->lock);
592 is_set = clib_bitmap_get (vls_shd->listeners, wrk_index);
593 clib_spinlock_unlock (&vls_shd->lock);
595 vls_shared_data_pool_runlock ();
597 return (is_set == 1);
601 vls_listener_wrk_start_listen (vcl_locked_session_t * vls, u32 wrk_index)
603 vppcom_session_listen (vls_to_sh (vls), ~0);
604 vls_listener_wrk_set (vls, wrk_index, 1 /* is_active */ );
608 vls_listener_wrk_stop_listen (vcl_locked_session_t * vls, u32 wrk_index)
613 wrk = vcl_worker_get (wrk_index);
614 s = vcl_session_get (wrk, vls->session_index);
615 if (s->session_state != VCL_STATE_LISTEN)
617 vcl_send_session_unlisten (wrk, s);
618 s->session_state = VCL_STATE_LISTEN_NO_MQ;
619 vls_listener_wrk_set (vls, wrk_index, 0 /* is_active */ );
623 vls_shared_data_subscriber_position (vls_shared_data_t * vls_shd,
628 for (i = 0; i < vec_len (vls_shd->workers_subscribed); i++)
630 if (vls_shd->workers_subscribed[i] == wrk_index)
637 vls_unshare_session (vcl_locked_session_t * vls, vcl_worker_t * wrk)
639 vls_shared_data_t *vls_shd;
640 int do_disconnect, pos;
644 if (vls->shared_data_index == ~0)
647 s = vcl_session_get (wrk, vls->session_index);
648 if (s->session_state == VCL_STATE_LISTEN)
649 vls_listener_wrk_set (vls, wrk->wrk_index, 0 /* is_active */ );
651 vls_shared_data_pool_rlock ();
653 vls_shd = vls_shared_data_get (vls->shared_data_index);
654 clib_spinlock_lock (&vls_shd->lock);
656 pos = vls_shared_data_subscriber_position (vls_shd, wrk->wrk_index);
659 clib_warning ("worker %u not subscribed for vls %u", wrk->wrk_index,
665 * Unsubscribe from share data and fifos
669 svm_fifo_del_subscriber (s->rx_fifo, wrk->vpp_wrk_index);
670 svm_fifo_del_subscriber (s->tx_fifo, wrk->vpp_wrk_index);
672 vec_del1 (vls_shd->workers_subscribed, pos);
677 n_subscribers = vec_len (vls_shd->workers_subscribed);
678 do_disconnect = s->session_state == VCL_STATE_LISTEN || !n_subscribers;
679 vcl_session_cleanup (wrk, s, vcl_session_handle (s), do_disconnect);
682 * No subscriber left, cleanup shared data
686 u32 shd_index = vls_shared_data_index (vls_shd);
688 clib_spinlock_unlock (&vls_shd->lock);
689 vls_shared_data_pool_runlock ();
691 vls_shared_data_free (shd_index);
693 /* All locks have been dropped */
697 /* Return, if this is not the owning worker */
698 if (vls_shd->owner_wrk_index != wrk->wrk_index)
701 ASSERT (vec_len (vls_shd->workers_subscribed));
704 * Check if we can change owner or close
706 vls_shd->owner_wrk_index = vls_shd->workers_subscribed[0];
707 vcl_send_session_worker_update (wrk, s, vls_shd->owner_wrk_index);
709 /* XXX is this still needed? */
710 if (vec_len (vls_shd->workers_subscribed) > 1)
711 clib_warning ("more workers need to be updated");
715 clib_spinlock_unlock (&vls_shd->lock);
716 vls_shared_data_pool_runlock ();
722 vls_init_share_session (vls_worker_t * vls_wrk, vcl_locked_session_t * vls)
724 vls_shared_data_t *vls_shd;
726 u32 vls_shd_index = vls_shared_data_alloc ();
728 vls_shared_data_pool_rlock ();
730 vls_shd = vls_shared_data_get (vls_shd_index);
731 vls_shd->owner_wrk_index = vls_wrk->wrk_index;
732 vls->shared_data_index = vls_shd_index;
733 vec_add1 (vls_shd->workers_subscribed, vls_wrk->wrk_index);
735 vls_shared_data_pool_runlock ();
739 vls_share_session (vls_worker_t * vls_wrk, vcl_locked_session_t * vls)
741 vcl_worker_t *vcl_wrk = vcl_worker_get (vls_wrk->wrk_index);
742 vls_shared_data_t *vls_shd;
745 s = vcl_session_get (vcl_wrk, vls->session_index);
748 clib_warning ("wrk %u session %u vls %u NOT AVAILABLE",
749 vcl_wrk->wrk_index, vls->session_index, vls->vls_index);
753 ASSERT (vls->shared_data_index != ~0);
755 /* Reinit session lock */
756 clib_spinlock_init (&vls->lock);
758 vls_shared_data_pool_rlock ();
760 vls_shd = vls_shared_data_get (vls->shared_data_index);
762 clib_spinlock_lock (&vls_shd->lock);
763 vec_add1 (vls_shd->workers_subscribed, vls_wrk->wrk_index);
764 clib_spinlock_unlock (&vls_shd->lock);
766 vls_shared_data_pool_runlock ();
770 svm_fifo_add_subscriber (s->rx_fifo, vcl_wrk->vpp_wrk_index);
771 svm_fifo_add_subscriber (s->tx_fifo, vcl_wrk->vpp_wrk_index);
773 else if (s->session_state == VCL_STATE_LISTEN)
775 s->session_state = VCL_STATE_LISTEN_NO_MQ;
780 vls_share_sessions (vls_worker_t * vls_parent_wrk, vls_worker_t * vls_wrk)
782 vcl_locked_session_t *vls, *parent_vls;
785 pool_foreach (vls, vls_wrk->vls_pool) {
786 /* Initialize sharing on parent session */
787 if (vls->shared_data_index == ~0)
789 parent_vls = vls_session_get (vls_parent_wrk, vls->vls_index);
790 vls_init_share_session (vls_parent_wrk, parent_vls);
791 vls->shared_data_index = parent_vls->shared_data_index;
793 vls_share_session (vls_wrk, vls);
799 vls_worker_copy_on_fork (vcl_worker_t * parent_wrk)
801 vls_worker_t *vls_wrk = vls_worker_get_current (), *vls_parent_wrk;
802 vcl_worker_t *wrk = vcl_worker_get_current ();
803 u32 vls_index, session_index, wrk_index;
804 vcl_session_handle_t sh;
809 wrk->sessions = pool_dup (parent_wrk->sessions);
810 wrk->session_index_by_vpp_handles =
811 hash_dup (parent_wrk->session_index_by_vpp_handles);
816 vls_parent_wrk = vls_worker_get (parent_wrk->wrk_index);
818 hash_foreach (sh, vls_index, vls_parent_wrk->session_handle_to_vlsh_table,
820 vcl_session_handle_parse (sh, &wrk_index, &session_index);
821 hash_set (vls_wrk->session_handle_to_vlsh_table,
822 vcl_session_handle_from_index (session_index), vls_index);
825 vls_wrk->vls_pool = pool_dup (vls_parent_wrk->vls_pool);
827 vls_share_sessions (vls_parent_wrk, vls_wrk);
831 vls_mt_acq_locks (vcl_locked_session_t * vls, vls_mt_ops_t op, int *locks_acq)
833 vcl_worker_t *wrk = vcl_worker_get_current ();
834 vcl_session_t *s = 0;
839 s = vcl_session_get (wrk, vls->session_index);
840 if (PREDICT_FALSE (!s))
842 is_nonblk = vcl_session_has_attr (s, VCL_SESS_ATTR_NONBLOCK);
849 is_nonblk = vcl_session_read_ready (s) != 0;
853 *locks_acq |= VLS_MT_LOCK_MQ;
856 case VLS_MT_OP_WRITE:
859 is_nonblk = vcl_session_write_ready (s) != 0;
863 *locks_acq |= VLS_MT_LOCK_MQ;
866 case VLS_MT_OP_XPOLL:
868 *locks_acq |= VLS_MT_LOCK_MQ;
870 case VLS_MT_OP_SPOOL:
871 vls_mt_spool_lock ();
872 *locks_acq |= VLS_MT_LOCK_SPOOL;
880 vls_mt_rel_locks (int locks_acq)
882 if (locks_acq & VLS_MT_LOCK_MQ)
884 if (locks_acq & VLS_MT_LOCK_SPOOL)
885 vls_mt_create_unlock ();
889 vls_mt_session_should_migrate (vcl_locked_session_t * vls)
891 return (vls_mt_wrk_supported ()
892 && vls->worker_index != vcl_get_worker_index ());
895 static vcl_locked_session_t *
896 vls_mt_session_migrate (vcl_locked_session_t *vls)
898 u32 wrk_index = vcl_get_worker_index ();
900 vls_worker_t *vls_wrk = vls_worker_get_current ();
901 u32 src_sid, sid, vls_index, own_vcl_wrk_index;
902 vcl_session_t *session;
905 ASSERT (vls_mt_wrk_supported () && vls->worker_index != wrk_index);
908 * VCL session on current vcl worker already allocated. Update current
909 * owner worker and index and return
911 if ((p = hash_get (vls->vcl_wrk_index_to_session_index, wrk_index)))
913 vls->worker_index = wrk_index;
914 vls->session_index = (u32) p[0];
919 * Ask vcl worker that owns the original vcl session to clone it into
920 * current vcl worker session pool
923 if (!(p = hash_get (vls->vcl_wrk_index_to_session_index,
924 vls->owner_vcl_wrk_index)))
926 VERR ("session in owner worker(%u) is free", vls->owner_vcl_wrk_index);
929 vls_mt_table_runlock ();
933 src_sid = (u32) p[0];
934 wrk = vcl_worker_get_current ();
935 session = vcl_session_alloc (wrk);
936 sid = session->session_index;
937 VDBG (1, "migrate session of worker (session): %u (%u) -> %u (%u)",
938 vls->owner_vcl_wrk_index, src_sid, wrk_index, sid);
940 /* Drop lock to prevent dead lock when dst wrk trying to get lock. */
941 vls_index = vls->vls_index;
942 own_vcl_wrk_index = vls->owner_vcl_wrk_index;
944 vls_mt_table_runlock ();
945 vls_send_clone_and_share_rpc (wrk, vls_index, sid, vls_get_worker_index (),
946 own_vcl_wrk_index, vls_index, src_sid);
948 if (PREDICT_FALSE (wrk->rpc_done == VLS_RPC_STATE_SESSION_NOT_EXIST))
950 VWRN ("session %u not exist", src_sid);
953 else if (PREDICT_FALSE (wrk->rpc_done == VLS_RPC_STATE_INIT))
955 VWRN ("failed to wait rpc response");
958 else if (PREDICT_FALSE ((session->flags & VCL_SESSION_F_IS_VEP) &&
959 session->vep.next_sh != ~0))
961 VERR ("can't migrate nonempty epoll session");
965 else if (PREDICT_FALSE (!(session->flags & VCL_SESSION_F_IS_VEP) &&
966 session->session_state != VCL_STATE_CLOSED))
968 VERR ("migrate NOT supported, session_status (%u)",
969 session->session_state);
974 vls = vls_get_w_dlock (vls_index);
975 if (PREDICT_FALSE (!vls))
977 VWRN ("failed to get vls %u", vls_index);
981 session->session_index = sid;
982 vls->worker_index = wrk_index;
983 vls->session_index = sid;
984 hash_set (vls->vcl_wrk_index_to_session_index, wrk_index, sid);
985 vls_sh_to_vlsh_table_add (vls_wrk, vcl_session_handle (session),
990 vcl_session_free (wrk, session);
/** Register the calling thread on first use of the vls API. */
static inline void
vls_mt_detect (void)
{
  if (PREDICT_FALSE (vcl_get_worker_index () == ~0))
    vls_mt_add ();
}

/* Guard a vls operation: with per-thread workers, migrate the session
 * to this thread's worker if needed; otherwise take the mt locks that
 * the operation class requires. Must be paired with vls_mt_unguard in
 * the same scope (declares _locks_acq). */
#define vls_mt_guard(_vls, _op)                                               \
  int _locks_acq = 0;                                                         \
  if (vls_mt_wrk_supported ())                                                \
    {                                                                         \
      if (PREDICT_FALSE (_vls &&                                              \
			 ((vcl_locked_session_t *) _vls)->worker_index !=     \
			   vcl_get_worker_index ()))                          \
	{                                                                     \
	  _vls = vls_mt_session_migrate (_vls);                               \
	  if (PREDICT_FALSE (!_vls))                                          \
	    return VPPCOM_EBADFD;                                             \
	}                                                                     \
    }                                                                         \
  else                                                                        \
    {                                                                         \
      if (PREDICT_FALSE (vlsl->vls_mt_n_threads > 1))                         \
	vls_mt_acq_locks (_vls, _op, &_locks_acq);                            \
    }

#define vls_mt_unguard()                                                      \
  if (PREDICT_FALSE (_locks_acq))                                             \
    vls_mt_rel_locks (_locks_acq)
1025 vls_write (vls_handle_t vlsh, void *buf, size_t nbytes)
1027 vcl_locked_session_t *vls;
1031 if (!(vls = vls_get_w_dlock (vlsh)))
1032 return VPPCOM_EBADFD;
1034 vls_mt_guard (vls, VLS_MT_OP_WRITE);
1035 rv = vppcom_session_write (vls_to_sh_tu (vls), buf, nbytes);
1037 vls_get_and_unlock (vlsh);
1042 vls_write_msg (vls_handle_t vlsh, void *buf, size_t nbytes)
1044 vcl_locked_session_t *vls;
1048 if (!(vls = vls_get_w_dlock (vlsh)))
1049 return VPPCOM_EBADFD;
1050 vls_mt_guard (vls, VLS_MT_OP_WRITE);
1051 rv = vppcom_session_write_msg (vls_to_sh_tu (vls), buf, nbytes);
1053 vls_get_and_unlock (vlsh);
1058 vls_sendto (vls_handle_t vlsh, void *buf, int buflen, int flags,
1059 vppcom_endpt_t * ep)
1061 vcl_locked_session_t *vls;
1065 if (!(vls = vls_get_w_dlock (vlsh)))
1066 return VPPCOM_EBADFD;
1067 vls_mt_guard (vls, VLS_MT_OP_WRITE);
1068 rv = vppcom_session_sendto (vls_to_sh_tu (vls), buf, buflen, flags, ep);
1070 vls_get_and_unlock (vlsh);
1075 vls_read (vls_handle_t vlsh, void *buf, size_t nbytes)
1077 vcl_locked_session_t *vls;
1081 if (!(vls = vls_get_w_dlock (vlsh)))
1082 return VPPCOM_EBADFD;
1083 vls_mt_guard (vls, VLS_MT_OP_READ);
1084 rv = vppcom_session_read (vls_to_sh_tu (vls), buf, nbytes);
1086 vls_get_and_unlock (vlsh);
1091 vls_recvfrom (vls_handle_t vlsh, void *buffer, uint32_t buflen, int flags,
1092 vppcom_endpt_t * ep)
1094 vcl_locked_session_t *vls;
1098 if (!(vls = vls_get_w_dlock (vlsh)))
1099 return VPPCOM_EBADFD;
1100 vls_mt_guard (vls, VLS_MT_OP_READ);
1101 rv = vppcom_session_recvfrom (vls_to_sh_tu (vls), buffer, buflen, flags,
1104 vls_get_and_unlock (vlsh);
1109 vls_attr (vls_handle_t vlsh, uint32_t op, void *buffer, uint32_t * buflen)
1111 vcl_locked_session_t *vls;
1115 if (!(vls = vls_get_w_dlock (vlsh)))
1116 return VPPCOM_EBADFD;
1117 if (vls_mt_session_should_migrate (vls))
1119 vls = vls_mt_session_migrate (vls);
1120 if (PREDICT_FALSE (!vls))
1121 return VPPCOM_EBADFD;
1123 rv = vppcom_session_attr (vls_to_sh_tu (vls), op, buffer, buflen);
1124 vls_get_and_unlock (vlsh);
1129 vls_bind (vls_handle_t vlsh, vppcom_endpt_t * ep)
1131 vcl_locked_session_t *vls;
1135 if (!(vls = vls_get_w_dlock (vlsh)))
1136 return VPPCOM_EBADFD;
1137 rv = vppcom_session_bind (vls_to_sh_tu (vls), ep);
1138 vls_get_and_unlock (vlsh);
1143 vls_listen (vls_handle_t vlsh, int q_len)
1145 vcl_locked_session_t *vls;
1149 if (!(vls = vls_get_w_dlock (vlsh)))
1150 return VPPCOM_EBADFD;
1151 vls_mt_guard (vls, VLS_MT_OP_XPOLL);
1152 rv = vppcom_session_listen (vls_to_sh_tu (vls), q_len);
1154 vls_get_and_unlock (vlsh);
1159 vls_connect (vls_handle_t vlsh, vppcom_endpt_t * server_ep)
1161 vcl_locked_session_t *vls;
1165 if (!(vls = vls_get_w_dlock (vlsh)))
1166 return VPPCOM_EBADFD;
1167 vls_mt_guard (vls, VLS_MT_OP_XPOLL);
1168 rv = vppcom_session_connect (vls_to_sh_tu (vls), server_ep);
1170 vls_get_and_unlock (vlsh);
1175 vls_mp_checks (vcl_locked_session_t * vls, int is_add)
1177 vcl_worker_t *wrk = vcl_worker_get_current ();
1181 if (vls_mt_wrk_supported ())
1184 s = vcl_session_get (wrk, vls->session_index);
1185 switch (s->session_state)
1187 case VCL_STATE_LISTEN:
1190 vls_listener_wrk_set (vls, vls->worker_index, 1 /* is_active */ );
1193 vls_listener_wrk_stop_listen (vls, vls->worker_index);
1195 case VCL_STATE_LISTEN_NO_MQ:
1199 /* Register worker as listener */
1200 vls_listener_wrk_start_listen (vls, wrk->wrk_index);
1202 /* If owner worker did not attempt to accept/xpoll on the session,
1203 * force a listen stop for it, since it may not be interested in
1204 * accepting new sessions.
1205 * This is pretty much a hack done to give app workers the illusion
1206 * that it is fine to listen and not accept new sessions for a
1207 * given listener. Without it, we would accumulate unhandled
1208 * accepts on the passive worker message queue. */
1209 owner_wrk = vls_shared_get_owner (vls);
1210 if (!vls_listener_wrk_is_active (vls, owner_wrk))
1211 vls_listener_wrk_stop_listen (vls, owner_wrk);
1219 vls_accept (vls_handle_t listener_vlsh, vppcom_endpt_t * ep, int flags)
1221 vls_handle_t accepted_vlsh;
1222 vcl_locked_session_t *vls;
1226 if (!(vls = vls_get_w_dlock (listener_vlsh)))
1227 return VPPCOM_EBADFD;
1228 if (vcl_n_workers () > 1)
1229 vls_mp_checks (vls, 1 /* is_add */ );
1230 vls_mt_guard (vls, VLS_MT_OP_SPOOL);
1231 sh = vppcom_session_accept (vls_to_sh_tu (vls), ep, flags);
1233 vls_get_and_unlock (listener_vlsh);
1236 accepted_vlsh = vls_alloc (sh);
1237 if (PREDICT_FALSE (accepted_vlsh == VLS_INVALID_HANDLE))
1238 vppcom_session_close (sh);
1239 return accepted_vlsh;
1243 vls_create (uint8_t proto, uint8_t is_nonblocking)
1245 vcl_session_handle_t sh;
1247 vcl_locked_session_t *vls = NULL;
1250 vls_mt_guard (vls, VLS_MT_OP_SPOOL);
1251 sh = vppcom_session_create (proto, is_nonblocking);
1253 if (sh == INVALID_SESSION_ID)
1254 return VLS_INVALID_HANDLE;
1256 vlsh = vls_alloc (sh);
1257 if (PREDICT_FALSE (vlsh == VLS_INVALID_HANDLE))
1258 vppcom_session_close (sh);
1264 vls_mt_session_cleanup (vcl_locked_session_t * vls)
1266 u32 session_index, wrk_index, current_vcl_wrk;
1267 vcl_worker_t *wrk = vcl_worker_get_current ();
1269 ASSERT (vls_mt_wrk_supported ());
1271 current_vcl_wrk = vcl_get_worker_index ();
1274 hash_foreach (wrk_index, session_index, vls->vcl_wrk_index_to_session_index,
1276 if (current_vcl_wrk != wrk_index)
1277 vls_send_session_cleanup_rpc (wrk, wrk_index, session_index);
1280 hash_free (vls->vcl_wrk_index_to_session_index);
1284 vls_close (vls_handle_t vlsh)
1286 vcl_locked_session_t *vls;
1290 vls_mt_table_wlock ();
1292 vls = vls_get_and_lock (vlsh);
1295 vls_mt_table_wunlock ();
1296 return VPPCOM_EBADFD;
1299 vls_mt_guard (vls, VLS_MT_OP_SPOOL);
1301 if (vls_is_shared (vls))
1302 rv = vls_unshare_session (vls, vcl_worker_get_current ());
1304 rv = vppcom_session_close (vls_to_sh (vls));
1306 if (vls_mt_wrk_supported ())
1307 vls_mt_session_cleanup (vls);
1312 vls_mt_table_wunlock ();
1318 vls_epoll_create (void)
1320 vcl_session_handle_t sh;
1325 sh = vppcom_epoll_create ();
1326 if (sh == INVALID_SESSION_ID)
1327 return VLS_INVALID_HANDLE;
1329 vlsh = vls_alloc (sh);
1330 if (vlsh == VLS_INVALID_HANDLE)
1331 vppcom_session_close (sh);
1337 vls_epoll_ctl_mp_checks (vcl_locked_session_t * vls, int op)
1339 if (vcl_n_workers () <= 1)
1341 vlsl->epoll_mp_check = 1;
1345 if (op == EPOLL_CTL_MOD)
1348 vlsl->epoll_mp_check = 1;
1349 vls_mp_checks (vls, op == EPOLL_CTL_ADD);
1353 vls_epoll_ctl (vls_handle_t ep_vlsh, int op, vls_handle_t vlsh,
1354 struct epoll_event *event)
1356 vcl_locked_session_t *ep_vls, *vls;
1357 vcl_session_handle_t ep_sh, sh;
1361 vls_mt_table_rlock ();
1362 ep_vls = vls_get_and_lock (ep_vlsh);
1364 if (vls_mt_session_should_migrate (ep_vls))
1366 ep_vls = vls_mt_session_migrate (ep_vls);
1367 if (PREDICT_FALSE (!ep_vls))
1368 return VPPCOM_EBADFD;
1371 ep_sh = vls_to_sh (ep_vls);
1372 vls = vls_get_and_lock (vlsh);
1373 sh = vls_to_sh (vls);
1375 if (PREDICT_FALSE (!vlsl->epoll_mp_check))
1376 vls_epoll_ctl_mp_checks (vls, op);
1378 vls_mt_table_runlock ();
1380 rv = vppcom_epoll_ctl (ep_sh, op, sh, event);
1382 vls_mt_table_rlock ();
1383 ep_vls = vls_get (ep_vlsh);
1384 vls = vls_get (vlsh);
1386 vls_unlock (ep_vls);
1387 vls_mt_table_runlock ();
1392 vls_epoll_wait (vls_handle_t ep_vlsh, struct epoll_event *events,
1393 int maxevents, double wait_for_time)
1395 vcl_locked_session_t *vls, *vls_tmp = NULL;
1399 if (!(vls = vls_get_w_dlock (ep_vlsh)))
1400 return VPPCOM_EBADFD;
1401 vls_mt_guard (vls_tmp, VLS_MT_OP_XPOLL);
1402 rv = vppcom_epoll_wait (vls_to_sh_tu (vls), events, maxevents,
1405 vls_get_and_unlock (ep_vlsh);
1410 vls_select_mp_checks (vcl_si_set * read_map)
1412 vcl_locked_session_t *vls;
1417 if (vcl_n_workers () <= 1)
1419 vlsl->select_mp_check = 1;
1426 vlsl->select_mp_check = 1;
1427 wrk = vcl_worker_get_current ();
1430 clib_bitmap_foreach (si, read_map) {
1431 s = vcl_session_get (wrk, si);
1432 if (s->session_state == VCL_STATE_LISTEN)
1434 vls = vls_get (vls_session_index_to_vlsh (si));
1435 vls_mp_checks (vls, 1 /* is_add */);
1442 vls_select (int n_bits, vcl_si_set * read_map, vcl_si_set * write_map,
1443 vcl_si_set * except_map, double wait_for_time)
1446 vcl_locked_session_t *vls = NULL;
1449 vls_mt_guard (vls, VLS_MT_OP_XPOLL);
1450 if (PREDICT_FALSE (!vlsl->select_mp_check))
1451 vls_select_mp_checks (read_map);
1452 rv = vppcom_select (n_bits, read_map, write_map, except_map, wait_for_time);
1458 vls_unshare_vcl_worker_sessions (vcl_worker_t * wrk)
1460 u32 current_wrk, is_current;
1461 vcl_locked_session_t *vls;
1464 if (pool_elts (vcm->workers) <= 1)
1467 current_wrk = vcl_get_worker_index ();
1468 is_current = current_wrk == wrk->wrk_index;
1471 pool_foreach (s, wrk->sessions) {
1472 vls = vls_get (vls_si_wi_to_vlsh (s->session_index, wrk->wrk_index));
1473 if (vls && (is_current || vls_is_shared_by_wrk (vls, current_wrk)))
1474 vls_unshare_session (vls, wrk);
1480 vls_cleanup_vcl_worker (vcl_worker_t * wrk)
1482 vls_worker_t *vls_wrk = vls_worker_get (wrk->wrk_index);
1484 /* Unshare sessions and also cleanup worker since child may have
1485 * called _exit () and therefore vcl may not catch the event */
1486 vls_unshare_vcl_worker_sessions (wrk);
1487 vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
1489 vls_worker_free (vls_wrk);
1493 vls_cleanup_forked_child (vcl_worker_t * wrk, vcl_worker_t * child_wrk)
1495 vcl_worker_t *sub_child;
1498 if (child_wrk->forked_child != ~0)
1500 sub_child = vcl_worker_get_if_valid (child_wrk->forked_child);
1503 /* Wait a bit, maybe the process is going away */
1504 while (kill (sub_child->current_pid, 0) >= 0 && tries++ < 50)
1506 if (kill (sub_child->current_pid, 0) < 0)
1507 vls_cleanup_forked_child (child_wrk, sub_child);
1510 vls_cleanup_vcl_worker (child_wrk);
1511 VDBG (0, "Cleaned up forked child wrk %u", child_wrk->wrk_index);
1512 wrk->forked_child = ~0;
1515 static struct sigaction old_sa;
/* SIGCHLD handler installed around fork: cleans up the forked child's
 * worker state, then restores and chains to the application's original
 * handler saved in old_sa. NOTE(review): braces/returns elided here. */
1518 vls_intercept_sigchld_handler (int signum, siginfo_t * si, void *uc)
1520   vcl_worker_t *wrk, *child_wrk;
  /* Not a vcl worker thread/process — nothing of ours to clean up. */
1522   if (vcl_get_worker_index () == ~0)
  /* Restore the app's original SIGCHLD disposition before doing work. */
1525   if (sigaction (SIGCHLD, &old_sa, 0))
1527       VERR ("couldn't restore sigchld");
1531   wrk = vcl_worker_get_current ();
1532   if (wrk->forked_child == ~0)
1535   child_wrk = vcl_worker_get_if_valid (wrk->forked_child);
  /* SIGCHLD for some other process (not our forked vcl child) — skip
   * cleanup; still falls through to chain the old handler below. */
1539   if (si && si->si_pid != child_wrk->current_pid)
1541       VDBG (0, "unexpected child pid %u", si->si_pid);
1544   vls_cleanup_forked_child (wrk, child_wrk);
  /* Chain to the saved handler, respecting whether it was registered as
   * a three-argument SA_SIGINFO handler or a plain one. */
1547   if (old_sa.sa_flags & SA_SIGINFO)
1549       void (*fn) (int, siginfo_t *, void *) = old_sa.sa_sigaction;
1550       fn (signum, si, uc);
1554       void (*fn) (int) = old_sa.sa_handler;
/* Install vls_intercept_sigchld_handler for SIGCHLD, saving the previous
 * disposition in old_sa so the handler can restore and chain to it.
 * NOTE(review): "incercept" is a typo for "intercept" — kept because the
 * pre-fork hook calls it by this name. */
1561 vls_incercept_sigchld ()
1563   struct sigaction sa;
1564   clib_memset (&sa, 0, sizeof (sa));
1565   sa.sa_sigaction = vls_intercept_sigchld_handler;
  /* SA_SIGINFO: deliver siginfo_t so the handler can check si_pid. */
1566   sa.sa_flags = SA_SIGINFO;
1567   if (sigaction (SIGCHLD, &sa, &old_sa))
1569       VERR ("couldn't intercept sigchld");
/* pthread_atfork prepare hook: arm the SIGCHLD interceptor and drain
 * pending mq events so the child starts from a clean queue. */
1575 vls_app_pre_fork (void)
1577   vls_incercept_sigchld ();
1578   vcl_flush_mq_events ();
/* pthread_atfork child hook: register the child as a new vcl worker with
 * vpp, clone/share the parent worker's sessions into it, and reset the
 * per-process vls state. NOTE(review): some lines (braces, error returns,
 * forking-flag handshake with the parent) are elided in this extract. */
1582 vls_app_fork_child_handler (void)
1584   vcl_worker_t *parent_wrk;
1585   int parent_wrk_index;
  /* The inherited worker index still refers to the parent's worker. */
1587   parent_wrk_index = vcl_get_worker_index ();
1588   VDBG (0, "initializing forked child %u with parent wrk %u", getpid (),
  /* Drop the inherited index so registration allocates a fresh worker. */
1594   vcl_set_worker_index (~0);
1597    * Allocate and register vcl worker with vpp
1599   if (vppcom_worker_register ())
1601       VERR ("couldn't register new worker!");
1606    * Allocate/initialize vls worker and share sessions
1608   vls_worker_alloc ();
1609   parent_wrk = vcl_worker_get (parent_wrk_index);
1610   vls_worker_copy_on_fork (parent_wrk);
1611   parent_wrk->forked_child = vcl_get_worker_index ();
1613   /* Reset number of threads and set wrk index */
1614   vlsl->vls_mt_n_threads = 0;
1615   vlsl->vls_wrk_index = vcl_get_worker_index ();
  /* Re-arm the one-time select/epoll multi-process checks in the child. */
1616   vlsl->select_mp_check = 0;
1617   vlsl->epoll_mp_check = 0;
1618   vls_mt_locks_init ();
1620   VDBG (0, "forked child main worker initialized");
/* pthread_atfork parent hook: spin until the child handler finishes its
 * initialization and clears vcm->forking. NOTE(review): the line that
 * sets vcm->forking and the loop body are elided in this extract. */
1625 vls_app_fork_parent_handler (void)
1628   while (vcm->forking)
  /* NOTE(review): the enclosing function's header is elided in this
   * extract; presumably this is the body of vls_app_exit, registered via
   * atexit () in vls_app_create — confirm against the full file. */
1635   vls_worker_t *wrk = vls_worker_get_current ();
1637   /* Unshare the sessions. VCL will clean up the worker */
1638   vls_unshare_vcl_worker_sessions (vcl_worker_get_current ());
1639   vls_worker_free (wrk);
/* RPC target: clone one of this worker's sessions and share it with the
 * requesting (origin) worker. Runs in the destination worker; signals
 * completion by writing the origin worker's rpc_done flag.
 * NOTE(review): braces and early returns are elided in this extract. */
1643 vls_clone_and_share_rpc_handler (void *args)
1645   vls_clone_and_share_msg_t *msg = (vls_clone_and_share_msg_t *) args;
1646   vls_worker_t *wrk = vls_worker_get_current (), *dst_wrk;
1647   vcl_locked_session_t *vls, *dst_vls;
1648   vcl_worker_t *vcl_wrk = vcl_worker_get_current (), *dst_vcl_wrk;
1649   vcl_session_t *s, *dst_s;
1651   VDBG (1, "process session clone of worker (session): %u (%u) -> %u (%u)",
1652 	vcl_wrk->wrk_index, msg->session_index, msg->origin_vcl_wrk,
1653 	msg->origin_session_index);
1655   /* VCL locked session can't been protected, so DONT touch it.
1656    * VCL session may been free, check it.
1658   dst_vcl_wrk = vcl_worker_get (msg->origin_vcl_wrk);
1659   s = vcl_session_get (vcl_wrk, msg->session_index);
  /* Session already freed on this side: report failure to the origin. */
1660   if (PREDICT_FALSE (!s))
1662       dst_vcl_wrk->rpc_done = VLS_RPC_STATE_SESSION_NOT_EXIST;
  /* Single-vls-worker mode: link the origin's vls to our shared data so
   * both workers reference the same vls_shared_data_t entry. */
1666   if (!vls_mt_wrk_supported ())
1668       vls = vls_session_get (wrk, msg->vls_index);
1669       vls_init_share_session (wrk, vls);
1670       dst_wrk = vls_worker_get (msg->origin_vls_wrk);
1671       dst_vls = vls_session_get (dst_wrk, msg->origin_vls_index);
1672       dst_vls->shared_data_index = vls->shared_data_index;
  /* Copy the vcl session state into the origin worker's session slot. */
1674   dst_s = vcl_session_get (dst_vcl_wrk, msg->origin_session_index);
1675   clib_memcpy (dst_s, s, sizeof (*s));
1677   dst_vcl_wrk->rpc_done = VLS_RPC_STATE_SUCCESS;
/* RPC target: close a session on behalf of another worker and drop its
 * session-handle -> vlsh mapping from this vls worker's table. */
1681 vls_session_cleanup_rpc_handler (void *args)
1683   vls_sess_cleanup_msg_t *msg = (vls_sess_cleanup_msg_t *) args;
1684   vcl_worker_t *wrk = vcl_worker_get_current ();
1685   vls_worker_t *vls_wrk = vls_worker_get_current ();
1686   vcl_session_handle_t sh = vcl_session_handle_from_index (msg->session_index);
1688   VDBG (1, "process session cleanup of worker (session): %u (%u) from %u ()",
1689 	wrk->wrk_index, msg->session_index, msg->origin_vcl_wrk);
1691   vppcom_session_close (sh);
1692   vls_sh_to_vlsh_table_del (vls_wrk, sh);
/* Worker RPC dispatcher (installed as vcm->wrk_rpc_fn in vls_app_create):
 * routes an incoming vls_rpc_msg_t to the matching handler by type.
 * NOTE(review): switch/braces/breaks are elided in this extract. */
1696 vls_rpc_handler (void *args)
1698   vls_rpc_msg_t *msg = (vls_rpc_msg_t *) args;
1701     case VLS_RPC_CLONE_AND_SHARE:
1702       vls_clone_and_share_rpc_handler (msg->data);
1704     case VLS_RPC_SESS_CLEANUP:
1705       vls_session_cleanup_rpc_handler (msg->data);
/* Send a CLONE_AND_SHARE rpc to @dst_wrk_index asking it to clone its
 * session (dst_vls_index / dst_session_index) into this worker's slots
 * (origin_vls_index / session_index), then wait for completion or timeout.
 * NOTE(review): braces and the loop bodies are elided in this extract. */
1713 vls_send_clone_and_share_rpc (vcl_worker_t *wrk, u32 origin_vls_index,
1714 			      u32 session_index, u32 vls_wrk_index,
1715 			      u32 dst_wrk_index, u32 dst_vls_index,
1716 			      u32 dst_session_index)
  /* One byte for the rpc type tag followed by the payload struct. */
1718   u8 data[sizeof (u8) + sizeof (vls_clone_and_share_msg_t)];
1719   vls_clone_and_share_msg_t *msg;
1722   f64 timeout = clib_time_now (&wrk->clib_time) + VLS_WORKER_RPC_TIMEOUT;
1724   rpc = (vls_rpc_msg_t *) & data;
1725   rpc->type = VLS_RPC_CLONE_AND_SHARE;
1726   msg = (vls_clone_and_share_msg_t *) & rpc->data;
  /* "origin" fields identify this (requesting) worker's side. */
1727   msg->origin_vls_wrk = vls_wrk_index;
1728   msg->origin_vls_index = origin_vls_index;
1729   msg->origin_vcl_wrk = wrk->wrk_index;
1730   msg->origin_session_index = session_index;
  /* Destination worker's vls/session to be cloned. */
1731   msg->vls_index = dst_vls_index;
1732   msg->session_index = dst_session_index;
1734   /* Try lock and handle rpcs if two threads send each other
1735    * clone requests at the same time.
1737   wrk->rpc_done = VLS_RPC_STATE_INIT;
  /* While waiting for the lock, keep servicing our own mq so a peer's
   * clone request aimed at us can make progress (deadlock avoidance). */
1738   while (!clib_spinlock_trylock (&vlsm->worker_rpc_lock))
1739     vcl_flush_mq_events ();
1740   ret = vcl_send_worker_rpc (dst_wrk_index, rpc, sizeof (data));
1742   VDBG (1, "send session clone to wrk (session): %u (%u) -> %u (%u), ret=%d",
1743 	dst_wrk_index, msg->session_index, msg->origin_vcl_wrk,
1744 	msg->origin_session_index, ret);
  /* Busy-wait for the handler to flip rpc_done, bounded by timeout. */
1745   while (!ret && wrk->rpc_done == VLS_RPC_STATE_INIT &&
1746 	 clib_time_now (&wrk->clib_time) < timeout)
1748   clib_spinlock_unlock (&vlsm->worker_rpc_lock);
/* Fire-and-forget SESS_CLEANUP rpc: ask @dst_wrk_index to close its
 * session @dst_session_index (no completion wait, unlike clone rpc). */
1752 vls_send_session_cleanup_rpc (vcl_worker_t * wrk,
1753 			      u32 dst_wrk_index, u32 dst_session_index)
  /* One byte for the rpc type tag followed by the payload struct. */
1755   u8 data[sizeof (u8) + sizeof (vls_sess_cleanup_msg_t)];
1756   vls_sess_cleanup_msg_t *msg;
1760   rpc = (vls_rpc_msg_t *) & data;
1761   rpc->type = VLS_RPC_SESS_CLEANUP;
1762   msg = (vls_sess_cleanup_msg_t *) & rpc->data;
1763   msg->origin_vcl_wrk = wrk->wrk_index;
1764   msg->session_index = dst_session_index;
1766   ret = vcl_send_worker_rpc (dst_wrk_index, rpc, sizeof (data));
1768   VDBG (1, "send session cleanup to wrk (session): %u (%u) from %u, ret=%d",
1769 	dst_wrk_index, msg->session_index, msg->origin_vcl_wrk, ret);
/* Create the vcl app, then initialize vls global state (vlsm), fork and
 * exit hooks, the first vls worker, and the worker rpc dispatcher.
 * NOTE(review): braces, error return and final return are elided here. */
1773 vls_app_create (char *app_name)
1777   if ((rv = vppcom_app_create (app_name)))
1780   vlsm = clib_mem_alloc (sizeof (vls_main_t));
1781   clib_memset (vlsm, 0, sizeof (*vlsm));
1782   clib_rwlock_init (&vlsm->vls_table_lock);
1783   clib_rwlock_init (&vlsm->shared_data_lock);
1784   clib_spinlock_init (&vlsm->worker_rpc_lock);
  /* Pre-size the worker pool so it never reallocates (pointers into the
   * pool stay stable across worker registrations). */
1785   pool_alloc (vlsm->workers, vcm->cfg.max_workers);
1787   pthread_atfork (vls_app_pre_fork, vls_app_fork_parent_handler,
1788 		  vls_app_fork_child_handler);
1789   atexit (vls_app_exit);
1790   vls_worker_alloc ();
1791   vlsl->vls_wrk_index = vcl_get_worker_index ();
1792   vls_mt_locks_init ();
  /* Route incoming worker rpcs through the vls dispatcher. */
1793   vcm->wrk_rpc_fn = vls_rpc_handler;
/* True if vcl is configured to use eventfd-based message queues. */
1798 vls_use_eventfd (void)
1800   return vcm->cfg.use_mq_eventfd;
/* True if vcl is configured with one vcl worker per app thread. */
1804 vls_mt_wrk_supported (void)
1806   return vcm->cfg.mt_wrk_supported;
/* True if the current worker must fall back to the kernel's epoll.
 * NOTE(review): the return for the no-worker (~0) case is elided here. */
1810 vls_use_real_epoll (void)
1812   if (vcl_get_worker_index () == ~0)
1815   return vcl_worker_get_current ()->vcl_needs_real_epoll;
1819 vls_register_vcl_worker (void)
1825 * fd.io coding-style-patch-verification: ON
1828 * eval: (c-set-style "gnu")