/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vcl/vcl_private.h>
/* Pthread key whose destructor (vcl_worker_cleanup_cb) runs when a thread
 * that registered a VCL worker exits, so per-worker state is torn down. */
static pthread_key_t vcl_worker_stop_key;
21 vcl_mq_evt_conn_alloc (vcl_worker_t * wrk)
23 vcl_mq_evt_conn_t *mqc;
24 pool_get (wrk->mq_evt_conns, mqc);
25 memset (mqc, 0, sizeof (*mqc));
30 vcl_mq_evt_conn_index (vcl_worker_t * wrk, vcl_mq_evt_conn_t * mqc)
32 return (mqc - wrk->mq_evt_conns);
36 vcl_mq_evt_conn_get (vcl_worker_t * wrk, u32 mq_conn_idx)
38 return pool_elt_at_index (wrk->mq_evt_conns, mq_conn_idx);
42 vcl_mq_epoll_add_evfd (vcl_worker_t * wrk, svm_msg_q_t * mq)
44 struct epoll_event e = { 0 };
45 vcl_mq_evt_conn_t *mqc;
49 mq_fd = svm_msg_q_get_eventfd (mq);
51 if (wrk->mqs_epfd < 0 || mq_fd == -1)
54 mqc = vcl_mq_evt_conn_alloc (wrk);
55 mqc_index = vcl_mq_evt_conn_index (wrk, mqc);
60 e.data.u32 = mqc_index;
61 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_ADD, mq_fd, &e) < 0)
63 VDBG (0, "failed to add mq eventfd to mq epoll fd");
71 vcl_mq_epoll_del_evfd (vcl_worker_t * wrk, u32 mqc_index)
73 vcl_mq_evt_conn_t *mqc;
75 if (wrk->mqs_epfd || mqc_index == ~0)
78 mqc = vcl_mq_evt_conn_get (wrk, mqc_index);
79 if (epoll_ctl (wrk->mqs_epfd, EPOLL_CTL_DEL, mqc->mq_fd, 0) < 0)
81 VDBG (0, "failed to del mq eventfd to mq epoll fd");
88 vcl_worker_alloc (void)
91 pool_get (vcm->workers, wrk);
92 memset (wrk, 0, sizeof (*wrk));
93 wrk->wrk_index = wrk - vcm->workers;
94 wrk->forked_child = ~0;
99 vcl_worker_free (vcl_worker_t * wrk)
101 pool_put (vcm->workers, wrk);
105 vcl_api_app_worker_add (void)
107 if (vcm->cfg.vpp_app_socket_api)
108 return vcl_sapi_app_worker_add ();
110 return vcl_bapi_app_worker_add ();
114 vcl_api_app_worker_del (vcl_worker_t * wrk)
116 if (vcm->cfg.vpp_app_socket_api)
117 return vcl_sapi_app_worker_del (wrk);
119 vcl_bapi_app_worker_del (wrk);
123 vcl_worker_cleanup (vcl_worker_t * wrk, u8 notify_vpp)
125 clib_spinlock_lock (&vcm->workers_lock);
127 vcl_api_app_worker_del (wrk);
129 if (wrk->mqs_epfd > 0)
130 close (wrk->mqs_epfd);
131 hash_free (wrk->session_index_by_vpp_handles);
132 vec_free (wrk->mq_events);
133 vec_free (wrk->mq_msg_vector);
134 vcl_worker_free (wrk);
135 clib_spinlock_unlock (&vcm->workers_lock);
139 vcl_worker_cleanup_cb (void *arg)
144 wrk_index = vcl_get_worker_index ();
145 wrk = vcl_worker_get_if_valid (wrk_index);
149 vcl_worker_cleanup (wrk, 1 /* notify vpp */ );
150 vcl_set_worker_index (~0);
151 VDBG (0, "cleaned up worker %u", wrk_index);
155 vcl_worker_alloc_and_init ()
159 /* This was initialized already */
160 if (vcl_get_worker_index () != ~0)
163 /* Use separate heap map entry for worker */
164 clib_mem_set_thread_index ();
166 if (pool_elts (vcm->workers) == vcm->cfg.max_workers)
168 VDBG (0, "max-workers %u limit reached", vcm->cfg.max_workers);
172 clib_spinlock_lock (&vcm->workers_lock);
173 wrk = vcl_worker_alloc ();
174 vcl_set_worker_index (wrk->wrk_index);
175 wrk->thread_id = pthread_self ();
176 wrk->current_pid = getpid ();
179 if (vcm->cfg.use_mq_eventfd)
181 wrk->vcl_needs_real_epoll = 1;
182 wrk->mqs_epfd = epoll_create (1);
183 wrk->vcl_needs_real_epoll = 0;
184 if (wrk->mqs_epfd < 0)
186 clib_unix_warning ("epoll_create() returned");
191 wrk->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
192 clib_time_init (&wrk->clib_time);
193 vec_validate (wrk->mq_events, 64);
194 vec_validate (wrk->mq_msg_vector, 128);
195 vec_reset_length (wrk->mq_msg_vector);
196 vec_validate (wrk->unhandled_evts_vector, 128);
197 vec_reset_length (wrk->unhandled_evts_vector);
198 clib_spinlock_unlock (&vcm->workers_lock);
205 vcl_worker_register_with_vpp (void)
207 vcl_worker_t *wrk = vcl_worker_get_current ();
209 clib_spinlock_lock (&vcm->workers_lock);
211 if (vcl_api_app_worker_add ())
213 VDBG (0, "failed to add worker to vpp");
214 clib_spinlock_unlock (&vcm->workers_lock);
217 if (pthread_key_create (&vcl_worker_stop_key, vcl_worker_cleanup_cb))
218 VDBG (0, "failed to add pthread cleanup function");
219 if (pthread_setspecific (vcl_worker_stop_key, &wrk->thread_id))
220 VDBG (0, "failed to setup key value");
222 clib_spinlock_unlock (&vcm->workers_lock);
224 VDBG (0, "added worker %u", wrk->wrk_index);
229 vcl_worker_ctrl_mq (vcl_worker_t * wrk)
235 vcl_session_read_ready (vcl_session_t * s)
237 if (PREDICT_FALSE (s->flags & VCL_SESSION_F_IS_VEP))
239 VDBG (0, "ERROR: session %u: cannot read from an epoll session!",
241 return VPPCOM_EBADFD;
244 if (vcl_session_is_open (s))
246 if (vcl_session_is_ct (s))
247 return svm_fifo_max_dequeue_cons (s->ct_rx_fifo);
251 session_dgram_pre_hdr_t ph;
254 max_deq = svm_fifo_max_dequeue_cons (s->rx_fifo);
255 if (max_deq <= SESSION_CONN_HDR_LEN)
257 if (svm_fifo_peek (s->rx_fifo, 0, sizeof (ph), (u8 *) & ph) < 0)
259 if (ph.data_length + SESSION_CONN_HDR_LEN > max_deq)
262 return ph.data_length;
265 return svm_fifo_max_dequeue_cons (s->rx_fifo);
267 else if (s->session_state == VCL_STATE_LISTEN)
269 return clib_fifo_elts (s->accept_evts_fifo);
273 return (s->session_state == VCL_STATE_DISCONNECT) ?
274 VPPCOM_ECONNRESET : VPPCOM_ENOTCONN;
279 vcl_session_write_ready (vcl_session_t * s)
281 if (PREDICT_FALSE (s->flags & VCL_SESSION_F_IS_VEP))
283 VDBG (0, "session %u [0x%llx]: cannot write to an epoll session!",
284 s->session_index, s->vpp_handle);
285 return VPPCOM_EBADFD;
288 if (vcl_session_is_open (s))
290 if (vcl_session_is_ct (s))
291 return svm_fifo_max_enqueue_prod (s->ct_tx_fifo);
295 u32 max_enq = svm_fifo_max_enqueue_prod (s->tx_fifo);
297 if (max_enq <= sizeof (session_dgram_hdr_t))
299 return max_enq - sizeof (session_dgram_hdr_t);
302 return svm_fifo_max_enqueue_prod (s->tx_fifo);
304 else if (s->session_state == VCL_STATE_LISTEN)
307 return svm_fifo_max_enqueue_prod (s->tx_fifo);
309 return VPPCOM_EBADFD;
313 return (s->session_state == VCL_STATE_DISCONNECT) ?
314 VPPCOM_ECONNRESET : VPPCOM_ENOTCONN;
319 vcl_session_alloc_ext_cfg (vcl_session_t *s,
320 transport_endpt_ext_cfg_type_t type, u32 len)
325 s->ext_config = clib_mem_alloc (len);
326 clib_memset (s->ext_config, 0, len);
327 s->ext_config->len = len;
328 s->ext_config->type = type;
334 vcl_segment_attach (u64 segment_handle, char *name, ssvm_segment_type_t type,
337 fifo_segment_create_args_t _a, *a = &_a;
340 memset (a, 0, sizeof (*a));
341 a->segment_name = name;
342 a->segment_type = type;
344 if (type == SSVM_SEGMENT_MEMFD)
347 clib_rwlock_writer_lock (&vcm->segment_table_lock);
349 if ((rv = fifo_segment_attach (&vcm->segment_main, a)))
351 clib_warning ("svm_fifo_segment_attach ('%s') failed", name);
354 hash_set (vcm->segment_table, segment_handle, a->new_segment_indices[0]);
356 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
358 vec_free (a->new_segment_indices);
363 vcl_segment_table_lookup (u64 segment_handle)
367 clib_rwlock_reader_lock (&vcm->segment_table_lock);
368 seg_indexp = hash_get (vcm->segment_table, segment_handle);
369 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
372 return VCL_INVALID_SEGMENT_INDEX;
373 return ((u32) * seg_indexp);
377 vcl_segment_detach (u64 segment_handle)
379 fifo_segment_main_t *sm = &vcm->segment_main;
380 fifo_segment_t *segment;
383 segment_index = vcl_segment_table_lookup (segment_handle);
384 if (segment_index == (u32) ~ 0)
387 clib_rwlock_writer_lock (&vcm->segment_table_lock);
389 segment = fifo_segment_get_segment (sm, segment_index);
390 fifo_segment_delete (sm, segment);
391 hash_unset (vcm->segment_table, segment_handle);
393 clib_rwlock_writer_unlock (&vcm->segment_table_lock);
395 VDBG (0, "detached segment %u handle %u", segment_index, segment_handle);
399 vcl_segment_attach_session (uword segment_handle, uword rxf_offset,
400 uword txf_offset, uword mq_offset, u8 is_ct,
403 u32 fs_index, eqs_index;
404 svm_fifo_t *rxf, *txf;
408 fs_index = vcl_segment_table_lookup (segment_handle);
409 if (fs_index == VCL_INVALID_SEGMENT_INDEX)
411 VDBG (0, "ERROR: segment for session %u is not mounted!",
416 if (!is_ct && mq_offset != (uword) ~0)
418 eqs_handle = vcl_vpp_worker_segment_handle (0);
419 eqs_index = vcl_segment_table_lookup (eqs_handle);
420 ASSERT (eqs_index != VCL_INVALID_SEGMENT_INDEX);
423 clib_rwlock_reader_lock (&vcm->segment_table_lock);
425 fs = fifo_segment_get_segment (&vcm->segment_main, fs_index);
426 rxf = fifo_segment_alloc_fifo_w_offset (fs, rxf_offset);
427 txf = fifo_segment_alloc_fifo_w_offset (fs, txf_offset);
428 rxf->segment_index = fs_index;
429 txf->segment_index = fs_index;
431 if (!is_ct && mq_offset != (uword) ~0)
433 fs = fifo_segment_get_segment (&vcm->segment_main, eqs_index);
435 fifo_segment_msg_q_attach (fs, mq_offset, rxf->shr->slice_index);
438 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
442 rxf->shr->client_session_index = s->session_index;
443 txf->shr->client_session_index = s->session_index;
444 rxf->client_thread_index = vcl_get_worker_index ();
445 txf->client_thread_index = vcl_get_worker_index ();
459 vcl_session_detach_fifos (vcl_session_t *s)
466 clib_rwlock_reader_lock (&vcm->segment_table_lock);
468 fs = fifo_segment_get_segment_if_valid (&vcm->segment_main,
469 s->rx_fifo->segment_index);
473 fifo_segment_free_client_fifo (fs, s->rx_fifo);
474 fifo_segment_free_client_fifo (fs, s->tx_fifo);
477 fs = fifo_segment_get_segment_if_valid (&vcm->segment_main,
478 s->ct_rx_fifo->segment_index);
482 fifo_segment_free_client_fifo (fs, s->ct_rx_fifo);
483 fifo_segment_free_client_fifo (fs, s->ct_tx_fifo);
487 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
491 vcl_segment_attach_mq (uword segment_handle, uword mq_offset, u32 mq_index,
497 fs_index = vcl_segment_table_lookup (segment_handle);
498 if (fs_index == VCL_INVALID_SEGMENT_INDEX)
500 VDBG (0, "ERROR: mq segment %lx for is not attached!", segment_handle);
504 clib_rwlock_reader_lock (&vcm->segment_table_lock);
506 fs = fifo_segment_get_segment (&vcm->segment_main, fs_index);
507 *mq = fifo_segment_msg_q_attach (fs, mq_offset, mq_index);
509 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
515 vcl_segment_discover_mqs (uword segment_handle, int *fds, u32 n_fds)
520 fs_index = vcl_segment_table_lookup (segment_handle);
521 if (fs_index == VCL_INVALID_SEGMENT_INDEX)
523 VDBG (0, "ERROR: mq segment %lx for is not attached!", segment_handle);
527 clib_rwlock_reader_lock (&vcm->segment_table_lock);
529 fs = fifo_segment_get_segment (&vcm->segment_main, fs_index);
530 fifo_segment_msg_qs_discover (fs, fds, n_fds);
532 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
538 vcl_segment_alloc_chunk (uword segment_handle, u32 slice_index, u32 size,
545 fs_index = vcl_segment_table_lookup (segment_handle);
546 if (fs_index == VCL_INVALID_SEGMENT_INDEX)
548 VDBG (0, "ERROR: mq segment %lx for is not attached!", segment_handle);
552 clib_rwlock_reader_lock (&vcm->segment_table_lock);
554 fs = fifo_segment_get_segment (&vcm->segment_main, fs_index);
555 c = fifo_segment_alloc_chunk_w_slice (fs, slice_index, size);
556 *offset = fifo_segment_chunk_offset (fs, c);
558 clib_rwlock_reader_unlock (&vcm->segment_table_lock);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */