/* NOTE(review): this region is an unresolved unified-diff fragment ('+'/'-'
 * prefixed lines) of the APPLICATION_ATTACH_REPLY handler, with context
 * lines elided -- the early 'return;' and stray '}' below are cut artifacts,
 * not real control flow.  Intent per the '+' lines: move the app event queue
 * and mq-eventfd epoll registration from the global vcm onto a per-worker
 * vcl_worker_t (worker 0 here) -- TODO confirm against the full file. */
vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t *
					    mp)
{
+  vcl_worker_t *wrk = vcl_worker_get (0);
  u32 n_fds = 0;
  int *fds = 0;
      return;
    }
-  vcm->app_event_queue = uword_to_pointer (mp->app_event_queue_address,
+  wrk->app_event_queue = uword_to_pointer (mp->app_event_queue_address,
					   svm_msg_q_t *);
  if (mp->n_fds)
    {
      if (mp->fd_flags & SESSION_FD_F_MQ_EVENTFD)
	{
	  /* new API takes the worker explicitly; epfd validity check dropped
	   * by the diff -- presumably handled inside vcl_mq_epoll_add_evfd */
-	  svm_msg_q_set_consumer_eventfd (vcm->app_event_queue, fds[n_fds]);
-	  if (vcm->mqs_epfd < 0)
-	    clib_unix_warning ("epoll_create() returned");
-	  vcl_mq_epoll_add_evfd (vcm->app_event_queue);
+	  svm_msg_q_set_consumer_eventfd (wrk->app_event_queue, fds[n_fds]);
+	  vcl_mq_epoll_add_evfd (wrk, wrk->app_event_queue);
	  n_fds++;
	}
      vcm->app_state = STATE_APP_ATTACHED;
    }
+static void
+vl_api_app_worker_add_del_reply_t_handler (vl_api_app_worker_add_del_reply_t *
+ mp)
+{
+ int n_fds = 0, *fds = 0;
+ vcl_worker_t *wrk;
+ u32 wrk_index;
+
+ if (mp->retval)
+ {
+ clib_warning ("VCL<%d>: add/del worker failed: %U", getpid (),
+ format_api_error, ntohl (mp->retval));
+ goto failed;
+ }
+ wrk_index = clib_net_to_host_u32 (mp->wrk_index);
+ if (mp->context != wrk_index)
+ {
+ clib_warning ("VCL<%d>: wrk numbering doesn't match ours: %u, vpp: %u",
+ getpid (), mp->context, wrk_index);
+ goto failed;
+ }
+ if (!mp->is_add)
+ return;
+
+ wrk = vcl_worker_get (wrk_index);
+ wrk->app_event_queue = uword_to_pointer (mp->app_event_queue_address,
+ svm_msg_q_t *);
+
+ if (mp->n_fds)
+ {
+ vec_validate (fds, mp->n_fds);
+ vl_socket_client_recv_fd_msg (fds, mp->n_fds, 5);
+
+ if (mp->fd_flags & SESSION_FD_F_VPP_MQ_SEGMENT)
+ if (ssvm_segment_attach ("vpp-worker-seg", SSVM_SEGMENT_MEMFD,
+ fds[n_fds++]))
+ goto failed;
+
+ if (mp->fd_flags & SESSION_FD_F_MEMFD_SEGMENT)
+ if (ssvm_segment_attach ((char *) mp->segment_name,
+ SSVM_SEGMENT_MEMFD, fds[n_fds++]))
+ goto failed;
+
+ if (mp->fd_flags & SESSION_FD_F_MQ_EVENTFD)
+ {
+ svm_msg_q_set_consumer_eventfd (wrk->app_event_queue, fds[n_fds]);
+ vcl_mq_epoll_add_evfd (wrk, wrk->app_event_queue);
+ n_fds++;
+ }
+
+ vec_free (fds);
+ }
+ else
+ {
+ if (ssvm_segment_attach ((char *) mp->segment_name, SSVM_SEGMENT_SHM,
+ -1))
+ goto failed;
+ }
+ vcm->app_state = STATE_APP_READY;
+ return;
+
+failed:
+ vcm->app_state = STATE_APP_FAILED;
+}
+
/* NOTE(review): garbled diff fragment -- the signature says
 * application_detach_reply but the surviving body is clearly the
 * cut-through registration add handler (ctr/mq/peer_mq wiring); the real
 * detach body and parts of this one were elided by the cut.  Intent per
 * the '+' lines: thread a vcl_worker_t through the ct-registration API
 * instead of using process-global state -- TODO confirm in full file. */
static void
vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t *
					   mp)
{
  vcl_cut_through_registration_t *ctr;
  u32 mqc_index = ~0;
+  vcl_worker_t *wrk;
  int *fds = 0;
  if (mp->n_fds)
      vl_socket_client_recv_fd_msg (fds, mp->n_fds, 5);
    }
  /* registration alloc/lock now scoped to the resolved worker */
-  ctr = vcl_ct_registration_lock_and_alloc ();
+  wrk = vcl_worker_get (mp->wrk_index);
+  ctr = vcl_ct_registration_lock_and_alloc (wrk);
  ctr->mq = uword_to_pointer (mp->evt_q_address, svm_msg_q_t *);
  ctr->peer_mq = uword_to_pointer (mp->peer_evt_q_address, svm_msg_q_t *);
-  VDBG (0, "Adding ct registration %u", vcl_ct_registration_index (ctr));
+  VDBG (0, "Adding ct registration %u", vcl_ct_registration_index (wrk, ctr));
  if (mp->n_fds && (mp->fd_flags & SESSION_FD_F_MQ_EVENTFD))
    {
      /* fds[0] = our consumer eventfd, fds[1] = peer's producer eventfd */
      svm_msg_q_set_consumer_eventfd (ctr->mq, fds[0]);
      svm_msg_q_set_producer_eventfd (ctr->peer_mq, fds[1]);
-      mqc_index = vcl_mq_epoll_add_evfd (ctr->mq);
+      mqc_index = vcl_mq_epoll_add_evfd (wrk, ctr->mq);
      ctr->epoll_evt_conn_index = mqc_index;
      vec_free (fds);
    }
-  vcl_ct_registration_lookup_add (mp->evt_q_address,
-				  vcl_ct_registration_index (ctr));
-  vcl_ct_registration_unlock ();
+  vcl_ct_registration_lookup_add (wrk, mp->evt_q_address,
+				  vcl_ct_registration_index (wrk, ctr));
+  vcl_ct_registration_unlock (wrk);
}
/* Handler for BIND_SOCK_REPLY over the binary api.  Session state for a
 * bind is now driven by the equivalent message delivered on the message
 * queue, so this reply is logged and otherwise ignored (the '-' lines of
 * the diff carried the superseded session-table bookkeeping). */
static void
vl_api_bind_sock_reply_t_handler (vl_api_bind_sock_reply_t * mp)
{
  /* Expecting a similar message on mq. So ignore this */
  VDBG (1, "VCL<%d>: bapi msg vpp handle 0x%llx, sid %u: bind retval: %u!",
	getpid (), mp->handle, mp->context, mp->retval);
}
/* NOTE(review): fragment of (presumably) the UNBIND_SOCK_REPLY handler --
 * the signature line and retval check were elided by the diff cut; only
 * the success log and closing brace survive here. */
static void
  VDBG (1, "VCL<%d>: sid %u: unbind succeeded!", getpid (), mp->context);
}
+static void
+vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t *
+ mp)
+{
+ if (mp->retval)
+ clib_warning ("VCL<%d>: ERROR: sid %u: disconnect failed: %U",
+ getpid (), mp->context, format_api_error,
+ ntohl (mp->retval));
+}
+
+static void
+vl_api_connect_session_reply_t_handler (vl_api_connect_sock_reply_t * mp)
+{
+ if (mp->retval)
+ clib_warning ("VCL<%d>: ERROR: sid %u: connect failed: %U",
+ getpid (), mp->context, format_api_error,
+ ntohl (mp->retval));
+}
+
/* Table of session-layer binary-api messages handled by VCL: each _(N, n)
 * pair maps VL_API_<N> to vl_api_<n>_t_handler when expanded inside
 * vppcom_api_hookup() below.  The '+' lines register the new connect /
 * disconnect / worker-add-del reply handlers defined above.
 * NOTE(review): the hookup macro body and its foreach invocation are
 * partially elided by the diff cut -- no comments inserted inside the
 * backslash continuations below to avoid breaking them. */
#define foreach_sock_msg                                                \
_(SESSION_ENABLE_DISABLE_REPLY, session_enable_disable_reply)           \
_(BIND_SOCK_REPLY, bind_sock_reply)                                     \
_(UNBIND_SOCK_REPLY, unbind_sock_reply)                                 \
+_(CONNECT_SESSION_REPLY, connect_session_reply)                        \
+_(DISCONNECT_SESSION_REPLY, disconnect_session_reply)                  \
_(APPLICATION_ATTACH_REPLY, application_attach_reply)                   \
_(APPLICATION_DETACH_REPLY, application_detach_reply)                   \
_(MAP_ANOTHER_SEGMENT, map_another_segment)                             \
_(UNMAP_SEGMENT, unmap_segment)                                         \
_(APP_CUT_THROUGH_REGISTRATION_ADD, app_cut_through_registration_add)   \
+_(APP_WORKER_ADD_DEL_REPLY, app_worker_add_del_reply)                  \
void
vppcom_api_hookup (void)
{
-#define _(N, n)                                                  \
-    vl_msg_api_set_handlers(VL_API_##N, #n,                      \
+#define _(N, n)                                                         \
+    vl_msg_api_set_handlers(VL_API_##N, #n,                             \
                            vl_api_##n##_t_handler,              \
                            vl_noop_handler,                     \
                            vl_api_##n##_t_endian,
}
void
-vppcom_send_connect_sock (vcl_session_t * session, u32 session_index)
+vcl_send_app_worker_add_del (u8 is_add)
+{
+ vcl_worker_t *wrk = vcl_worker_get_current ();
+ vl_api_app_worker_add_del_t *mp;
+ u32 wrk_index = wrk->wrk_index;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id = ntohs (VL_API_APP_WORKER_ADD_DEL);
+ mp->client_index = vcm->my_client_index;
+ mp->app_api_index = clib_host_to_net_u32 (vcm->my_client_index);
+ mp->context = wrk_index;
+ mp->is_add = is_add;
+ if (!is_add)
+ mp->wrk_index = clib_host_to_net_u32 (wrk_index);
+
+ vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & mp);
+}
+
/* NOTE(review): diff fragment of vppcom_send_connect_sock.  The '+' lines
 * drop the session_index parameter (now taken from the session itself) and
 * add the worker index to the request.  The tail of the function -- at
 * minimum the vl_msg_api_send_shmem call -- appears elided by the diff cut
 * before the closing brace; confirm against the full file. */
+void
+vppcom_send_connect_sock (vcl_session_t * session)
{
  vl_api_connect_sock_t *cmp;
-  /* Assumes caller as acquired the spinlock: vcm->sessions_lockp */
  cmp = vl_msg_api_alloc (sizeof (*cmp));
  memset (cmp, 0, sizeof (*cmp));
  cmp->_vl_msg_id = ntohs (VL_API_CONNECT_SOCK);
  cmp->client_index = vcm->my_client_index;
-  cmp->context = session_index;
-
+  cmp->context = session->session_index;
+  cmp->wrk_index = vcl_get_worker_index ();
  cmp->is_ip4 = session->transport.is_ip4;
  clib_memcpy (cmp->ip, &session->transport.rmt_ip, sizeof (cmp->ip));
  cmp->port = session->transport.rmt_port;
}
/* NOTE(review): diff fragment covering vppcom_send_disconnect_session,
 * vppcom_send_bind_sock, and the tail of an unbind sender, with interiors
 * elided by the cut.  Common intent of the '+' lines: drop redundant
 * session_index parameters and stamp the worker index into each request. */
void
-vppcom_send_disconnect_session (u64 vpp_handle, u32 session_index)
+vppcom_send_disconnect_session (u64 vpp_handle)
{
  vl_api_disconnect_session_t *dmp;
-  VDBG (1, "VCL<%d>: vpp handle 0x%llx, sid %u: sending disconnect msg",
-	getpid (), vpp_handle, session_index);
-
  dmp = vl_msg_api_alloc (sizeof (*dmp));
  memset (dmp, 0, sizeof (*dmp));
  dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION);
 * of bind and listen locally via vppcom_session_bind() and
 * vppcom_session_listen() */
/* NOTE(review): bind sender below is missing its alloc/memset lines --
 * bmp is used before any visible vl_msg_api_alloc; elided context. */
void
-vppcom_send_bind_sock (vcl_session_t * session, u32 session_index)
+vppcom_send_bind_sock (vcl_session_t * session)
{
  vl_api_bind_sock_t *bmp;
  bmp->_vl_msg_id = ntohs (VL_API_BIND_SOCK);
  bmp->client_index = vcm->my_client_index;
-  bmp->context = session_index;
+  bmp->context = session->session_index;
+  bmp->wrk_index = vcl_get_worker_index ();
  bmp->is_ip4 = session->transport.is_ip4;
  clib_memcpy (bmp->ip, &session->transport.lcl_ip, sizeof (bmp->ip));
  bmp->port = session->transport.lcl_port;
/* NOTE(review): abrupt switch from bmp to ump here -- these lines belong
 * to a separate unbind sender whose opening was elided by the cut. */
  ump->_vl_msg_id = ntohs (VL_API_UNBIND_SOCK);
  ump->client_index = vcm->my_client_index;
+  ump->wrk_index = vcl_get_worker_index ();
  ump->handle = vpp_handle;
  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & ump);
}