clib_memcpy_fast (c->data, s->ext_config, s->ext_config->len);
}
-static void
-vcl_send_session_listen (vcl_worker_t * wrk, vcl_session_t * s)
+void
+vcl_send_session_listen (vcl_worker_t *wrk, vcl_session_t *s)
{
app_session_evt_t _app_evt, *app_evt = &_app_evt;
session_listen_msg_t *mp;
clib_mem_free (s->ext_config);
s->ext_config = 0;
}
+ s->flags |= VCL_SESSION_F_PENDING_LISTEN;
}
static void
f64 timeout;
ASSERT (!wrk->session_attr_op);
+ mq = s->vpp_evt_q;
+ if (PREDICT_FALSE (!mq))
+ {
+ /* FIXME: attribute should be stored and sent once session is
+ * bound/connected to vpp */
+ return 0;
+ }
+
wrk->session_attr_op = 1;
wrk->session_attr_op_rv = -1;
- mq = s->vpp_evt_q;
app_alloc_ctrl_evt_to_vpp (mq, app_evt, SESSION_CTRL_EVT_TRANSPORT_ATTR);
mp = (session_transport_attr_msg_t *) app_evt->evt->data;
memset (mp, 0, sizeof (*mp));
format_session_error, mp->retval);
session->session_state = VCL_STATE_DETACHED;
session->vpp_handle = VCL_INVALID_SESSION_HANDLE;
+ session->vpp_error = mp->retval;
return session_index;
}
if (session->session_state != VCL_STATE_CLOSED)
session->session_state = VCL_STATE_DISCONNECT;
+
+ session->flags |= (VCL_SESSION_F_RD_SHUTDOWN | VCL_SESSION_F_WR_SHUTDOWN);
VDBG (0, "session %u [0x%llx]: reset", sid, reset_msg->handle);
return sid;
}
session->transport.lcl_port = mp->lcl_port;
vcl_session_table_add_listener (wrk, mp->handle, sid);
session->session_state = VCL_STATE_LISTEN;
+ session->flags &= ~VCL_SESSION_F_PENDING_LISTEN;
if (vcl_session_is_cl (session))
{
if (session->listener_index != VCL_INVALID_SESSION_INDEX)
{
listen_session = vcl_session_get (wrk, session->listener_index);
- listen_session->n_accepted_sessions--;
+ if (listen_session)
+ listen_session->n_accepted_sessions--;
}
return VPPCOM_OK;
{
s->flags |= VCL_SESSION_F_PENDING_DISCONNECT;
s->session_state = VCL_STATE_DISCONNECT;
+ s->flags |= (VCL_SESSION_F_RD_SHUTDOWN | VCL_SESSION_F_WR_SHUTDOWN);
vec_add2 (wrk->unhandled_evts_vector, ecpy, 1);
*ecpy = *e;
ecpy->postponed = 1;
vcl_send_session_unlisten (wrk, session);
- VDBG (1, "session %u [0x%llx]: sending unbind!", session->session_index,
+ VDBG (0, "session %u [0x%llx]: sending unbind!", session->session_index,
session->vpp_handle);
vcl_evt (VCL_EVT_UNBIND, session);
return vcl_bapi_attach ();
}
+int
+vcl_is_first_reattach_to_execute (void)
+{
+  /* The first worker to run the reattach logic (counter still at zero)
+   * performs the full application attach; subsequent workers only
+   * re-register themselves with vpp. Caller must hold workers_lock. */
+  return vcm->reattach_count == 0;
+}
+
+void
+vcl_set_reattach_counter (void)
+{
+  /* Count workers that completed the reattach; wrap back to zero once
+   * every allocated worker has gone through, so a later disconnect can
+   * be handled again. Caller must hold workers_lock. */
+  ++vcm->reattach_count;
+
+  if (vcm->reattach_count == vec_len (vcm->workers))
+    vcm->reattach_count = 0;
+}
+
+/**
+ * Reattach vcl to vpp after it has previously been disconnected.
+ *
+ * The logic should be:
+ * - first worker to hit `vcl_api_retry_attach` should attach to vpp,
+ * to reproduce the `vcl_api_attach` in `vppcom_app_create`.
+ * - the rest of the workers should reproduce `vcl_worker_register_with_vpp`
+ * from `vppcom_worker_register` since they were already allocated.
+ */
+
+static void
+vcl_api_retry_attach (vcl_worker_t *wrk)
+{
+  vcl_session_t *s;
+
+  /* Serialize reattach: exactly one worker (counter at zero) performs the
+   * full app attach; the rest only re-register with vpp. */
+  clib_spinlock_lock (&vcm->workers_lock);
+  if (vcl_is_first_reattach_to_execute ())
+    {
+      if (vcl_api_attach ())
+	{
+	  /* Attach failed: counter stays at zero so the next caller
+	   * retries the full attach. */
+	  clib_spinlock_unlock (&vcm->workers_lock);
+	  return;
+	}
+      vcl_set_reattach_counter ();
+      clib_spinlock_unlock (&vcm->workers_lock);
+    }
+  else
+    {
+      vcl_set_reattach_counter ();
+      clib_spinlock_unlock (&vcm->workers_lock);
+      /* NOTE(review): registration runs outside workers_lock — presumably
+       * intentional since it talks to vpp; confirm no counter race. */
+      vcl_worker_register_with_vpp ();
+    }
+
+  /* Treat listeners as configuration that needs to be re-added to vpp */
+  pool_foreach (s, wrk->sessions)
+    {
+      if (s->flags & VCL_SESSION_F_IS_VEP)
+	continue;
+      if (s->session_state == VCL_STATE_LISTEN_NO_MQ)
+	vppcom_session_listen (vcl_session_handle (s), 10);
+      else
+	VDBG (0, "internal error: unexpected state %d", s->session_state);
+    }
+}
+
+static void
+vcl_api_handle_disconnect (vcl_worker_t *wrk)
+{
+  /* vpp's api socket closed: mark this worker detached (~0 handle is the
+   * sentinel checked by the wait loops) and detach its sessions. Order
+   * matters — invalidate the handle before touching the sessions. */
+  wrk->api_client_handle = ~0;
+  vcl_worker_detach_sessions (wrk);
+}
+
static void
vcl_api_detach (vcl_worker_t * wrk)
{
+ if (wrk->api_client_handle == ~0)
+ return;
+
vcl_send_app_detach (wrk);
if (vcm->cfg.vpp_app_socket_api)
vcl_worker_alloc_and_init ();
if ((rv = vcl_api_attach ()))
- return rv;
+ {
+ vppcom_app_destroy ();
+ return rv;
+ }
VDBG (0, "app_name '%s', my_client_index %d (0x%x)", app_name,
vcm->workers[0].api_client_handle, vcm->workers[0].api_client_handle);
vcl_api_detach (current_wrk);
vcl_worker_cleanup (current_wrk, 0 /* notify vpp */ );
+ vcl_set_worker_index (~0);
vcl_elog_stop (vcm);
session->session_state = VCL_STATE_CLOSED;
session->vpp_handle = ~0;
session->is_dgram = vcl_proto_is_dgram (proto);
+ session->vpp_error = SESSION_E_NONE;
if (is_nonblocking)
vcl_session_set_attr (session, VCL_SESS_ATTR_NONBLOCK);
{
vcl_session_t *cur, *prev;
+ ASSERT (s->vep.lt_next == VCL_INVALID_SESSION_INDEX);
+
if (wrk->ep_lt_current == VCL_INVALID_SESSION_INDEX)
{
wrk->ep_lt_current = s->session_index;
{
vcl_session_t *prev, *next;
+ ASSERT (s->vep.lt_next != VCL_INVALID_SESSION_INDEX);
+
if (s->vep.lt_next == s->session_index)
{
wrk->ep_lt_current = VCL_INVALID_SESSION_INDEX;
s->vep.lt_next = VCL_INVALID_SESSION_INDEX;
+ s->vep.lt_prev = VCL_INVALID_SESSION_INDEX;
return;
}
wrk->ep_lt_current = s->vep.lt_next;
s->vep.lt_next = VCL_INVALID_SESSION_INDEX;
+ s->vep.lt_prev = VCL_INVALID_SESSION_INDEX;
}
int
return VPPCOM_OK;
}
-static int
-validate_args_session_accept_ (vcl_worker_t * wrk, vcl_session_t * ls)
-{
- if (ls->flags & VCL_SESSION_F_IS_VEP)
- {
- VDBG (0, "ERROR: cannot accept on epoll session %u!",
- ls->session_index);
- return VPPCOM_EBADFD;
- }
-
- if ((ls->session_state != VCL_STATE_LISTEN)
- && (!vcl_session_is_connectable_listener (wrk, ls)))
- {
- VDBG (0,
- "ERROR: session [0x%llx]: not in listen state! state 0x%x"
- " (%s)",
- ls->vpp_handle, ls->session_state,
- vcl_session_state_str (ls->session_state));
- return VPPCOM_EBADFD;
- }
- return VPPCOM_OK;
-}
-
int
vppcom_unformat_proto (uint8_t * proto, char *proto_str)
{
}
int
-vppcom_session_accept (uint32_t listen_session_handle, vppcom_endpt_t * ep,
- uint32_t flags)
+vppcom_session_accept (uint32_t ls_handle, vppcom_endpt_t *ep, uint32_t flags)
{
- u32 client_session_index = ~0, listen_session_index, accept_flags = 0;
+ u32 client_session_index = ~0, ls_index, accept_flags = 0;
vcl_worker_t *wrk = vcl_worker_get_current ();
session_accepted_msg_t accepted_msg;
- vcl_session_t *listen_session = 0;
- vcl_session_t *client_session = 0;
+ vcl_session_t *ls, *client_session = 0;
vcl_session_msg_t *evt;
u8 is_nonblocking;
- int rv;
again:
- listen_session = vcl_session_get_w_handle (wrk, listen_session_handle);
- if (!listen_session)
+ ls = vcl_session_get_w_handle (wrk, ls_handle);
+ if (!ls)
return VPPCOM_EBADFD;
- listen_session_index = listen_session->session_index;
- if ((rv = validate_args_session_accept_ (wrk, listen_session)))
- return rv;
+ if ((ls->session_state != VCL_STATE_LISTEN) &&
+ (ls->session_state != VCL_STATE_LISTEN_NO_MQ) &&
+ (!vcl_session_is_connectable_listener (wrk, ls)))
+ {
+ VDBG (0, "ERROR: session [0x%llx]: not in listen state! state (%s)",
+ ls->vpp_handle, vcl_session_state_str (ls->session_state));
+ return VPPCOM_EBADFD;
+ }
+
+ ls_index = ls->session_index;
- if (clib_fifo_elts (listen_session->accept_evts_fifo))
+ if (clib_fifo_elts (ls->accept_evts_fifo))
{
- clib_fifo_sub2 (listen_session->accept_evts_fifo, evt);
+ clib_fifo_sub2 (ls->accept_evts_fifo, evt);
accept_flags = evt->flags;
accepted_msg = evt->accepted_msg;
goto handle;
}
- is_nonblocking = vcl_session_has_attr (listen_session,
- VCL_SESS_ATTR_NONBLOCK);
+ is_nonblocking = vcl_session_has_attr (ls, VCL_SESS_ATTR_NONBLOCK);
while (1)
{
if (svm_msg_q_is_empty (wrk->app_event_queue) && is_nonblocking)
handle:
- client_session_index = vcl_session_accepted_handler (wrk, &accepted_msg,
- listen_session_index);
+ client_session_index =
+ vcl_session_accepted_handler (wrk, &accepted_msg, ls_index);
if (client_session_index == VCL_INVALID_SESSION_INDEX)
return VPPCOM_ECONNABORTED;
- listen_session = vcl_session_get (wrk, listen_session_index);
+ ls = vcl_session_get (wrk, ls_index);
client_session = vcl_session_get (wrk, client_session_index);
if (flags & O_NONBLOCK)
vcl_session_set_attr (client_session, VCL_SESS_ATTR_NONBLOCK);
- VDBG (1, "listener %u [0x%llx]: Got a connect request! session %u [0x%llx],"
- " flags %d, is_nonblocking %u", listen_session->session_index,
- listen_session->vpp_handle, client_session_index,
+ VDBG (1,
+ "listener %u [0x%llx]: Got a connect request! session %u [0x%llx],"
+ " flags %d, is_nonblocking %u",
+ ls->session_index, ls->vpp_handle, client_session_index,
client_session->vpp_handle, flags,
vcl_session_has_attr (client_session, VCL_SESS_ATTR_NONBLOCK));
VDBG (0,
"listener %u [0x%llx] accepted %u [0x%llx] peer: %U:%u "
"local: %U:%u",
- listen_session_handle, listen_session->vpp_handle,
- client_session_index, client_session->vpp_handle,
- vcl_format_ip46_address, &client_session->transport.rmt_ip,
+ ls_handle, ls->vpp_handle, client_session_index,
+ client_session->vpp_handle, vcl_format_ip46_address,
+ &client_session->transport.rmt_ip,
client_session->transport.is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (client_session->transport.rmt_port),
vcl_format_ip46_address, &client_session->transport.lcl_ip,
client_session->transport.is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (client_session->transport.lcl_port));
- vcl_evt (VCL_EVT_ACCEPT, client_session, listen_session,
- client_session_index);
+ vcl_evt (VCL_EVT_ACCEPT, client_session, ls, client_session_index);
/*
* Session might have been closed already
}
n_read = svm_fifo_segments (rx_fifo, s->rx_bytes_pending,
- (svm_fifo_seg_t *) ds, n_segments, max_bytes);
+ (svm_fifo_seg_t *) ds, &n_segments, max_bytes);
if (n_read < 0)
return VPPCOM_EAGAIN;
}
always_inline int
-vppcom_session_write_inline (vcl_worker_t * wrk, vcl_session_t * s, void *buf,
+vppcom_session_write_inline (vcl_worker_t *wrk, vcl_session_t *s, void *buf,
size_t n, u8 is_flush, u8 is_dgram)
{
int n_write, is_nonblocking;
et = SESSION_IO_EVT_TX_FLUSH;
if (is_dgram)
- n_write = app_send_dgram_raw (tx_fifo, &s->transport,
- s->vpp_evt_q, buf, n, et,
- 0 /* do_evt */ , SVM_Q_WAIT);
+ n_write =
+ app_send_dgram_raw_gso (tx_fifo, &s->transport, s->vpp_evt_q, buf, n,
+ s->gso_size, et, 0 /* do_evt */, SVM_Q_WAIT);
else
n_write = app_send_stream_raw (tx_fifo, s->vpp_evt_q, buf, n, et,
0 /* do_evt */ , SVM_Q_WAIT);
if (PREDICT_FALSE (!s))
return VPPCOM_EBADFD;
- return vppcom_session_write_inline (wrk, s, buf, n,
- 0 /* is_flush */ , s->is_dgram ? 1 : 0);
+ return vppcom_session_write_inline (wrk, s, buf, n, 0 /* is_flush */,
+ s->is_dgram ? 1 : 0);
}
int
if (PREDICT_FALSE (!s))
return VPPCOM_EBADFD;
- return vppcom_session_write_inline (wrk, s, buf, n,
- 1 /* is_flush */ , s->is_dgram ? 1 : 0);
+ return vppcom_session_write_inline (wrk, s, buf, n, 1 /* is_flush */,
+ s->is_dgram ? 1 : 0);
}
#define vcl_fifo_rx_evt_valid_or_break(_s) \
int n_mq_evts, i;
u64 buf;
+ if (PREDICT_FALSE (wrk->api_client_handle == ~0))
+ {
+ vcl_api_retry_attach (wrk);
+ return 0;
+ }
+
vec_validate (wrk->mq_events, pool_elts (wrk->mq_evt_conns));
n_mq_evts = epoll_wait (wrk->mqs_epfd, wrk->mq_events,
vec_len (wrk->mq_events), time_to_wait);
for (i = 0; i < n_mq_evts; i++)
{
+ if (PREDICT_FALSE (wrk->mq_events[i].data.u32 == ~0))
+ {
+ vcl_api_handle_disconnect (wrk);
+ continue;
+ }
+
mqc = vcl_mq_evt_conn_get (wrk, wrk->mq_events[i].data.u32);
n_read = read (mqc->mq_fd, &buf, sizeof (buf));
vcl_select_handle_mq (wrk, mqc->mq, n_bits, read_map, write_map,
return vcl_session_handle (vep_session);
}
+static void
+vcl_epoll_ctl_add_unhandled_event (vcl_worker_t *wrk, vcl_session_t *s,
+				   u8 is_epollet, session_evt_type_t evt)
+{
+  session_event_t e = { 0 };
+
+  /* Level-triggered session: put it on the LT list (if not already there)
+   * instead of queueing a one-shot event. */
+  if (!is_epollet)
+    {
+      if (s->vep.lt_next == VCL_INVALID_SESSION_INDEX)
+	vcl_epoll_lt_add (wrk, s);
+      return;
+    }
+
+  /* Edge-triggered: queue a synthetic event for the next epoll_wait */
+  e.event_type = evt;
+  e.session_index = s->session_index;
+  if (evt == SESSION_IO_EVT_RX)
+    s->flags &= ~VCL_SESSION_F_HAS_RX_EVT;
+  vec_add1 (wrk->unhandled_evts_vector, e);
+}
+
int
vppcom_epoll_ctl (uint32_t vep_handle, int op, uint32_t session_handle,
struct epoll_event *event)
/* Generate EPOLLOUT if tx fifo not full */
if ((event->events & EPOLLOUT) && (vcl_session_write_ready (s) > 0))
{
- session_event_t e = { 0 };
- e.event_type = SESSION_IO_EVT_TX;
- e.session_index = s->session_index;
- vec_add1 (wrk->unhandled_evts_vector, e);
+ vcl_epoll_ctl_add_unhandled_event (wrk, s, event->events & EPOLLET,
+ SESSION_IO_EVT_TX);
add_evt = 1;
}
/* Generate EPOLLIN if rx fifo has data */
if ((event->events & EPOLLIN) && (vcl_session_read_ready (s) > 0))
{
- session_event_t e = { 0 };
- e.event_type = SESSION_IO_EVT_RX;
- e.session_index = s->session_index;
- vec_add1 (wrk->unhandled_evts_vector, e);
- s->flags &= ~VCL_SESSION_F_HAS_RX_EVT;
+ vcl_epoll_ctl_add_unhandled_event (wrk, s, event->events & EPOLLET,
+ SESSION_IO_EVT_RX);
add_evt = 1;
}
if (!add_evt && vcl_session_is_closing (s))
if ((event->events & EPOLLOUT) && !(s->vep.ev.events & EPOLLOUT) &&
(vcl_session_write_ready (s) > 0))
{
- session_event_t e = { 0 };
- e.event_type = SESSION_IO_EVT_TX;
- e.session_index = s->session_index;
- vec_add1 (wrk->unhandled_evts_vector, e);
+ vcl_epoll_ctl_add_unhandled_event (wrk, s, event->events & EPOLLET,
+ SESSION_IO_EVT_TX);
}
/* Generate EPOLLIN if session read ready and event was not on */
if ((event->events & EPOLLIN) && !(s->vep.ev.events & EPOLLIN) &&
(vcl_session_read_ready (s) > 0))
{
- session_event_t e = { 0 };
- e.event_type = SESSION_IO_EVT_RX;
- e.session_index = s->session_index;
- vec_add1 (wrk->unhandled_evts_vector, e);
- s->flags &= ~VCL_SESSION_F_HAS_RX_EVT;
+ vcl_epoll_ctl_add_unhandled_event (wrk, s, event->events & EPOLLET,
+ SESSION_IO_EVT_RX);
}
s->vep.et_mask = VEP_DEFAULT_ET_MASK;
s->vep.ev = *event;
/* Generate EPOLLOUT because there's no connected event */
if (!(EPOLLOUT & session_events))
break;
+ /* We didn't have a fifo when the event was added */
+ svm_fifo_add_want_deq_ntf (
+ (vcl_session_is_ct (s) ? s->ct_tx_fifo : s->tx_fifo),
+ SVM_FIFO_WANT_DEQ_NOTIF_IF_FULL);
add_event = 1;
events[*num_ev].events = EPOLLOUT;
session_evt_data = s->vep.ev.data.u64;
}
session_evt_data = s->vep.ev.data.u64;
+ break;
+ case SESSION_CTRL_EVT_BOUND:
+ vcl_session_bound_handler (wrk, (session_bound_msg_t *) e->data);
break;
case SESSION_CTRL_EVT_RESET:
if (!e->postponed)
}
session_events = s->vep.ev.events;
add_event = 1;
- events[*num_ev].events = EPOLLHUP | EPOLLRDHUP;
+ events[*num_ev].events = EPOLLERR | EPOLLHUP;
+ if ((EPOLLRDHUP & session_events) &&
+ (s->flags & VCL_SESSION_F_RD_SHUTDOWN))
+ {
+ events[*num_ev].events |= EPOLLRDHUP;
+ }
+ if ((EPOLLIN & session_events) && (s->flags & VCL_SESSION_F_RD_SHUTDOWN))
+ {
+ events[*num_ev].events |= EPOLLIN;
+ }
session_evt_data = s->vep.ev.data.u64;
break;
case SESSION_CTRL_EVT_UNLISTEN_REPLY:
double end = -1;
u64 buf;
+ if (PREDICT_FALSE (wrk->api_client_handle == ~0))
+ {
+ vcl_api_retry_attach (wrk);
+ return n_evts;
+ }
+
vec_validate (wrk->mq_events, pool_elts (wrk->mq_evt_conns));
if (!n_evts)
{
for (i = 0; i < n_mq_evts; i++)
{
+ if (PREDICT_FALSE (wrk->mq_events[i].data.u32 == ~0))
+ {
+ /* api socket was closed */
+ vcl_api_handle_disconnect (wrk);
+ continue;
+ }
+
mqc = vcl_mq_evt_conn_get (wrk, wrk->mq_events[i].data.u32);
n_read = read (mqc->mq_fd, &buf, sizeof (buf));
vcl_epoll_wait_handle_mq (wrk, mqc->mq, events, maxevents, 0,
VDBG (2, "VPPCOM_ATTR_GET_TCP_USER_MSS: %d, buflen %d", *(int *) buffer,
*buflen);
break;
-
case VPPCOM_ATTR_SET_TCP_USER_MSS:
if (!(buffer && buflen && (*buflen == sizeof (u32))))
{
clib_memcpy (session->ext_config->data, buffer, *buflen);
session->ext_config->len = *buflen;
break;
+ case VPPCOM_ATTR_SET_IP_PKTINFO:
+ if (buffer && buflen && (*buflen == sizeof (int)) &&
+ !vcl_session_has_attr (session, VCL_SESS_ATTR_IP_PKTINFO))
+ {
+ if (*(int *) buffer)
+ vcl_session_set_attr (session, VCL_SESS_ATTR_IP_PKTINFO);
+ else
+ vcl_session_clear_attr (session, VCL_SESS_ATTR_IP_PKTINFO);
+
+ VDBG (2, "VCL_SESS_ATTR_IP_PKTINFO: %d, buflen %d",
+ vcl_session_has_attr (session, VCL_SESS_ATTR_IP_PKTINFO),
+ *buflen);
+ }
+ else
+ rv = VPPCOM_EINVAL;
+ break;
+
+ case VPPCOM_ATTR_GET_IP_PKTINFO:
+ if (buffer && buflen && (*buflen >= sizeof (int)))
+ {
+ *(int *) buffer =
+ vcl_session_has_attr (session, VCL_SESS_ATTR_IP_PKTINFO);
+ *buflen = sizeof (int);
+
+ VDBG (2, "VCL_SESS_ATTR_IP_PKTINFO: %d, buflen %d", *(int *) buffer,
+ *buflen);
+ }
+ else
+ rv = VPPCOM_EINVAL;
+ break;
default:
rv = VPPCOM_EINVAL;
return rv;
}
+static void
+vcl_handle_ep_app_tlvs (vcl_session_t *s, vppcom_endpt_t *ep)
+{
+  vppcom_endpt_tlv_t *tlv = ep->app_tlvs;
+
+  /* Guard against callers that did not check ep->app_tlvs: the do/while
+   * below dereferences tlv before testing it. */
+  if (!tlv)
+    return;
+
+  do
+    {
+      switch (tlv->data_type)
+	{
+	case VCL_UDP_SEGMENT:
+	  /* Application-requested gso size for datagram sends */
+	  s->gso_size = *(u16 *) tlv->data;
+	  break;
+	case VCL_IP_PKTINFO:
+	  /* Application-provided local (source) ipv4 address */
+	  clib_memcpy_fast (&s->transport.lcl_ip, (ip4_address_t *) tlv->data,
+			    sizeof (ip4_address_t));
+	  break;
+	default:
+	  VDBG (0, "Ignoring unsupported app tlv %u", tlv->data_type);
+	  break;
+	}
+      tlv = VCL_EP_NEXT_APP_TLV (ep, tlv);
+    }
+  while (tlv);
+}
+
int
vppcom_session_sendto (uint32_t session_handle, void *buffer,
uint32_t buflen, int flags, vppcom_endpt_t * ep)
s->transport.rmt_port = ep->port;
vcl_ip_copy_from_ep (&s->transport.rmt_ip, ep);
+ if (ep->app_tlvs)
+ vcl_handle_ep_app_tlvs (s, ep);
+
/* Session not connected/bound in vpp. Create it by 'connecting' it */
if (PREDICT_FALSE (s->session_state == VCL_STATE_CLOSED))
{
st = "VPPCOM_ETIMEDOUT";
break;
+ case VPPCOM_EADDRINUSE:
+ st = "VPPCOM_EADDRINUSE";
+ break;
+
default:
st = "UNKNOWN_STATE";
break;
return vcl_bapi_del_cert_key_pair (ckpair_index);
}
+int
+vppcom_session_get_error (uint32_t session_handle)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  vcl_session_t *session;
+
+  /* Map the last vpp-reported error on the session to a vppcom code */
+  session = vcl_session_get_w_handle (wrk, session_handle);
+  if (!session)
+    return VPPCOM_EBADFD;
+
+  if (PREDICT_FALSE (session->flags & VCL_SESSION_F_IS_VEP))
+    {
+      VWRN ("epoll session %u! will not have connect", session->session_index);
+      return VPPCOM_EBADFD;
+    }
+
+  switch (session->vpp_error)
+    {
+    case SESSION_E_NONE:
+      return VPPCOM_OK;
+    case SESSION_E_PORTINUSE:
+      return VPPCOM_EADDRINUSE;
+    case SESSION_E_REFUSED:
+      return VPPCOM_ECONNREFUSED;
+    default:
+      return VPPCOM_EFAULT;
+    }
+}
+
+int
+vppcom_worker_is_detached (void)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+
+  /* Detach detection relies on mq eventfds surfacing the api-socket close,
+   * so without use_mq_eventfd the answer is unknowable. */
+  if (vcm->cfg.use_mq_eventfd)
+    return wrk->api_client_handle == ~0;
+
+  return VPPCOM_ENOTSUP;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*