diff --git a/src/vcl/vppcom.c b/src/vcl/vppcom.c
index 1f9857676e2..a66926d8731 100644
--- a/src/vcl/vppcom.c
+++ b/src/vcl/vppcom.c
@@ -195,14 +195,40 @@ typedef enum vcl_event_id_
 {
   VCL_EVENT_INVALID_EVENT,
   VCL_EVENT_CONNECT_REQ_ACCEPTED,
+  VCL_EVENT_IOEVENT_RX_FIFO,
+  VCL_EVENT_IOEVENT_TX_FIFO,
   VCL_EVENT_N_EVENTS
 } vcl_event_id_t;
 
+
 typedef struct vce_event_connect_request_
 {
   u32 accepted_session_index;
 } vce_event_connect_request_t;
 
+typedef struct vppcom_session_listener
+{
+  vppcom_session_listener_cb user_cb;
+  vppcom_session_listener_errcb user_errcb;
+  void *user_cb_data;
+} vppcom_session_listener_t;
+
+typedef struct vppcom_session_ioevent_
+{
+  vppcom_session_ioevent_cb user_cb;
+  void *user_cb_data;
+} vppcom_session_ioevent_t;
+
+typedef struct vppcom_session_io_thread_
+{
+  pthread_t thread;
+  pthread_mutex_t vce_io_lock;
+  pthread_cond_t vce_io_cond;
+  u32 *active_session_indexes;	//pool
+  vppcom_session_ioevent_t *ioevents;	//pool
+  clib_spinlock_t io_sessions_lockp;
+} vppcom_session_io_thread_t;
+
 typedef struct vppcom_main_t_
 {
   u8 init;
@@ -247,6 +273,9 @@ typedef struct vppcom_main_t_
   /* Event thread */
   vce_event_thread_t event_thread;
 
+  /* IO thread */
+  vppcom_session_io_thread_t session_io_thread;
+
   /* VPP Event-logger */
   elog_main_t elog_main;
   elog_track_t elog_track;
@@ -357,6 +386,9 @@ vppcom_session_state_str (session_state_t state)
 /*
  * VPPCOM Utility Functions
  */
+
+
+
 static inline int
 vppcom_session_at_index (u32 session_index, session_t * volatile *sess)
 {
@@ -372,6 +404,81 @@ vppcom_session_at_index (u32 session_index, session_t * volatile *sess)
   return VPPCOM_OK;
 }
 
+void *
+vppcom_session_io_thread_fn (void *arg)
+{
+  vppcom_session_io_thread_t *evt = (vppcom_session_io_thread_t *) arg;
+  u32 *session_indexes = 0, *session_index;
+  int i, rv;
+  u32 bytes = 0;
+  session_t *session;
+
+  while (1)
+    {
+      vec_reset_length (session_indexes);
+      clib_spinlock_lock (&evt->io_sessions_lockp);
+      pool_foreach (session_index, evt->active_session_indexes,
+      ({
+	vec_add1 (session_indexes, *session_index);
+      }));
+      clib_spinlock_unlock (&evt->io_sessions_lockp);
+      if (session_indexes)
+	{
+	  for (i = 0; i < vec_len (session_indexes); ++i)
+	    {
+	      VCL_LOCK_AND_GET_SESSION (session_indexes[i], &session);
+	      bytes = svm_fifo_max_dequeue (session->rx_fifo);
+	      clib_spinlock_unlock (&vcm->sessions_lockp);
+
+	      if (bytes)
+		{
+		  vppcom_ioevent_t *eio;
+		  vce_event_t *ev;
+		  u32 ev_idx;
+
+		  clib_spinlock_lock (&vcm->event_thread.events_lockp);
+
+		  pool_get (vcm->event_thread.vce_events, ev);
+		  ev_idx = (u32) (ev - vcm->event_thread.vce_events);
+		  eio = vce_get_event_data (ev, sizeof (*eio));
+		  ev->evk.eid = VCL_EVENT_IOEVENT_RX_FIFO;
+		  ev->evk.session_index = session_indexes[i];
+		  eio->bytes = bytes;
+		  eio->session_index = session_indexes[i];
+
+		  clib_spinlock_unlock (&vcm->event_thread.events_lockp);
+
+		  rv = vce_generate_event (&vcm->event_thread, ev_idx);
+		}
+	    }
+	}
+      struct timespec ts;
+      ts.tv_sec = 0;
+      ts.tv_nsec = 1000000;	/* 1 millisecond */
+      nanosleep (&ts, NULL);
+    }
+done:
+  clib_spinlock_unlock (&vcm->sessions_lockp);
+  return NULL;
+}
+
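
The io thread above deliberately never holds io_sessions_lockp while it
touches a session: it snapshots the watched indexes under the lock, then
polls the rx fifos unlocked.  A minimal, self-contained sketch of that
pattern -- plain pthreads and fixed arrays standing in for clib_spinlock
and the vppinfra pool, all names illustrative:

  #include <pthread.h>
  #include <string.h>

  #define WATCH_MAX 64

  typedef struct
  {
    pthread_mutex_t lock;
    unsigned int indexes[WATCH_MAX];
    unsigned int n_indexes;
  } watch_set_t;

  static void
  poll_once (watch_set_t * ws, void (*per_session) (unsigned int))
  {
    unsigned int snapshot[WATCH_MAX];
    unsigned int i, n;

    /* Copy the watched indexes under the lock... */
    pthread_mutex_lock (&ws->lock);
    n = ws->n_indexes;
    memcpy (snapshot, ws->indexes, n * sizeof (snapshot[0]));
    pthread_mutex_unlock (&ws->lock);

    /* ...then do the slow per-session work unlocked, so threads that
     * add or remove watched sessions never wait behind fifo polling. */
    for (i = 0; i < n; i++)
      per_session (snapshot[i]);
  }
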
+int
+vppcom_start_io_event_thread (vppcom_session_io_thread_t * evt,
+			      u8 max_sessions)
+{
+  pthread_cond_init (&(evt->vce_io_cond), NULL);
+  pthread_mutex_init (&(evt->vce_io_lock), NULL);
+
+  clib_spinlock_init (&(evt->io_sessions_lockp));
+
+  return pthread_create (&(evt->thread), NULL /* attr */ ,
+			 vppcom_session_io_thread_fn, evt);
+}
+
+
 static inline void
 vppcom_session_table_add_listener (u64 listener_handle, u32 value)
 {
@@ -436,10 +543,105 @@ write_elog (void)
 }
 
 
+static inline void
+vppcom_send_accept_session_reply (u64 handle, u32 context, int retval)
+{
+  vl_api_accept_session_reply_t *rmp;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  memset (rmp, 0, sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
+  rmp->retval = htonl (retval);
+  rmp->context = context;
+  rmp->handle = handle;
+  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
+}
+
 /*
  * VPPCOM Event Functions
  */
 
+void
+vce_registered_ioevent_handler_fn (void *arg)
+{
+  vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
+  vppcom_ioevent_t *eio;
+  vce_event_t *ev;
+  u32 ioevt_ndx = (u64) (reg->handler_fn_args);
+  vppcom_session_ioevent_t *ioevent, ioevent_;
+
+  clib_spinlock_lock (&(vcm->event_thread.events_lockp));
+  ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+  eio = vce_get_event_data (ev, sizeof (*eio));
+  clib_spinlock_unlock (&(vcm->event_thread.events_lockp));
+
+  clib_spinlock_lock (&vcm->session_io_thread.io_sessions_lockp);
+  ioevent = pool_elt_at_index (vcm->session_io_thread.ioevents, ioevt_ndx);
+  ioevent_ = *ioevent;
+  clib_spinlock_unlock (&vcm->session_io_thread.io_sessions_lockp);
+  (ioevent_.user_cb) (eio, ioevent_.user_cb_data);
+  vce_clear_event (&vcm->event_thread, reg->ev_idx);
+  return;
+
+  /*TODO - Unregister check in close for this listener */
+
+}
+
+void
+vce_registered_listener_connect_handler_fn (void *arg)
+{
+  vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
+  vce_event_connect_request_t *ecr;
+  vce_event_t *ev;
+  vppcom_endpt_t ep;
+
+  session_t *new_session;
+  int rv;
+
+  vppcom_session_listener_t *session_listener =
+    (vppcom_session_listener_t *) reg->handler_fn_args;
+
+  clib_spinlock_lock (&(vcm->event_thread.events_lockp));
+  ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+  ecr = vce_get_event_data (ev, sizeof (*ecr));
+  clib_spinlock_unlock (&(vcm->event_thread.events_lockp));
+  VCL_LOCK_AND_GET_SESSION (ecr->accepted_session_index, &new_session);
+
+  ep.is_ip4 = new_session->peer_addr.is_ip4;
+  ep.port = new_session->peer_port;
+  if (new_session->peer_addr.is_ip4)
+    clib_memcpy (&ep.ip, &new_session->peer_addr.ip46.ip4,
+		 sizeof (ip4_address_t));
+  else
+    clib_memcpy (&ep.ip, &new_session->peer_addr.ip46.ip6,
+		 sizeof (ip6_address_t));
+
+  vppcom_send_accept_session_reply (new_session->vpp_handle,
+				    new_session->client_context,
+				    0 /* retval OK */ );
+  clib_spinlock_unlock (&vcm->sessions_lockp);
+
+  (session_listener->user_cb) (ecr->accepted_session_index, &ep,
+			       session_listener->user_cb_data);
+
+  if (vcm->session_io_thread.io_sessions_lockp)
+    {
+      /* Throw this new accepted session index into the rx poll thread pool */
+      clib_spinlock_lock (&vcm->session_io_thread.io_sessions_lockp);
+      u32 *active_session_index;
+      pool_get (vcm->session_io_thread.active_session_indexes,
+		active_session_index);
+      *active_session_index = ecr->accepted_session_index;
+      clib_spinlock_unlock (&vcm->session_io_thread.io_sessions_lockp);
+    }
+
+  /*TODO - Unregister check in close for this listener */
+  return;
+
+done:
+  ASSERT (0);	// If we can't get a lock or the accepted session fails, let's blow up.
+}
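
vce_registered_listener_connect_handler_fn invokes the application callback
as user_cb (accepted_session_index, &ep, user_cb_data).  A hypothetical
application-side callback matching that call site (the authoritative
vppcom_session_listener_cb typedef lives in vcl/vppcom.h):

  #include <arpa/inet.h>
  #include <stdio.h>
  #include <vcl/vppcom.h>

  /* Illustrative only; by the time this runs, VCL has already sent the
   * accept reply to VPP and queued the session with the io poll thread. */
  static void
  my_accept_cb (uint32_t new_session_index, vppcom_endpt_t * ep, void *arg)
  {
    /* ep->port is in network byte order, as elsewhere in VCL */
    printf ("accepted sid %u, peer port %u\n", new_session_index,
	    ntohs (ep->port));
  }
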
+
 /**
  *
  * @brief vce_connect_request_handler_fn
  * - used for listener sessions
@@ -460,7 +662,7 @@ vce_connect_request_handler_fn (void *arg)
 }
 
 /**
- * @brief vce_epoll_wait_connect_request_handler_fn
+ * @brief vce_poll_wait_connect_request_handler_fn
  * - used by vppcom_epoll_xxxx() for listener sessions
  * - when a vl_api_accept_session_t_handler() generates an event
  *   this callback is alerted and sets the fields that vppcom_epoll_wait()
@@ -469,13 +671,13 @@ vce_connect_request_handler_fn (void *arg)
  * @param arg - void* to be cast to vce_event_handler_reg_t*
  */
 void
-vce_epoll_wait_connect_request_handler_fn (void *arg)
+vce_poll_wait_connect_request_handler_fn (void *arg)
 {
   vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
   vce_event_t *ev;
   /* Retrieve the VCL_EVENT_CONNECT_REQ_ACCEPTED event */
   ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
-  vce_event_connect_request_t *ecr = (vce_event_connect_request_t *) ev->data;
+  vce_event_connect_request_t *ecr = vce_get_event_data (ev, sizeof (*ecr));
 
   /* Add the accepted_session_index to the FIFO */
   clib_spinlock_lock (&vcm->session_fifo_lockp);
@@ -485,7 +687,7 @@
 
   /* Recycling the event. */
   clib_spinlock_lock (&(vcm->event_thread.events_lockp));
-  vcm->event_thread.recycle_event = 1;
+  ev->recycle = 1;
   clib_fifo_add1 (vcm->event_thread.event_index_fifo, reg->ev_idx);
   clib_spinlock_unlock (&(vcm->event_thread.events_lockp));
 }
@@ -745,8 +947,8 @@ vppcom_app_send_attach (void)
     (vcm->cfg.app_scope_global ? APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE : 0) |
     (app_is_proxy ? APP_OPTIONS_FLAGS_IS_PROXY : 0);
   bmp->options[APP_OPTIONS_PROXY_TRANSPORT] =
-    (vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) |
-    (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0);
+    (u64) ((vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) |
+	   (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0));
   bmp->options[APP_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size;
   bmp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size;
   bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size;
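
The (u64) cast added to the APP_OPTIONS_PROXY_TRANSPORT assignment above is
documentation more than correctness: options[] is u64-valued, and for shift
counts this small the int result of each ternary widens losslessly anyway.
A standalone illustration, with made-up protocol values in place of the
real transport_proto_t enum:

  #include <stdint.h>

  enum { PROTO_TCP = 0, PROTO_UDP = 1 };	/* illustrative values */

  static uint64_t
  proxy_transport_option (int tcp_on, int udp_on)
  {
    /* Only a shift count of 31 or more would force computing in 64 bits
     * (1ULL << n) before the store; here the cast just makes the
     * widening explicit. */
    return (uint64_t) ((tcp_on ? 1 << PROTO_TCP : 0) |
		       (udp_on ? 1 << PROTO_UDP : 0));
  }
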
@@ -1019,6 +1221,16 @@ done:
   /*
    * Setup session
    */
+  if (vcm->session_io_thread.io_sessions_lockp)
+    {
+      // Add this connection to the active io sessions list
+      clib_spinlock_lock (&vcm->session_io_thread.io_sessions_lockp);
+      u32 *active_session_index;
+      pool_get (vcm->session_io_thread.active_session_indexes,
+		active_session_index);
+      *active_session_index = session_index;
+      clib_spinlock_unlock (&vcm->session_io_thread.io_sessions_lockp);
+    }
   session->vpp_event_queue = uword_to_pointer (mp->vpp_event_queue_address,
					       svm_queue_t *);
 
@@ -1141,8 +1353,7 @@ done:
 
   session->vpp_handle = mp->handle;
   session->lcl_addr.is_ip4 = mp->lcl_is_ip4;
-  clib_memcpy (&session->lcl_addr.ip46, mp->lcl_ip,
-	       sizeof (session->peer_addr.ip46));
+  session->lcl_addr.ip46 = to_ip46 (!mp->lcl_is_ip4, mp->lcl_ip);
   session->lcl_port = mp->lcl_port;
   vppcom_session_table_add_listener (mp->handle, session_index);
   session->state = STATE_LISTEN;
@@ -1250,20 +1461,6 @@ format_ip46_address (u8 * s, va_list * args)
     format (s, "%U", format_ip6_address, &ip46->ip6);
 }
 
-static inline void
-vppcom_send_accept_session_reply (u64 handle, u32 context, int retval)
-{
-  vl_api_accept_session_reply_t *rmp;
-
-  rmp = vl_msg_api_alloc (sizeof (*rmp));
-  memset (rmp, 0, sizeof (*rmp));
-  rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
-  rmp->retval = htonl (retval);
-  rmp->context = context;
-  rmp->handle = handle;
-  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
-}
-
 static void
 vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
 {
@@ -1309,7 +1506,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   /* Allocate local session and set it up */
   pool_get (vcm->sessions, session);
   memset (session, 0, sizeof (*session));
-  session_index = session - vcm->sessions;
+  session_index = (u32) (session - vcm->sessions);
 
   rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
   rx_fifo->client_session_index = session_index;
@@ -1325,8 +1522,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   session->state = STATE_ACCEPT;
   session->peer_port = mp->port;
   session->peer_addr.is_ip4 = mp->is_ip4;
-  clib_memcpy (&session->peer_addr.ip46, mp->ip,
-	       sizeof (session->peer_addr.ip46));
+  session->peer_addr.ip46 = to_ip46 (!mp->is_ip4, mp->ip);
 
   /* Add it to lookup table */
   hash_set (vcm->session_index_by_vpp_handles, mp->handle, session_index);
@@ -1338,10 +1534,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
 
   clib_spinlock_lock (&vcm->event_thread.events_lockp);
   pool_get (vcm->event_thread.vce_events, ev);
-  ev->data = clib_mem_alloc (sizeof (vce_event_connect_request_t));
-  ev->refcnt = 0;
   ev_idx = (u32) (ev - vcm->event_thread.vce_events);
-  ecr = ev->data;
+  ecr = vce_get_event_data (ev, sizeof (*ecr));
   ev->evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
   listen_session = vppcom_session_table_lookup_listener (mp->listener_handle);
   ev->evk.session_index = (u32) (listen_session - vcm->sessions);
@@ -1350,14 +1544,14 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
   clib_spinlock_unlock (&vcm->event_thread.events_lockp);
 
   rv = vce_generate_event (&vcm->event_thread, ev_idx);
-  ASSERT (rv == 0);
 
   if (VPPCOM_DEBUG > 1)
     clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: client accept "
		  "request from %s address %U port %d queue %p!", getpid
		  (), mp->handle, session_index,
		  mp->is_ip4 ? "IPv4" : "IPv6",
-		  format_ip46_address, &mp->ip, mp->is_ip4,
+		  format_ip46_address, &mp->ip,
+		  mp->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
		  clib_net_to_host_u16 (mp->port), session->vpp_event_queue);
 
   if (VPPCOM_DEBUG > 0)
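
Both to_ip46 (!..., ...) conversions above negate the wire flag because
to_ip46 () (declared in vnet/ip/ip6_packet.h) takes an "is IPv6" argument
while the session messages carry is_ip4.  An open-coded sketch of what the
one-liner replaces; addr_from_wire is a hypothetical name:

  #include <string.h>
  #include <vnet/ip/ip6_packet.h>	/* ip46_address_t */

  static ip46_address_t
  addr_from_wire (u8 is_ip4, u8 * ip)
  {
    ip46_address_t a;

    /* Zeroing first matters in the IPv4 case, so the remaining bytes of
     * the 16-byte union hold no stale data. */
    memset (&a, 0, sizeof (a));
    if (is_ip4)
      memcpy (&a.ip4, ip, sizeof (a.ip4));	/* 4 bytes */
    else
      memcpy (&a.ip6, ip, sizeof (a.ip6));	/* 16 bytes */
    return a;
  }
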
@@ -1403,6 +1597,9 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
 
 }
 
+/* VPP combines bind and listen as one operation. VCL manages the separation
+ * of bind and listen locally via vppcom_session_bind() and
+ * vppcom_session_listen() */
 static void
 vppcom_send_bind_sock (session_t * session, u32 session_index)
 {
@@ -2143,6 +2340,7 @@ vppcom_app_create (char *app_name)
			 vcm->cfg.listen_queue_size);
       vppcom_cfg_read (conf_fname);
 
+
       env_var_str = getenv (VPPCOM_ENV_API_PREFIX);
       if (env_var_str)
	{
@@ -2259,7 +2457,6 @@ vppcom_app_create (char *app_name)
 
       rv = vce_start_event_thread (&(vcm->event_thread), 20);
 
-
       if (VPPCOM_DEBUG > 0)
	clib_warning ("VCL<%d>: sending session enable", getpid ());
 
@@ -2295,6 +2492,7 @@ void
 vppcom_app_destroy (void)
 {
   int rv;
+  f64 orig_app_timeout;
 
   if (vcm->my_client_index == ~0)
     return;
@@ -2322,7 +2520,10 @@ vppcom_app_destroy (void)
     }
 
   vppcom_app_detach ();
+  orig_app_timeout = vcm->cfg.app_timeout;
+  vcm->cfg.app_timeout = 2.0;
   rv = vppcom_wait_for_app_state_change (STATE_APP_ENABLED);
+  vcm->cfg.app_timeout = orig_app_timeout;
   if (PREDICT_FALSE (rv))
     {
       if (VPPCOM_DEBUG > 0)
@@ -2575,7 +2776,7 @@ vppcom_session_bind (uint32_t session_index, vppcom_endpt_t * ep)
		  "port %u, proto %s", getpid (), session_index,
		  session->lcl_addr.is_ip4 ? "IPv4" : "IPv6",
		  format_ip46_address, &session->lcl_addr.ip46,
-		  session->lcl_addr.is_ip4,
+		  session->lcl_addr.is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
		  clib_net_to_host_u16 (session->lcl_port),
		  session->proto ? "UDP" : "TCP");
 
@@ -2649,7 +2850,7 @@ vppcom_session_listen (uint32_t listen_session_index, uint32_t q_len)
 
   if (VPPCOM_DEBUG > 0)
     clib_warning ("VCL<%d>: vpp handle 0x%llx, "
-		  "sid %u: sending bind request...",
+		  "sid %u: sending VPP bind+listen request...",
		  getpid (), listen_vpp_handle, listen_session_index);
 
   vppcom_send_bind_sock (listen_session, listen_session_index);
@@ -2662,10 +2863,10 @@ vppcom_session_listen (uint32_t listen_session_index, uint32_t q_len)
   if (PREDICT_FALSE (retval))
     {
       if (VPPCOM_DEBUG > 0)
-	clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: bind failed! "
-		      "returning %d (%s)", getpid (),
-		      listen_session->vpp_handle, listen_session_index,
-		      retval, vppcom_retval_str (retval));
+	clib_warning
+	  ("VCL<%d>: vpp handle 0x%llx, sid %u: bind+listen failed! "
+	   "returning %d (%s)", getpid (), listen_session->vpp_handle,
+	   listen_session_index, retval, vppcom_retval_str (retval));
       clib_spinlock_unlock (&vcm->sessions_lockp);
       rv = retval;
       goto done;
@@ -2681,6 +2882,45 @@ done:
   return rv;
 }
 
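
A hypothetical caller of the vppcom_session_register_listener () API added
below.  listen_sid is assumed to be an already-created-and-bound VCL
session, my_accept_cb is the callback sketched earlier, and the error
callback is left NULL since nothing in this change invokes it:

  #include <vcl/vppcom.h>

  extern void my_accept_cb (uint32_t, vppcom_endpt_t *, void *);

  static int
  start_server (int listen_sid)
  {
    /* flags is currently unused by the implementation; q_len is handed
     * through to vppcom_session_listen () */
    return vppcom_session_register_listener (listen_sid, my_accept_cb,
					     NULL /* errcb */, 0 /* flags */,
					     10 /* q_len */,
					     NULL /* user_cb_data */);
  }
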
+int
+vppcom_session_register_listener (uint32_t session_index,
+				  vppcom_session_listener_cb cb,
+				  vppcom_session_listener_errcb errcb,
+				  uint8_t flags, int q_len, void *ptr)
+{
+  int rv = VPPCOM_OK;
+  vce_event_key_t evk;
+  vppcom_session_listener_t *listener_args;
+
+  if (!vcm->session_io_thread.io_sessions_lockp)
+    rv = vppcom_start_io_event_thread (&vcm->session_io_thread,
+				       100 /* DAW_TODO: ??? hard-coded value */ );
+  if (rv)
+    {
+      goto done;
+    }
+  rv = vppcom_session_listen (session_index, q_len);
+  if (rv)
+    {
+      goto done;
+    }
+
+  /* Register handler for connect_request event on listen_session_index */
+  listener_args = clib_mem_alloc (sizeof (vppcom_session_listener_t));	// DAW_TODO: Use a pool instead of thrashing the memory allocator!
+  listener_args->user_cb = cb;
+  listener_args->user_cb_data = ptr;
+  listener_args->user_errcb = errcb;
+
+  evk.session_index = session_index;
+  evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+  (void) vce_register_handler (&vcm->event_thread, &evk,
+			       vce_registered_listener_connect_handler_fn,
+			       listener_args);
+
+done:
+  return rv;
+}
+
 int
 validate_args_session_accept_ (session_t * listen_session)
 {
@@ -2752,25 +2992,26 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
   evk.session_index = listen_session_index;
   evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
   reg = vce_register_handler (&vcm->event_thread, &evk,
-			      vce_connect_request_handler_fn);
-
-  ev = 0;
+			      vce_connect_request_handler_fn, 0);
+  clib_spinlock_lock (&(vcm->event_thread.events_lockp));
+  ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
   pthread_mutex_lock (&reg->handler_lock);
   while (!ev)
     {
-      rv =
-	pthread_cond_timedwait (&reg->handler_cond, &reg->handler_lock, &ts);
+      clib_spinlock_unlock (&(vcm->event_thread.events_lockp));
+      rv = pthread_cond_timedwait (&reg->handler_cond,
+				   &reg->handler_lock, &ts);
       if (rv == ETIMEDOUT)
	{
	  rv = VPPCOM_EAGAIN;
	  goto cleanup;
	}
+      clib_spinlock_lock (&(vcm->event_thread.events_lockp));
       ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
     }
-  result = (vce_event_connect_request_t *) ev->data;
+  result = vce_get_event_data (ev, sizeof (*result));
   client_session_index = result->accepted_session_index;
-
-
+  clib_spinlock_unlock (&(vcm->event_thread.events_lockp));
 
   /* Remove from the FIFO used to service epoll */
   clib_spinlock_lock (&vcm->session_fifo_lockp);
@@ -2796,7 +3037,7 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
		      "lookup failed! returning %d (%s)", getpid (),
		      listen_vpp_handle, listen_session_index,
		      client_session_index, rv, vppcom_retval_str (rv));
-      goto done;
+      goto cleanup;
     }
 
   if (flags & O_NONBLOCK)
@@ -2836,7 +3077,8 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
		  client_session_index,
		  client_session->lcl_addr.is_ip4 ? "IPv4" : "IPv6",
		  format_ip46_address, &client_session->lcl_addr.ip46,
-		  client_session->lcl_addr.is_ip4,
+		  client_session->lcl_addr.is_ip4 ?
+		  IP46_TYPE_IP4 : IP46_TYPE_IP6,
		  clib_net_to_host_u16 (client_session->lcl_port));
 
   if (VPPCOM_DEBUG > 0)
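
The reworked wait in vppcom_session_accept () above interleaves the events
spinlock with the per-registration condvar: the event slot is only examined
with the events lock held, the lock is dropped across the timed wait, and
ETIMEDOUT maps to VPPCOM_EAGAIN.  The same shape reduced to plain pthreads,
all names illustrative:

  #include <errno.h>
  #include <pthread.h>
  #include <stddef.h>
  #include <time.h>

  static void *
  wait_for_slot (pthread_mutex_t * m, pthread_cond_t * c,
		 void *(*lookup) (void *), void *ctx,
		 const struct timespec *deadline)
  {
    void *ev = NULL;

    pthread_mutex_lock (m);
    while (!(ev = lookup (ctx)))
      {
	/* the mutex is released for the duration of the wait and retaken
	 * before the slot is examined again */
	if (pthread_cond_timedwait (c, m, deadline) == ETIMEDOUT)
	  break;		/* caller maps this to VPPCOM_EAGAIN */
      }
    pthread_mutex_unlock (m);
    return ev;			/* NULL on timeout */
  }
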
@@ -2894,8 +3136,17 @@ vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
   clib_spinlock_unlock (&vcm->sessions_lockp);
   rv = (int) client_session_index;
-  vce_clear_event (&vcm->event_thread, ev);
-
+  vce_clear_event (&vcm->event_thread, reg->ev_idx);
+  if (vcm->session_io_thread.io_sessions_lockp)
+    {
+      /* Throw this new accepted session index into the rx poll thread pool */
+      clib_spinlock_lock (&vcm->session_io_thread.io_sessions_lockp);
+      u32 *active_session_index;
+      pool_get (vcm->session_io_thread.active_session_indexes,
+		active_session_index);
+      *active_session_index = client_session_index;
+      clib_spinlock_unlock (&vcm->session_io_thread.io_sessions_lockp);
+    }
 cleanup:
   vce_unregister_handler (&vcm->event_thread, reg);
   pthread_mutex_unlock (&reg->handler_lock);
@@ -2930,7 +3181,8 @@ vppcom_session_connect (uint32_t session_index, vppcom_endpt_t * server_ep)
		    getpid (), session->vpp_handle, session_index,
		    session->peer_addr.is_ip4 ? "IPv4" : "IPv6",
		    format_ip46_address,
-		    &session->peer_addr.ip46, session->peer_addr.is_ip4,
+		    &session->peer_addr.ip46, session->peer_addr.is_ip4 ?
+		    IP46_TYPE_IP4 : IP46_TYPE_IP6,
		    clib_net_to_host_u16 (session->peer_port),
		    session->proto ? "UDP" : "TCP", session->state,
		    vppcom_session_state_str (session->state));
@@ -2949,7 +3201,8 @@ vppcom_session_connect (uint32_t session_index, vppcom_endpt_t * server_ep)
		  getpid (), session->vpp_handle, session_index,
		  session->peer_addr.is_ip4 ? "IPv4" : "IPv6",
		  format_ip46_address,
-		  &session->peer_addr.ip46, session->peer_addr.is_ip4,
+		  &session->peer_addr.ip46, session->peer_addr.is_ip4 ?
+		  IP46_TYPE_IP4 : IP46_TYPE_IP6,
		  clib_net_to_host_u16 (session->peer_port),
		  session->proto ? "UDP" : "TCP");
 
@@ -3194,6 +3447,41 @@ done:
   return rv;
 }
 
+int
+vppcom_session_register_ioevent_cb (uint32_t session_index,
+				    vppcom_session_ioevent_cb cb,
+				    uint8_t rx, void *ptr)
+{
+  int rv = VPPCOM_OK;
+  vce_event_key_t evk;
+  vppcom_session_ioevent_t *ioevent;
+
+  if (!vcm->session_io_thread.io_sessions_lockp)
+    rv = vppcom_start_io_event_thread (&vcm->session_io_thread,
+				       100 /* DAW_TODO: ??? hard-coded value */ );
+
+  if (rv == VPPCOM_OK)
+    {
+      void *io_evt_ndx;
+
+      /* Register handler for ioevent on session_index */
+      clib_spinlock_lock (&vcm->session_io_thread.io_sessions_lockp);
+      pool_get (vcm->session_io_thread.ioevents, ioevent);
+      io_evt_ndx = (void *) (ioevent - vcm->session_io_thread.ioevents);
+      ioevent->user_cb = cb;
+      ioevent->user_cb_data = ptr;
+      clib_spinlock_unlock (&vcm->session_io_thread.io_sessions_lockp);
+
+      evk.session_index = session_index;
+      evk.eid = rx ? VCL_EVENT_IOEVENT_RX_FIFO : VCL_EVENT_IOEVENT_TX_FIFO;
+
+      (void) vce_register_handler (&vcm->event_thread, &evk,
+				   vce_registered_ioevent_handler_fn,
+				   io_evt_ndx);
+    }
+  return rv;
+}
+
 int
 vppcom_session_write (uint32_t session_index, void *buf, size_t n)
 {
@@ -3249,7 +3537,12 @@ vppcom_session_write (uint32_t session_index, void *buf, size_t n)
     }
   while (!is_nonblocking && (n_write <= 0));
 
-  /* If event wasn't set, add one */
+  /* If the event wasn't set, add one.
+   *
+   * To reduce context switching, we could check whether an event is
+   * already queued for this event_key, but for now this will
+   * suffice. */
+
   if ((n_write > 0) && svm_fifo_set_event (tx_fifo))
     {
       /* Fabricate TX event, send to vpp */
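
A hypothetical RX-ready handler for the vppcom_session_register_ioevent_cb ()
API added above.  The vppcom_ioevent_t fields used (session_index, bytes)
are the ones vppcom_session_io_thread_fn fills in; the handler runs on the
event thread, so it should not block for long:

  #include <vcl/vppcom.h>

  static void
  my_rx_ready (vppcom_ioevent_t * eio, void *arg)
  {
    char buf[4096];
    uint32_t want = eio->bytes < sizeof (buf) ? eio->bytes : sizeof (buf);
    int n = vppcom_session_read (eio->session_index, buf, want);

    (void) n;			/* hand (buf, n) off to the application */
  }

  static int
  watch_rx (uint32_t sid)
  {
    /* rx = 1 selects VCL_EVENT_IOEVENT_RX_FIFO */
    return vppcom_session_register_ioevent_cb (sid, my_rx_ready, 1, NULL);
  }
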
@@ -3461,8 +3754,31 @@ vppcom_select (unsigned long n_bits, unsigned long *read_map,
	  bits_set = VPPCOM_EBADFD;
	  goto select_done;
	}
-
-      rv = vppcom_session_read_ready (session, session_index);
+      if (session->state & STATE_LISTEN)
+	{
+	  vce_event_handler_reg_t *reg = 0;
+	  vce_event_key_t evk;
+
+	  /* Check if a handler is already registered for this
+	   * event.
+	   * If not, register a handler for the connect_request
+	   * event on listen_session_index.
+	   */
+	  evk.session_index = session_index;
+	  evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+	  reg = vce_get_event_handler (&vcm->event_thread, &evk);
+	  if (!reg)
+	    reg = vce_register_handler (&vcm->event_thread, &evk,
+					vce_poll_wait_connect_request_handler_fn,
+					0 /* No callback args */ );
+	  rv = vppcom_session_read_ready (session, session_index);
+	  if (rv > 0)
+	    {
+	      vce_unregister_handler (&vcm->event_thread, reg);
+	    }
+	}
+      else
+	rv = vppcom_session_read_ready (session, session_index);
       clib_spinlock_unlock (&vcm->sessions_lockp);
       if (except_map && vcm->ex_bitmap &&
	  clib_bitmap_get (vcm->ex_bitmap, session_index) &&
@@ -3773,7 +4089,8 @@ vppcom_epoll_ctl (uint32_t vep_idx, int op, uint32_t session_index,
	  evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
	  vep_session->poll_reg =
	    vce_register_handler (&vcm->event_thread, &evk,
-				  vce_epoll_wait_connect_request_handler_fn);
+				  vce_poll_wait_connect_request_handler_fn,
+				  0 /* No callback args */ );
	}
       if (VPPCOM_DEBUG > 1)
	clib_warning ("VCL<%d>: EPOLL_CTL_ADD: vep_idx %u, "
@@ -4351,7 +4668,8 @@ vppcom_session_attr (uint32_t session_index, uint32_t op,
	    clib_warning ("VCL<%d>: VPPCOM_ATTR_GET_PEER_ADDR: sid %u, "
			  "is_ip4 = %u, addr = %U, port %u", getpid (),
			  session_index, ep->is_ip4, format_ip46_address,
-			  &session->peer_addr.ip46, ep->is_ip4,
+			  &session->peer_addr.ip46,
+			  ep->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
			  clib_net_to_host_u16 (ep->port));
	  if (VPPCOM_DEBUG > 0)
	    {
@@ -4417,7 +4735,8 @@ vppcom_session_attr (uint32_t session_index, uint32_t op,
	    clib_warning ("VCL<%d>: VPPCOM_ATTR_GET_LCL_ADDR: sid %u, "
			  "is_ip4 = %u, addr = %U port %d", getpid (),
			  session_index, ep->is_ip4, format_ip46_address,
-			  &session->lcl_addr.ip46, ep->is_ip4,
+			  &session->lcl_addr.ip46,
+			  ep->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
			  clib_net_to_host_u16 (ep->port));
	  if (VPPCOM_DEBUG > 0)
	    {
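
With the STATE_LISTEN branch added to vppcom_select () above, a pending
connect request surfaces as readability on the listening session.  A
minimal polling accept loop under that model -- assuming listen_sid < 64 so
a single word serves as the read bitmap, error handling elided:

  #include <vcl/vppcom.h>

  static void
  accept_loop (int listen_sid)
  {
    for (;;)
      {
	unsigned long rmap = 1UL << listen_sid;
	int n = vppcom_select (listen_sid + 1, &rmap, NULL, NULL,
			       10.0 /* seconds */);

	if (n > 0 && (rmap & (1UL << listen_sid)))
	  {
	    /* a connect request is queued; accept it here with
	     * vppcom_session_accept () */
	  }
      }
  }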