+ return 0;
+}
+
+/**
+ * Add or remove wildcard proxy entries in an app namespace's local table.
+ *
+ * Builds a zero-port, zero-address session endpoint for @a transport_proto
+ * and points it at @a app, so intra-namespace (cut-through) connects get
+ * proxied to it. Entries are installed for both ip4 and ip6.
+ */
+static void
+application_start_stop_proxy_local_scope (application_t * app,
+ u8 transport_proto, u8 is_start)
+{
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
+ app_namespace_t *app_ns;
+ app_ns = app_namespace_get (app->ns_index);
+ /* Wildcard endpoint: port 0, null address, chosen transport */
+ sep.is_ip4 = 1;
+ sep.transport_proto = transport_proto;
+ sep.port = 0;
+
+ if (is_start)
+ {
+ session_lookup_add_session_endpoint (app_ns->local_table_index, &sep,
+ app->app_index);
+ sep.is_ip4 = 0;
+ session_lookup_add_session_endpoint (app_ns->local_table_index, &sep,
+ app->app_index);
+ }
+ else
+ {
+ session_lookup_del_session_endpoint (app_ns->local_table_index, &sep);
+ sep.is_ip4 = 0;
+ session_lookup_del_session_endpoint (app_ns->local_table_index, &sep);
+ }
+}
+
+/**
+ * Start or stop proxying @a transport_proto on behalf of @a app.
+ *
+ * Installs/removes wildcard entries in the namespace-local scope and/or
+ * the global (fib) scope, depending on the app's scope flags.
+ */
+void
+application_start_stop_proxy (application_t * app,
+ transport_proto_t transport_proto, u8 is_start)
+{
+ if (application_has_local_scope (app))
+ application_start_stop_proxy_local_scope (app, transport_proto, is_start);
+
+ if (application_has_global_scope (app))
+ {
+ application_start_stop_proxy_fib_proto (app, FIB_PROTOCOL_IP4,
+ transport_proto, is_start);
+ application_start_stop_proxy_fib_proto (app, FIB_PROTOCOL_IP6,
+ transport_proto, is_start);
+ }
+}
+
+/**
+ * Start proxying every transport flagged in app->proxied_transports.
+ * Must only be called for apps registered as proxies.
+ */
+void
+application_setup_proxy (application_t * app)
+{
+ u16 transports = app->proxied_transports;
+ transport_proto_t tp;
+
+ ASSERT (application_is_proxy (app));
+
+ /* *INDENT-OFF* */
+ transport_proto_foreach (tp, ({
+ if (transports & (1 << tp))
+ application_start_stop_proxy (app, tp, 1);
+ }));
+ /* *INDENT-ON* */
+}
+
+/**
+ * Stop proxying every transport flagged in app->proxied_transports.
+ * Mirror of application_setup_proxy.
+ */
+void
+application_remove_proxy (application_t * app)
+{
+ u16 transports = app->proxied_transports;
+ transport_proto_t tp;
+
+ ASSERT (application_is_proxy (app));
+
+ /* *INDENT-OFF* */
+ transport_proto_foreach (tp, ({
+ if (transports & (1 << tp))
+ application_start_stop_proxy (app, tp, 0);
+ }));
+ /* *INDENT-ON* */
+}
+
+/**
+ * Return a pointer to the app's embedded segment manager properties.
+ */
+segment_manager_properties_t *
+application_segment_manager_properties (application_t * app)
+{
+ return &app->sm_properties;
+}
+
+/**
+ * Segment manager properties lookup by application index.
+ *
+ * NOTE(review): app is dereferenced without a null check; assumes
+ * @a app_index is always valid — confirm callers guarantee this.
+ */
+segment_manager_properties_t *
+application_get_segment_manager_properties (u32 app_index)
+{
+ application_t *app = application_get (app_index);
+ return &app->sm_properties;
+}
+
+/**
+ * Enqueue a previously allocated message onto an app's message queue.
+ *
+ * @param mq   target message queue
+ * @param msg  ring message to enqueue; freed here if the queue is full
+ * @param lock if set, caller holds the queue lock; it is released on both
+ *             the success and the failure paths
+ * @return 0 on success, -1 if the queue is full or the add fails
+ */
+static inline int
+app_enqueue_evt (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, u8 lock)
+{
+ if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
+ {
+ clib_warning ("evt q full");
+ /* Message was allocated from a ring; give it back before bailing */
+ svm_msg_q_free_msg (mq, msg);
+ if (lock)
+ svm_msg_q_unlock (mq);
+ return -1;
+ }
+
+ if (lock)
+ {
+ svm_msg_q_add_and_unlock (mq, msg);
+ return 0;
+ }
+
+ /* Even when not locking the ring, we must wait for queue mutex */
+ if (svm_msg_q_add (mq, msg, SVM_Q_WAIT))
+ {
+ clib_warning ("msg q add returned");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Notify an app worker that rx data is available on session @a s.
+ *
+ * Builtin apps get their rx callback invoked directly. External apps get
+ * a FIFO_EVENT_APP_RX message posted on the worker's message queue, unless
+ * an event is already pending or the fifo is empty — notifications are
+ * coalesced via the fifo's event flag.
+ *
+ * @param lock  if set, the mq is locked here and released by
+ *              app_enqueue_evt (or on the error paths below)
+ * @return 0 on success or when no event is needed, -1 on queue/ring full
+ */
+static inline int
+app_send_io_evt_rx (app_worker_t * app_wrk, stream_session_t * s, u8 lock)
+{
+ session_event_t *evt;
+ svm_msg_q_msg_t msg;
+ svm_msg_q_t *mq;
+
+ if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY
+ && s->session_state != SESSION_STATE_LISTENING))
+ {
+ /* Session is closed so app will never clean up. Flush rx fifo */
+ if (s->session_state == SESSION_STATE_CLOSED)
+ svm_fifo_dequeue_drop_all (s->server_rx_fifo);
+ return 0;
+ }
+
+ if (app_worker_application_is_builtin (app_wrk))
+ {
+ application_t *app = application_get (app_wrk->app_index);
+ return app->cb_fns.builtin_app_rx_callback (s);
+ }
+
+ /* Coalesce: nothing to do if an event is already pending or no data */
+ if (svm_fifo_has_event (s->server_rx_fifo)
+ || svm_fifo_is_empty (s->server_rx_fifo))
+ return 0;
+
+ mq = app_wrk->event_queue;
+ if (lock)
+ svm_msg_q_lock (mq);
+
+ if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
+ {
+ clib_warning ("evt q rings full");
+ if (lock)
+ svm_msg_q_unlock (mq);
+ return -1;
+ }
+
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ ASSERT (!svm_msg_q_msg_is_invalid (&msg));
+
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt->fifo = s->server_rx_fifo;
+ evt->event_type = FIFO_EVENT_APP_RX;
+
+ /* Mark the event as posted before enqueueing; result intentionally
+  * ignored since we already checked the flag above */
+ (void) svm_fifo_set_event (s->server_rx_fifo);
+
+ if (app_enqueue_evt (mq, &msg, lock))
+ return -1;
+ return 0;
+}
+
+/**
+ * Post a FIFO_EVENT_APP_TX message to the app worker's message queue.
+ *
+ * Builtin apps are skipped (returns 0). Unlike rx, tx notifications are
+ * not coalesced through the fifo event flag here.
+ *
+ * @param lock  if set, the mq is locked here and released by
+ *              app_enqueue_evt (or on the ring-full path below)
+ * @return 0 on success or builtin skip, -1 on ring full / enqueue failure
+ */
+static inline int
+app_send_io_evt_tx (app_worker_t * app_wrk, stream_session_t * s, u8 lock)
+{
+ svm_msg_q_t *mq;
+ session_event_t *evt;
+ svm_msg_q_msg_t msg;
+
+ if (app_worker_application_is_builtin (app_wrk))
+ return 0;
+
+ mq = app_wrk->event_queue;
+ if (lock)
+ svm_msg_q_lock (mq);
+
+ if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
+ {
+ clib_warning ("evt q rings full");
+ if (lock)
+ svm_msg_q_unlock (mq);
+ return -1;
+ }
+
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ ASSERT (!svm_msg_q_msg_is_invalid (&msg));
+
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt->event_type = FIFO_EVENT_APP_TX;
+ evt->fifo = s->server_tx_fifo;
+
+ return app_enqueue_evt (mq, &msg, lock);
+}
+
+/* *INDENT-OFF* */
+typedef int (app_send_evt_handler_fn) (app_worker_t *app,
+ stream_session_t *s,
+ u8 lock);
+/* Dispatch table indexed by event type. Presumably FIFO_EVENT_APP_RX == 0
+ * and FIFO_EVENT_APP_TX == 2 — slot 1 is intentionally unused (null);
+ * confirm against the session event enum. */
+static app_send_evt_handler_fn * const app_send_evt_handler_fns[3] = {
+ app_send_io_evt_rx,
+ 0,
+ app_send_io_evt_tx,
+};
+/* *INDENT-ON* */
+
+/**
+ * Send event to application
+ *
+ * Logic from queue perspective is non-blocking. If there's
+ * not enough space to enqueue a message, we return.
+ *
+ * NOTE(review): the ASSERT only bounds evt_type; a value hitting the
+ * table's null slot would crash — assumed callers only pass rx/tx
+ * event types; confirm.
+ */
+int
+app_worker_send_event (app_worker_t * app, stream_session_t * s, u8 evt_type)
+{
+ ASSERT (app && evt_type <= FIFO_EVENT_APP_TX);
+ return app_send_evt_handler_fns[evt_type] (app, s, 0 /* lock */ );
+}
+
+/**
+ * Send event to application
+ *
+ * Logic from queue perspective is blocking. However, if queue is full,
+ * we return.
+ *
+ * @param app      target app worker (must not be null)
+ * @param s        session the event refers to
+ * @param evt_type FIFO_EVENT_APP_RX or FIFO_EVENT_APP_TX
+ * @return handler result: 0 on success, -1 on full queue
+ */
+int
+app_worker_lock_and_send_event (app_worker_t * app, stream_session_t * s,
+ u8 evt_type)
+{
+ /* Same sanity check as app_worker_send_event; out-of-range types would
+  * index past (or into the null slot of) the handler table */
+ ASSERT (app && evt_type <= FIFO_EVENT_APP_TX);
+ return app_send_evt_handler_fns[evt_type] (app, s, 1 /* lock */ );
+}
+
+/**
+ * Allocate and zero-init a local (cut-through) session on @a app_wrk.
+ *
+ * Session type is NONE since local sessions have no backing transport.
+ * Returned element lives in the worker's local_sessions pool.
+ */
+local_session_t *
+application_local_session_alloc (app_worker_t * app_wrk)
+{
+ local_session_t *s;
+ pool_get (app_wrk->local_sessions, s);
+ clib_memset (s, 0, sizeof (*s));
+ s->app_wrk_index = app_wrk->wrk_index;
+ s->session_index = s - app_wrk->local_sessions;
+ s->session_type = session_type_from_proto_and_ip (TRANSPORT_PROTO_NONE, 0);
+ return s;
+}
+
+/**
+ * Return local session to the worker's pool.
+ */
+void
+application_local_session_free (app_worker_t * app, local_session_t * s)
+{
+ pool_put (app->local_sessions, s);
+ /* Poison the freed element in debug images to catch use-after-free */
+ if (CLIB_DEBUG)
+ clib_memset (s, 0xfc, sizeof (*s));
+}
+
+/**
+ * Look up a local session by index on @a app_wrk.
+ * @return the session, or 0 if the pool index is free/invalid.
+ */
+local_session_t *
+application_get_local_session (app_worker_t * app_wrk, u32 session_index)
+{
+ if (pool_is_free_index (app_wrk->local_sessions, session_index))
+ return 0;
+ return pool_elt_at_index (app_wrk->local_sessions, session_index);
+}
+
+/**
+ * Resolve a local session handle to its session.
+ *
+ * Parses the handle into (worker index, session index) and validates the
+ * worker. @return the session, or 0 if the worker no longer exists.
+ */
+local_session_t *
+application_get_local_session_from_handle (session_handle_t handle)
+{
+ app_worker_t *server_wrk;
+ u32 session_index, server_wrk_index;
+ local_session_parse_handle (handle, &server_wrk_index, &session_index);
+ server_wrk = app_worker_get_if_valid (server_wrk_index);
+ if (!server_wrk)
+ return 0;
+ return application_get_local_session (server_wrk, session_index);
+}
+
+/**
+ * Allocate a zeroed local listen session from the app's pool.
+ * Caller is responsible for filling in all fields.
+ */
+local_session_t *
+application_local_listen_session_alloc (application_t * app)
+{
+ local_session_t *ll;
+ pool_get (app->local_listen_sessions, ll);
+ clib_memset (ll, 0, sizeof (*ll));
+ return ll;
+}
+
+/**
+ * Pool index of local listener @a ll within @a app's listen session pool.
+ */
+u32
+application_local_listener_index (application_t * app, local_session_t * ll)
+{
+ return (ll - app->local_listen_sessions);
+}
+
+/**
+ * Return local listen session to the app's pool.
+ */
+void
+application_local_listen_session_free (application_t * app,
+ local_session_t * ll)
+{
+ pool_put (app->local_listen_sessions, ll);
+ /* Poison the freed element in debug images to catch use-after-free */
+ if (CLIB_DEBUG)
+ clib_memset (ll, 0xfb, sizeof (*ll));
+}
+
+/**
+ * Start listening on a local (cut-through) session endpoint.
+ *
+ * If an exact endpoint match already exists for this app, the requesting
+ * worker is simply added to the listener's worker bitmap. Otherwise a new
+ * local listener is allocated and registered in the app namespace's local
+ * table.
+ *
+ * @param app     application requesting the listen
+ * @param sep_ext extended session endpoint; carries the requesting
+ *                worker's index in app_wrk_index
+ * @param handle  out: local listen session handle
+ * @return 0 on success, VNET_API_ERROR_ADDRESS_IN_USE if another app, or
+ *         the same worker, already listens on the endpoint
+ */
+int
+application_start_local_listen (application_t * app,
+ session_endpoint_cfg_t * sep_ext,
+ session_handle_t * handle)
+{
+ app_listener_t *app_listener;
+ session_endpoint_t *sep;
+ app_worker_t *app_wrk;
+ session_handle_t lh;
+ local_session_t *ll;
+ u32 table_index;
+
+ sep = (session_endpoint_t *) sep_ext;
+ table_index = application_local_session_table (app);
+ app_wrk = app_worker_get (sep_ext->app_wrk_index);
+
+ /* An exact sep match, as opposed to session_lookup_local_listener */
+ lh = session_lookup_endpoint_listener (table_index, sep, 1);
+ if (lh != SESSION_INVALID_HANDLE)
+ {
+ ll = application_get_local_listener_w_handle (lh);
+ if (ll->app_index != app->app_index)
+ return VNET_API_ERROR_ADDRESS_IN_USE;
+
+ if (ll->app_wrk_index == app_wrk->wrk_index)
+ return VNET_API_ERROR_ADDRESS_IN_USE;
+
+ /* Existing listener owned by this app: just add the worker */
+ app_listener = app_local_listener_get (app, ll->listener_db_index);
+ app_listener->workers = clib_bitmap_set (app_listener->workers,
+ app_wrk->wrk_map_index, 1);
+ *handle = application_local_session_handle (ll);
+ return 0;
+ }
+
+ ll = application_local_listen_session_alloc (app);
+ ll->session_type = session_type_from_proto_and_ip (TRANSPORT_PROTO_NONE, 0);
+ /* Fix: store the worker index, not the app index. The ownership check
+  * above compares app_wrk_index against app_wrk->wrk_index, and
+  * application_local_session_alloc sets the field the same way. */
+ ll->app_wrk_index = app_wrk->wrk_index;
+ ll->session_index = application_local_listener_index (app, ll);
+ ll->port = sep_ext->port;
+ /* Store the original session type for the unbind */
+ ll->listener_session_type =
+ session_type_from_proto_and_ip (sep_ext->transport_proto,
+ sep_ext->is_ip4);
+ ll->transport_listener_index = ~0;
+ ll->app_index = app->app_index;
+
+ app_listener = app_local_listener_alloc (app);
+ ll->listener_db_index = app_listener->al_index;
+ app_listener->workers = clib_bitmap_set (app_listener->workers,
+ app_wrk->wrk_map_index, 1);
+
+ *handle = application_local_session_handle (ll);
+ session_lookup_add_session_endpoint (table_index, sep, *handle);
+
+ return 0;
+}
+
+/**
+ * Clean up local session table. If we have a listener session use it to
+ * find the port and proto. If not, the handle must be a local table handle
+ * so parse it.
+ *
+ * @param app_index     owning application; must match the handle's owner
+ * @param wrk_map_index worker (map index) unbinding from the listener
+ * @param lh            listen session handle, global or local
+ * @return 0 on success, -1 on lookup/ownership failure
+ */
+int
+application_stop_local_listen (u32 app_index, u32 wrk_map_index,
+ session_handle_t lh)
+{
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
+ u32 table_index, ll_index, server_index;
+ app_listener_t *app_listener;
+ app_worker_t *server_wrk;
+ stream_session_t *sl = 0;
+ local_session_t *ll, *ls;
+ application_t *server;
+
+ server = application_get (app_index);
+ table_index = application_local_session_table (server);
+
+ /* We have both local and global table binds. Figure from global what
+ * the sep we should be cleaning up is.
+ */
+ if (!session_handle_is_local (lh))
+ {
+ sl = listen_session_get_from_handle (lh);
+ if (!sl || listen_session_get_local_session_endpoint (sl, &sep))
+ {
+ clib_warning ("broken listener");
+ return -1;
+ }
+ /* Re-resolve to the local table's own handle for this endpoint */
+ lh = session_lookup_endpoint_listener (table_index, &sep, 0);
+ if (lh == SESSION_INVALID_HANDLE)
+ return -1;
+ }
+
+ local_session_parse_handle (lh, &server_index, &ll_index);
+ if (PREDICT_FALSE (server_index != app_index))
+ {
+ clib_warning ("app %u does not own local handle 0x%lx", app_index, lh);
+ return -1;
+ }
+
+ ll = application_get_local_listen_session (server, ll_index);
+ if (PREDICT_FALSE (!ll))
+ {
+ clib_warning ("no local listener");
+ return -1;
+ }
+
+ app_listener = app_local_listener_get (server, ll->listener_db_index);
+ if (!clib_bitmap_get (app_listener->workers, wrk_map_index))
+ {
+ clib_warning ("app wrk %u not listening on handle %lu", wrk_map_index,
+ lh);
+ return -1;
+ }
+
+ server_wrk = application_get_worker (server, wrk_map_index);
+ /* Disconnect all cut-through sessions accepted on this listener by
+ * the departing worker */
+ /* *INDENT-OFF* */
+ pool_foreach (ls, server_wrk->local_sessions, ({
+ if (ls->listener_index == ll->session_index)
+ application_local_session_disconnect (server_wrk->app_index, ls);
+ }));
+ /* *INDENT-ON* */
+
+ /* Drop the worker; when the last one leaves, remove the table entry
+ * and free the listener */
+ clib_bitmap_set_no_check (app_listener->workers, wrk_map_index, 0);
+ if (clib_bitmap_is_zero (app_listener->workers))
+ {
+ app_local_listener_free (server, app_listener);
+ application_local_listener_session_endpoint (ll, &sep);
+ session_lookup_del_session_endpoint (table_index, &sep);
+ application_local_listen_session_free (server, ll);
+ }
+
+ return 0;
+}
+
+/**
+ * Mirror each queue's producer eventfd as its consumer eventfd.
+ * (Name "eventds" appears to be a typo for "eventfds".)
+ */
+static void
+application_local_session_fix_eventds (svm_msg_q_t * sq, svm_msg_q_t * cq)
+{
+ int fd;
+
+ /*
+ * segment manager initializes only the producer eventds, since vpp is
+ * typically the producer. But for local sessions, we also pass to the
+ * apps the mqs they listen on for events from peer apps, so they are also
+ * consumer fds.
+ */
+ fd = svm_msg_q_get_producer_eventfd (sq);
+ svm_msg_q_set_consumer_eventfd (sq, fd);
+ fd = svm_msg_q_get_producer_eventfd (cq);
+ svm_msg_q_set_consumer_eventfd (cq, fd);
+}
+
+/**
+ * Establish a cut-through connection from @a client_wrk to listener @a ll
+ * on @a server_wrk.
+ *
+ * Allocates a dedicated segment sized for one rx/tx fifo pair plus two
+ * message queues (one per side), attaches the fifos to a freshly allocated
+ * local session, notifies the server of the new segment and delivers the
+ * accept callback. For builtin servers the client connect notify is sent
+ * immediately.
+ *
+ * @param opaque  client-supplied value echoed back in the connect notify
+ * @return 0 on success, negative on allocation/notification failure
+ *
+ * NOTE(review): on the failure paths, the local session @a ls allocated at
+ * entry is never freed, and the segment is deleted only when the listener
+ * has no transport — possible leaks; confirm intent.
+ */
+int
+application_local_session_connect (app_worker_t * client_wrk,
+ app_worker_t * server_wrk,
+ local_session_t * ll, u32 opaque)
+{
+ u32 seg_size, evt_q_sz, evt_q_elts, margin = 16 << 10;
+ u32 round_rx_fifo_sz, round_tx_fifo_sz, sm_index;
+ segment_manager_properties_t *props, *cprops;
+ int rv, has_transport, seg_index;
+ svm_fifo_segment_private_t *seg;
+ application_t *server, *client;
+ segment_manager_t *sm;
+ local_session_t *ls;
+ svm_msg_q_t *sq, *cq;
+ u64 segment_handle;
+
+ ls = application_local_session_alloc (server_wrk);
+ server = application_get (server_wrk->app_index);
+ client = application_get (client_wrk->app_index);
+
+ /* Size the segment: both sides' event queues, the fifo pair rounded up
+ * to powers of two, plus a fixed margin */
+ props = application_segment_manager_properties (server);
+ cprops = application_segment_manager_properties (client);
+ evt_q_elts = props->evt_q_size + cprops->evt_q_size;
+ evt_q_sz = segment_manager_evt_q_expected_size (evt_q_elts);
+ round_rx_fifo_sz = 1 << max_log2 (props->rx_fifo_size);
+ round_tx_fifo_sz = 1 << max_log2 (props->tx_fifo_size);
+ seg_size = round_rx_fifo_sz + round_tx_fifo_sz + evt_q_sz + margin;
+
+ has_transport = session_has_transport ((stream_session_t *) ll);
+ if (!has_transport)
+ {
+ /* Local sessions don't have backing transport */
+ ls->port = ll->port;
+ sm = application_get_local_segment_manager (server_wrk);
+ }
+ else
+ {
+ stream_session_t *sl = (stream_session_t *) ll;
+ transport_connection_t *tc;
+ tc = listen_session_get_transport (sl);
+ ls->port = tc->lcl_port;
+ sm = app_worker_get_listen_segment_manager (server_wrk, sl);
+ }
+
+ seg_index = segment_manager_add_segment (sm, seg_size);
+ if (seg_index < 0)
+ {
+ clib_warning ("failed to add new cut-through segment");
+ return seg_index;
+ }
+ seg = segment_manager_get_segment_w_lock (sm, seg_index);
+ sq = segment_manager_alloc_queue (seg, props);
+ cq = segment_manager_alloc_queue (seg, cprops);
+
+ if (props->use_mq_eventfd)
+ application_local_session_fix_eventds (sq, cq);
+
+ ls->server_evt_q = pointer_to_uword (sq);
+ ls->client_evt_q = pointer_to_uword (cq);
+ rv = segment_manager_try_alloc_fifos (seg, props->rx_fifo_size,
+ props->tx_fifo_size,
+ &ls->server_rx_fifo,
+ &ls->server_tx_fifo);
+ if (rv)
+ {
+ clib_warning ("failed to add fifos in cut-through segment");
+ segment_manager_segment_reader_unlock (sm);
+ goto failed;
+ }
+ /* Cross-link fifos back to the session and its segment for cleanup */
+ sm_index = segment_manager_index (sm);
+ ls->server_rx_fifo->ct_session_index = ls->session_index;
+ ls->server_tx_fifo->ct_session_index = ls->session_index;
+ ls->server_rx_fifo->segment_manager = sm_index;
+ ls->server_tx_fifo->segment_manager = sm_index;
+ ls->server_rx_fifo->segment_index = seg_index;
+ ls->server_tx_fifo->segment_index = seg_index;
+ ls->svm_segment_index = seg_index;
+ ls->listener_index = ll->session_index;
+ ls->client_wrk_index = client_wrk->wrk_index;
+ ls->client_opaque = opaque;
+ ls->listener_session_type = ll->session_type;
+ ls->session_state = SESSION_STATE_READY;
+
+ segment_handle = segment_manager_segment_handle (sm, seg);
+ if ((rv = server->cb_fns.add_segment_callback (server_wrk->api_client_index,
+ segment_handle)))
+ {
+ clib_warning ("failed to notify server of new segment");
+ segment_manager_segment_reader_unlock (sm);
+ goto failed;
+ }
+ segment_manager_segment_reader_unlock (sm);
+ if ((rv = server->cb_fns.session_accept_callback ((stream_session_t *) ls)))
+ {
+ clib_warning ("failed to send accept cut-through notify to server");
+ goto failed;
+ }
+ if (server->flags & APP_OPTIONS_FLAGS_IS_BUILTIN)
+ application_local_session_connect_notify (ls);
+
+ return 0;
+
+failed:
+ if (!has_transport)
+ segment_manager_del_segment (sm, seg);
+ return rv;
+}
+
+/**
+ * Pack a local session's worker and session indices into one 64-bit key
+ * (worker index in the high word, session index in the low word).
+ */
+static u64
+application_client_local_connect_key (local_session_t * ls)
+{
+ u64 key;
+
+ key = (u64) ls->app_wrk_index;
+ key = (key << 32) | (u64) ls->session_index;
+ return key;
+}
+
+/**
+ * Split a 64-bit connect key back into its worker and session indices.
+ * Inverse of application_client_local_connect_key.
+ */
+static void
+application_client_local_connect_key_parse (u64 key, u32 * app_wrk_index,
+ u32 * session_index)
+{
+ const u64 low_mask = 0xFFFFFFFF;
+
+ *session_index = (u32) (key & low_mask);
+ *app_wrk_index = (u32) (key >> 32);
+}
+
+int
+application_local_session_connect_notify (local_session_t * ls)
+{
+ svm_fifo_segment_private_t *seg;
+ app_worker_t *client_wrk, *server_wrk;
+ segment_manager_t *sm;
+ application_t *client;
+ int rv, is_fail = 0;
+ u64 segment_handle;
+ u64 client_key;
+
+ client_wrk = app_worker_get (ls->client_wrk_index);
+ server_wrk = app_worker_get (ls->app_wrk_index);
+ client = application_get (client_wrk->app_index);
+
+ sm = application_get_local_segment_manager_w_session (server_wrk, ls);
+ seg = segment_manager_get_segment_w_lock (sm, ls->svm_segment_index);
+ segment_handle = segment_manager_segment_handle (sm, seg);
+ if ((rv = client->cb_fns.add_segment_callback (client_wrk->api_client_index,
+ segment_handle)))
+ {
+ clib_warning ("failed to notify client %u of new segment",
+ ls->client_wrk_index);
+ segment_manager_segment_reader_unlock (sm);
+ application_local_session_disconnect (ls->client_wrk_index, ls);
+ is_fail = 1;
+ }
+ else
+ {
+ segment_manager_segment_reader_unlock (sm);
+ }