/* Module-wide state: app pool, worker pool and the lookup tables. */
static app_main_t app_main;
+
+static app_worker_map_t *
+app_worker_map_alloc (application_t * app)
+{
+ app_worker_map_t *map;
+ pool_get (app->worker_maps, map);
+ memset (map, 0, sizeof (*map));
+ return map;
+}
+
+static u32
+app_worker_map_index (application_t * app, app_worker_map_t * map)
+{
+ return (map - app->worker_maps);
+}
+
+static void
+app_worker_map_free (application_t * app, app_worker_map_t * map)
+{
+ pool_put (app->worker_maps, map);
+}
+
+static app_worker_map_t *
+app_worker_map_get (application_t * app, u32 map_index)
+{
+ return pool_elt_at_index (app->worker_maps, map_index);
+}
+
+static u8 *
+app_get_name_from_reg_index (application_t * app)
+{
+ u8 *app_name;
+
+ vl_api_registration_t *regp;
+ regp = vl_api_client_index_to_registration (app->api_client_index);
+ if (!regp)
+ app_name = format (0, "builtin-%d%c", app->app_index, 0);
+ else
+ app_name = format (0, "%s%c", regp->name, 0);
+
+ return app_name;
+}
+
+static u8 *
+app_get_name (application_t * app)
+{
+ if (!app->name)
+ return app_get_name_from_reg_index (app);
+ return app->name;
+}
+
+u32
+application_session_table (application_t * app, u8 fib_proto)
+{
+ app_namespace_t *app_ns;
+ app_ns = app_namespace_get (app->ns_index);
+ if (!application_has_global_scope (app))
+ return APP_INVALID_INDEX;
+ if (fib_proto == FIB_PROTOCOL_IP4)
+ return session_lookup_get_index_for_fib (fib_proto,
+ app_ns->ip4_fib_index);
+ else
+ return session_lookup_get_index_for_fib (fib_proto,
+ app_ns->ip6_fib_index);
+}
+
+u32
+application_local_session_table (application_t * app)
+{
+ app_namespace_t *app_ns;
+ if (!application_has_local_scope (app))
+ return APP_INVALID_INDEX;
+ app_ns = app_namespace_get (app->ns_index);
+ return app_ns->local_table_index;
+}
+
+int
+application_api_queue_is_full (application_t * app)
+{
+ svm_queue_t *q;
+
+ /* builtin servers are always OK */
+ if (app->api_client_index == ~0)
+ return 0;
+
+ q = vl_api_client_index_to_input_queue (app->api_client_index);
+ if (!q)
+ return 1;
+
+ if (q->cursize == q->maxsize)
+ return 1;
+ return 0;
+}
+
+/**
+ * Returns app name
+ *
+ * Since the name is not stored per app, we generate it on the fly. It is
+ * the caller's responsibility to free the vector
+ */
+u8 *
+application_name_from_index (u32 app_index)
+{
+ application_t *app = application_get (app_index);
+ if (!app)
+ return 0;
+ return app_get_name_from_reg_index (app);
+}
+
+static void
+application_table_add (application_t * app)
+{
+ if (app->api_client_index != APP_INVALID_INDEX)
+ hash_set (app_main.app_by_api_client_index, app->api_client_index,
+ app->app_index);
+ else if (app->name)
+ hash_set_mem (app_main.app_by_name, app->name, app->app_index);
+}
+
+static void
+application_table_del (application_t * app)
+{
+ if (app->api_client_index != APP_INVALID_INDEX)
+ hash_unset (app_main.app_by_api_client_index, app->api_client_index);
+ else if (app->name)
+ hash_unset_mem (app_main.app_by_name, app->name);
+}
+
+application_t *
+application_lookup (u32 api_client_index)
+{
+ uword *p;
+ p = hash_get (app_main.app_by_api_client_index, api_client_index);
+ if (p)
+ return application_get (p[0]);
+
+ return 0;
+}
+
+application_t *
+application_lookup_name (const u8 * name)
+{
+ uword *p;
+ p = hash_get_mem (app_main.app_by_name, name);
+ if (p)
+ return application_get (p[0]);
+
+ return 0;
+}
+
+application_t *
+application_alloc (void)
+{
+ application_t *app;
+ pool_get (app_main.app_pool, app);
+ memset (app, 0, sizeof (*app));
+ app->app_index = app - app_main.app_pool;
+ return app;
+}
+
+application_t *
+application_get (u32 app_index)
+{
+ if (app_index == APP_INVALID_INDEX)
+ return 0;
+ return pool_elt_at_index (app_main.app_pool, app_index);
+}
+
+application_t *
+application_get_if_valid (u32 app_index)
+{
+ if (pool_is_free_index (app_main.app_pool, app_index))
+ return 0;
+
+ return pool_elt_at_index (app_main.app_pool, app_index);
+}
+
+u32
+application_index (application_t * app)
+{
+ return app - app_main.app_pool;
+}
+
+static void
+application_verify_cb_fns (session_cb_vft_t * cb_fns)
+{
+ if (cb_fns->session_accept_callback == 0)
+ clib_warning ("No accept callback function provided");
+ if (cb_fns->session_connected_callback == 0)
+ clib_warning ("No session connected callback function provided");
+ if (cb_fns->session_disconnect_callback == 0)
+ clib_warning ("No session disconnect callback function provided");
+ if (cb_fns->session_reset_callback == 0)
+ clib_warning ("No session reset callback function provided");
+}
+
+/**
+ * Check app config for given segment type
+ *
+ * Returns 1 on success and 0 otherwise
+ */
+static u8
+application_verify_cfg (ssvm_segment_type_t st)
+{
+ u8 is_valid;
+ if (st == SSVM_SEGMENT_MEMFD)
+ {
+ is_valid = (session_manager_get_evt_q_segment () != 0);
+ if (!is_valid)
+ clib_warning ("memfd seg: vpp's event qs IN binary api svm region");
+ return is_valid;
+ }
+ else if (st == SSVM_SEGMENT_SHM)
+ {
+ is_valid = (session_manager_get_evt_q_segment () == 0);
+ if (!is_valid)
+ clib_warning ("shm seg: vpp's event qs NOT IN binary api svm region");
+ return is_valid;
+ }
+ else
+ return 1;
+}
+
+int
+application_alloc_and_init (app_init_args_t * a)
+{
+ ssvm_segment_type_t seg_type = SSVM_SEGMENT_MEMFD;
+ segment_manager_properties_t *props;
+ vl_api_registration_t *reg;
+ application_t *app;
+ u64 *options;
+
+ app = application_alloc ();
+ options = a->options;
+ /*
+ * Make sure we support the requested configuration
+ */
+ if (!(options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_IS_BUILTIN))
+ {
+ reg = vl_api_client_index_to_registration (a->api_client_index);
+ if (!reg)
+ return VNET_API_ERROR_APP_UNSUPPORTED_CFG;
+ if (vl_api_registration_file_index (reg) == VL_API_INVALID_FI)
+ seg_type = SSVM_SEGMENT_SHM;
+ }
+ else
+ {
+ if (options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_EVT_MQ_USE_EVENTFD)
+ {
+ clib_warning ("mq eventfds can only be used if socket transport is "
+ "used for api");
+ return VNET_API_ERROR_APP_UNSUPPORTED_CFG;
+ }
+ seg_type = SSVM_SEGMENT_PRIVATE;
+ }
+
+ if (!application_verify_cfg (seg_type))
+ return VNET_API_ERROR_APP_UNSUPPORTED_CFG;
+
+ /* Check that the obvious things are properly set up */
+ application_verify_cb_fns (a->session_cb_vft);
+
+ app->api_client_index = a->api_client_index;
+ app->flags = options[APP_OPTIONS_FLAGS];
+ app->cb_fns = *a->session_cb_vft;
+ app->ns_index = options[APP_OPTIONS_NAMESPACE];
+ app->proxied_transports = options[APP_OPTIONS_PROXY_TRANSPORT];
+ app->name = vec_dup (a->name);
+
+ /* If no scope enabled, default to global */
+ if (!application_has_global_scope (app)
+ && !application_has_local_scope (app))
+ app->flags |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+
+ props = application_segment_manager_properties (app);
+ segment_manager_properties_init (props);
+ props->segment_size = options[APP_OPTIONS_ADD_SEGMENT_SIZE];
+ props->prealloc_fifos = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS];
+ if (options[APP_OPTIONS_ADD_SEGMENT_SIZE])
+ {
+ props->add_segment_size = options[APP_OPTIONS_ADD_SEGMENT_SIZE];
+ props->add_segment = 1;
+ }
+ if (options[APP_OPTIONS_RX_FIFO_SIZE])
+ props->rx_fifo_size = options[APP_OPTIONS_RX_FIFO_SIZE];
+ if (options[APP_OPTIONS_TX_FIFO_SIZE])
+ props->tx_fifo_size = options[APP_OPTIONS_TX_FIFO_SIZE];
+ if (options[APP_OPTIONS_EVT_QUEUE_SIZE])
+ props->evt_q_size = options[APP_OPTIONS_EVT_QUEUE_SIZE];
+ if (options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_EVT_MQ_USE_EVENTFD)
+ props->use_mq_eventfd = 1;
+ if (options[APP_OPTIONS_TLS_ENGINE])
+ app->tls_engine = options[APP_OPTIONS_TLS_ENGINE];
+ props->segment_type = seg_type;
+
+ /* Add app to lookup by api_client_index table */
+ application_table_add (app);
+ a->app_index = application_index (app);
+
+ APP_DBG ("New app name: %v api index: %u index %u", app->name,
+ app->api_client_index, app->app_index);
+
+ return 0;
+}
+
/**
 * Free an application and everything it owns.
 *
 * Tears down proxy registrations, frees every worker via its map entry,
 * removes the app from the lookup tables and returns it to the pool.
 */
void
application_free (application_t * app)
{
  app_worker_map_t *wrk_map;
  app_worker_t *app_wrk;

  /*
   * The app event queue allocated in first segment is cleared with
   * the segment manager. No need to explicitly free it.
   */
  APP_DBG ("Delete app name %v api index: %d index: %d", app->name,
           app->api_client_index, app->app_index);

  /* Withdraw proxy listeners/lookup entries before freeing workers */
  if (application_is_proxy (app))
    application_remove_proxy (app);

  /* Free each worker, then drop the map pool itself */
  /* *INDENT-OFF* */
  pool_flush (wrk_map, app->worker_maps, ({
    app_wrk = app_worker_get (wrk_map->wrk_index);
    app_worker_free (app_wrk);
  }));
  /* *INDENT-ON* */
  pool_free (app->worker_maps);

  /* Unregister from lookup tables before releasing the name vector they
   * may key on */
  application_table_del (app);
  vec_free (app->name);
  vec_free (app->tls_cert);
  vec_free (app->tls_key);
  pool_put (app_main.app_pool, app);
}
+
+app_worker_t *
+application_get_worker (application_t * app, u32 wrk_map_index)
+{
+ app_worker_map_t *map;
+ map = app_worker_map_get (app, wrk_map_index);
+ if (!map)
+ return 0;
+ return app_worker_get (map->wrk_index);
+}
+
+app_worker_t *
+application_get_default_worker (application_t * app)
+{
+ return application_get_worker (app, 0);
+}
+
+app_worker_t *
+app_worker_alloc (application_t * app)
+{
+ app_worker_t *app_wrk;
+ pool_get (app_main.workers, app_wrk);
+ memset (app_wrk, 0, sizeof (*app_wrk));
+ app_wrk->wrk_index = app_wrk - app_main.workers;
+ app_wrk->app_index = app->app_index;
+ app_wrk->wrk_map_index = ~0;
+ app_wrk->connects_seg_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
+ app_wrk->first_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
+ app_wrk->local_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
+ APP_DBG ("New app %v worker %u", app_get_name (app), app_wrk->wrk_index);
+ return app_wrk;
+}
+
+app_worker_t *
+app_worker_get (u32 wrk_index)
+{
+ return pool_elt_at_index (app_main.workers, wrk_index);
+}
+
+app_worker_t *
+app_worker_get_if_valid (u32 wrk_index)
+{
+ if (pool_is_free_index (app_main.workers, wrk_index))
+ return 0;
+ return pool_elt_at_index (app_main.workers, wrk_index);
+}
+
/**
 * Free a worker: unbind its listeners, release its segment managers and
 * local sessions, then return it to the pool.
 */
void
app_worker_free (app_worker_t * app_wrk)
{
  application_t *app = application_get (app_wrk->app_index);
  vnet_unbind_args_t _a, *a = &_a;
  u64 handle, *handles = 0;
  segment_manager_t *sm;
  u32 sm_index;
  int i;

  /*
   * Listener cleanup
   */

  /* Collect handles first: vnet_unbind below mutates listeners_table and
   * we must not modify the hash while iterating it */
  /* *INDENT-OFF* */
  hash_foreach (handle, sm_index, app_wrk->listeners_table,
  ({
    vec_add1 (handles, handle);
    sm = segment_manager_get (sm_index);
    /* Detach the sm so pending teardown doesn't call back into us */
    sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
  }));
  /* *INDENT-ON* */

  for (i = 0; i < vec_len (handles); i++)
    {
      a->app_index = app->app_index;
      a->app_wrk_index = app_wrk->wrk_map_index;
      a->handle = handles[i];
      /* seg manager is removed when unbind completes */
      vnet_unbind (a);
    }

  /*
   * Connects segment manager cleanup
   */

  if (app_wrk->connects_seg_manager != APP_INVALID_SEGMENT_MANAGER_INDEX)
    {
      sm = segment_manager_get (app_wrk->connects_seg_manager);
      sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
      segment_manager_init_del (sm);
    }

  /* If first segment manager is used by a listener */
  if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
      && app_wrk->first_segment_manager != app_wrk->connects_seg_manager)
    {
      sm = segment_manager_get (app_wrk->first_segment_manager);
      /* .. and has no fifos, e.g. it might be used for redirected sessions,
       * remove it */
      if (!segment_manager_has_fifos (sm))
        {
          sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
          segment_manager_del (sm);
        }
    }

  /*
   * Local sessions
   */
  application_local_sessions_free (app_wrk);

  pool_put (app_main.workers, app_wrk);
  /* Debug poison: the element stays owned by the pool, so the write is to
   * valid memory and helps catch stale references */
  if (CLIB_DEBUG)
    memset (app_wrk, 0xfe, sizeof (*app_wrk));
}
+
+int
+app_worker_alloc_and_init (application_t * app, app_worker_t ** wrk)
+{
+ app_worker_map_t *wrk_map;
+ app_worker_t *app_wrk;
+ segment_manager_t *sm;
+ int rv;
+
+ app_wrk = app_worker_alloc (app);
+ wrk_map = app_worker_map_alloc (app);
+ wrk_map->wrk_index = app_wrk->wrk_index;
+ app_wrk->wrk_map_index = app_worker_map_index (app, wrk_map);
+
+ /*
+ * Setup first segment manager
+ */
+ sm = segment_manager_new ();
+ sm->app_wrk_index = app_wrk->wrk_index;
+
+ if ((rv = segment_manager_init (sm, app->sm_properties.segment_size,
+ app->sm_properties.prealloc_fifos)))
+ {
+ app_worker_free (app_wrk);
+ return rv;
+ }
+ sm->first_is_protected = 1;
+
+ /*
+ * Setup app worker
+ */
+ app_wrk->first_segment_manager = segment_manager_index (sm);
+ app_wrk->listeners_table = hash_create (0, sizeof (u64));
+ app_wrk->event_queue = segment_manager_event_queue (sm);
+ app_wrk->app_is_builtin = application_is_builtin (app);
+
+ /*
+ * Segment manager for local sessions
+ */
+ sm = segment_manager_new ();
+ sm->app_wrk_index = app_wrk->wrk_index;
+ app_wrk->local_segment_manager = segment_manager_index (sm);
+ app_wrk->local_connects = hash_create (0, sizeof (u64));
+
+ *wrk = app_wrk;
+
+ return 0;
+}
+
+static segment_manager_t *
+application_alloc_segment_manager (app_worker_t * app_wrk)
+{
+ segment_manager_t *sm = 0;
+
+ /* If the first segment manager is not in use, don't allocate a new one */
+ if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
+ && app_wrk->first_segment_manager_in_use == 0)
+ {
+ sm = segment_manager_get (app_wrk->first_segment_manager);
+ app_wrk->first_segment_manager_in_use = 1;
+ return sm;
+ }
+
+ sm = segment_manager_new ();
+ sm->app_wrk_index = app_wrk->wrk_index;
+
+ return sm;
+}
+
+/**
+ * Start listening local transport endpoint for requested transport.
+ *
+ * Creates a 'dummy' stream session with state LISTENING to be used in session
+ * lookups, prior to establishing connection. Requests transport to build
+ * it's own specific listening connection.
+ */
+int
+app_worker_start_listen (app_worker_t * app_wrk, session_endpoint_t * sep,
+ session_handle_t * res)
+{
+ segment_manager_t *sm;
+ stream_session_t *s;
+ session_handle_t handle;
+ session_type_t sst;
+
+ sst = session_type_from_proto_and_ip (sep->transport_proto, sep->is_ip4);
+ s = listen_session_new (0, sst);
+ s->app_wrk_index = app_wrk->wrk_index;
+
+ /* Allocate segment manager. All sessions derived out of a listen session
+ * have fifos allocated by the same segment manager. */
+ if (!(sm = application_alloc_segment_manager (app_wrk)))
+ goto err;
+
+ /* Add to app's listener table. Useful to find all child listeners
+ * when app goes down, although, just for unbinding this is not needed */
+ handle = listen_session_get_handle (s);
+ hash_set (app_wrk->listeners_table, handle, segment_manager_index (sm));
+
+ if (stream_session_listen (s, sep))
+ {
+ segment_manager_del (sm);
+ hash_unset (app_wrk->listeners_table, handle);
+ goto err;
+ }
+
+ *res = handle;
+ return 0;
+
+err:
+ listen_session_del (s);
+ return -1;
+}
+
+/**
+ * Stop listening on session associated to handle
+ *
+ * @param handle listener handle
+ * @param app_index index of the app owning the handle. This is used
+ * only for validating ownership
+ */
+int
+app_worker_stop_listen (session_handle_t handle, u32 app_index)
+{
+ stream_session_t *listener;
+ segment_manager_t *sm;
+ app_worker_t *app_wrk;
+ uword *indexp;
+
+ listener = listen_session_get_from_handle (handle);
+ app_wrk = app_worker_get (listener->app_wrk_index);
+ if (PREDICT_FALSE (!app_wrk || app_wrk->app_index != app_index))
+ {
+ clib_warning ("app doesn't own handle %llu!", handle);
+ return -1;
+ }
+ if (PREDICT_FALSE (hash_get (app_wrk->listeners_table, handle) == 0))
+ {
+ clib_warning ("listener handle was removed %llu!", handle);
+ return -1;
+ }
+
+ stream_session_stop_listen (listener);
+
+ indexp = hash_get (app_wrk->listeners_table, handle);
+ ASSERT (indexp);
+
+ sm = segment_manager_get (*indexp);
+ if (app_wrk->first_segment_manager == *indexp)
+ {
+ /* Delete sessions but don't remove segment manager */
+ app_wrk->first_segment_manager_in_use = 0;
+ segment_manager_del_sessions (sm);
+ }
+ else
+ {
+ segment_manager_init_del (sm);
+ }
+ hash_unset (app_wrk->listeners_table, handle);
+ listen_session_del (listener);
+
+ return 0;
+}
+
+int
+app_worker_open_session (app_worker_t * app, session_endpoint_t * sep,
+ u32 api_context)
+{
+ int rv;
+
+ /* Make sure we have a segment manager for connects */
+ app_worker_alloc_connects_segment_manager (app);
+
+ if ((rv = session_open (app->wrk_index, sep, api_context)))
+ return rv;
+
+ return 0;
+}
+
+int
+app_worker_alloc_connects_segment_manager (app_worker_t * app_wrk)
+{
+ segment_manager_t *sm;
+
+ if (app_wrk->connects_seg_manager == APP_INVALID_SEGMENT_MANAGER_INDEX)
+ {
+ sm = application_alloc_segment_manager (app_wrk);
+ if (sm == 0)
+ return -1;
+ app_wrk->connects_seg_manager = segment_manager_index (sm);
+ }
+ return 0;
+}
+
+segment_manager_t *
+app_worker_get_connect_segment_manager (app_worker_t * app)
+{
+ ASSERT (app->connects_seg_manager != (u32) ~ 0);
+ return segment_manager_get (app->connects_seg_manager);
+}
+
+segment_manager_t *
+app_worker_get_listen_segment_manager (app_worker_t * app,
+ stream_session_t * s)
+{
+ uword *smp;
+ smp = hash_get (app->listeners_table, listen_session_get_handle (s));
+ ASSERT (smp != 0);
+ return segment_manager_get (*smp);
+}
+
+clib_error_t *
+vnet_app_worker_add_del (vnet_app_worker_add_del_args_t * a)
+{
+ svm_fifo_segment_private_t *fs;
+ app_worker_map_t *wrk_map;
+ app_worker_t *app_wrk;
+ segment_manager_t *sm;
+ application_t *app;
+ int rv;
+
+ app = application_get (a->app_index);
+ if (!app)
+ return clib_error_return_code (0, VNET_API_ERROR_INVALID_VALUE, 0,
+ "App %u does not exist", a->app_index);
+
+ if (a->is_add)
+ {
+ if ((rv = app_worker_alloc_and_init (app, &app_wrk)))
+ return clib_error_return_code (0, rv, 0, "app wrk init: %d", rv);
+ sm = segment_manager_get (app_wrk->first_segment_manager);
+ fs = segment_manager_get_segment_w_lock (sm, 0);
+ a->segment = &fs->ssvm;
+ segment_manager_segment_reader_unlock (sm);
+ a->evt_q = app_wrk->event_queue;
+ }
+ else
+ {
+ wrk_map = app_worker_map_get (app, a->wrk_index);
+ if (!wrk_map)
+ return clib_error_return_code (0, VNET_API_ERROR_INVALID_VALUE, 0,
+ "App %u does not have worker %u",
+ app->app_index, a->wrk_index);
+ app_wrk = app_worker_get (wrk_map->wrk_index);
+ app_worker_map_free (app, wrk_map);
+ if (!app_wrk)
+ return clib_error_return_code (0, VNET_API_ERROR_INVALID_VALUE, 0,
+ "No worker %u", a->wrk_index);
+ app_worker_free (app_wrk);
+ }
+ return 0;
+}
+
+segment_manager_t *
+application_get_local_segment_manager (app_worker_t * app)
+{
+ return segment_manager_get (app->local_segment_manager);
+}
+
+segment_manager_t *
+application_get_local_segment_manager_w_session (app_worker_t * app,
+ local_session_t * ls)
+{
+ stream_session_t *listener;
+ if (application_local_session_listener_has_transport (ls))
+ {
+ listener = listen_session_get (ls->listener_index);
+ return app_worker_get_listen_segment_manager (app, listener);
+ }
+ return segment_manager_get (app->local_segment_manager);
+}
+
+int
+application_is_proxy (application_t * app)
+{
+ return (app->flags & APP_OPTIONS_FLAGS_IS_PROXY);
+}
+
+int
+application_is_builtin (application_t * app)
+{
+ return (app->flags & APP_OPTIONS_FLAGS_IS_BUILTIN);
+}
+
+int
+application_is_builtin_proxy (application_t * app)
+{
+ return (application_is_proxy (app) && application_is_builtin (app));
+}
+
+u8
+application_has_local_scope (application_t * app)
+{
+ return app->flags & APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+}
+
+u8
+application_has_global_scope (application_t * app)
+{
+ return app->flags & APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+}
+
+u8
+application_use_mq_for_ctrl (application_t * app)
+{
+ return app->flags & APP_OPTIONS_FLAGS_USE_MQ_FOR_CTRL_MSGS;
+}
+
+/**
+ * Send an API message to the external app, to map new segment
+ */
+int
+app_worker_add_segment_notify (u32 app_wrk_index, ssvm_private_t * fs)
+{
+ app_worker_t *app_wrk = app_worker_get (app_wrk_index);
+ application_t *app = application_get (app_wrk->app_index);
+ return app->cb_fns.add_segment_callback (app->api_client_index, fs);
+}
+
+u32
+application_n_listeners (app_worker_t * app)
+{
+ return hash_elts (app->listeners_table);
+}
+
/**
 * First non-proxy listener of the worker matching transport and fib proto,
 * or 0 if none. Iteration order is the hash table's, i.e., unspecified.
 */
stream_session_t *
app_worker_first_listener (app_worker_t * app, u8 fib_proto,
                           u8 transport_proto)
{
  stream_session_t *listener;
  u64 handle;
  u32 sm_index;
  u8 sst;

  sst = session_type_from_proto_and_ip (transport_proto,
                                        fib_proto == FIB_PROTOCOL_IP4);

  /* Note: the return statement exits the function from inside the
   * hash_foreach macro loop */
  /* *INDENT-OFF* */
  hash_foreach (handle, sm_index, app->listeners_table, ({
    listener = listen_session_get_from_handle (handle);
    if (listener->session_type == sst
        && listener->listener_index != SESSION_PROXY_LISTENER_INDEX)
      return listener;
  }));
  /* *INDENT-ON* */

  return 0;
}
+
+u8
+app_worker_application_is_builtin (app_worker_t * app_wrk)
+{
+ return app_wrk->app_is_builtin;
+}
+
/**
 * The worker's proxy listener for the given transport and fib proto,
 * or 0 if none. Mirrors app_worker_first_listener but selects the
 * proxy-marked listener instead of skipping it.
 */
stream_session_t *
application_proxy_listener (app_worker_t * app, u8 fib_proto,
                            u8 transport_proto)
{
  stream_session_t *listener;
  u64 handle;
  u32 sm_index;
  u8 sst;

  sst = session_type_from_proto_and_ip (transport_proto,
                                        fib_proto == FIB_PROTOCOL_IP4);

  /* Note: the return statement exits the function from inside the
   * hash_foreach macro loop */
  /* *INDENT-OFF* */
  hash_foreach (handle, sm_index, app->listeners_table, ({
    listener = listen_session_get_from_handle (handle);
    if (listener->session_type == sst
        && listener->listener_index == SESSION_PROXY_LISTENER_INDEX)
      return listener;
  }));
  /* *INDENT-ON* */

  return 0;
}
+
/**
 * Enable/disable global-scope proxying for one fib protocol.
 *
 * On start, ensures a (proxy-marked) listener exists for the transport;
 * then, if the listener has a non-zero local ip, adds/removes a wildcard
 * (port 0) session endpoint in the fib's session lookup table.
 */
static clib_error_t *
application_start_stop_proxy_fib_proto (application_t * app, u8 fib_proto,
                                        u8 transport_proto, u8 is_start)
{
  app_namespace_t *app_ns = app_namespace_get (app->ns_index);
  u8 is_ip4 = (fib_proto == FIB_PROTOCOL_IP4);
  session_endpoint_t sep = SESSION_ENDPOINT_NULL;
  transport_connection_t *tc;
  app_worker_t *app_wrk;
  stream_session_t *s;
  u64 handle;

  /* TODO decide if we want proxy to be enabled for all workers */
  app_wrk = application_get_default_worker (app);
  if (is_start)
    {
      s = app_worker_first_listener (app_wrk, fib_proto, transport_proto);
      if (!s)
        {
          /* No listener yet: create one on the namespace's interface and
           * mark it as the proxy listener */
          sep.is_ip4 = is_ip4;
          sep.fib_index = app_namespace_get_fib_index (app_ns, fib_proto);
          sep.sw_if_index = app_ns->sw_if_index;
          sep.transport_proto = transport_proto;
          app_worker_start_listen (app_wrk, &sep, &handle);
          s = listen_session_get_from_handle (handle);
          s->listener_index = SESSION_PROXY_LISTENER_INDEX;
        }
    }
  else
    {
      s = application_proxy_listener (app_wrk, fib_proto, transport_proto);
      ASSERT (s);
    }

  tc = listen_session_get_transport (s);

  if (!ip_is_zero (&tc->lcl_ip, 1))
    {
      /* Listener is bound to an address: (un)register the port-0 wildcard
       * endpoint so lookups hit the proxy */
      u32 sti;
      sep.is_ip4 = is_ip4;
      sep.fib_index = app_namespace_get_fib_index (app_ns, fib_proto);
      sep.transport_proto = transport_proto;
      sep.port = 0;
      sti = session_lookup_get_index_for_fib (fib_proto, sep.fib_index);
      if (is_start)
        session_lookup_add_session_endpoint (sti, &sep, s->session_index);
      else
        session_lookup_del_session_endpoint (sti, &sep);
    }

  return 0;
}
+
+static void
+application_start_stop_proxy_local_scope (application_t * app,
+ u8 transport_proto, u8 is_start)
+{
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
+ app_namespace_t *app_ns;
+ app_ns = app_namespace_get (app->ns_index);
+ sep.is_ip4 = 1;
+ sep.transport_proto = transport_proto;
+ sep.port = 0;
+
+ if (is_start)
+ {
+ session_lookup_add_session_endpoint (app_ns->local_table_index, &sep,
+ app->app_index);
+ sep.is_ip4 = 0;
+ session_lookup_add_session_endpoint (app_ns->local_table_index, &sep,
+ app->app_index);
+ }
+ else
+ {
+ session_lookup_del_session_endpoint (app_ns->local_table_index, &sep);
+ sep.is_ip4 = 0;
+ session_lookup_del_session_endpoint (app_ns->local_table_index, &sep);
+ }
+}
+
+void
+application_start_stop_proxy (application_t * app,
+ transport_proto_t transport_proto, u8 is_start)
+{
+ if (application_has_local_scope (app))
+ application_start_stop_proxy_local_scope (app, transport_proto, is_start);
+
+ if (application_has_global_scope (app))
+ {
+ application_start_stop_proxy_fib_proto (app, FIB_PROTOCOL_IP4,
+ transport_proto, is_start);
+ application_start_stop_proxy_fib_proto (app, FIB_PROTOCOL_IP6,
+ transport_proto, is_start);
+ }
+}
+
/**
 * Enable proxying for every transport the app requested at attach time
 * (bitmap in app->proxied_transports).
 */
void
application_setup_proxy (application_t * app)
{
  u16 transports = app->proxied_transports;
  transport_proto_t tp;

  ASSERT (application_is_proxy (app));

  /* *INDENT-OFF* */
  transport_proto_foreach (tp, ({
    if (transports & (1 << tp))
      application_start_stop_proxy (app, tp, 1);
  }));
  /* *INDENT-ON* */
}
+
/**
 * Disable proxying for every transport the app had enabled. Inverse of
 * application_setup_proxy.
 */
void
application_remove_proxy (application_t * app)
{
  u16 transports = app->proxied_transports;
  transport_proto_t tp;

  ASSERT (application_is_proxy (app));

  /* *INDENT-OFF* */
  transport_proto_foreach (tp, ({
    if (transports & (1 << tp))
      application_start_stop_proxy (app, tp, 0);
  }));
  /* *INDENT-ON* */
}
+
+segment_manager_properties_t *
+application_segment_manager_properties (application_t * app)
+{
+ return &app->sm_properties;
+}
+
+segment_manager_properties_t *
+application_get_segment_manager_properties (u32 app_index)
+{
+ application_t *app = application_get (app_index);
+ return &app->sm_properties;
+}
+
/**
 * Push @a msg onto @a mq honoring the caller's locking mode.
 *
 * When @a lock is set the caller already holds the queue lock; it is
 * released here on every path. Returns 0 on success, -1 when the queue is
 * full (the message is freed back to its ring).
 */
static inline int
app_enqueue_evt (svm_msg_q_t * mq, svm_msg_q_msg_t * msg, u8 lock)
{
  if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
    {
      clib_warning ("evt q full");
      svm_msg_q_free_msg (mq, msg);
      if (lock)
        svm_msg_q_unlock (mq);
      return -1;
    }

  if (lock)
    {
      svm_msg_q_add_and_unlock (mq, msg);
      return 0;
    }

  /* Even when not locking the ring, we must wait for queue mutex */
  if (svm_msg_q_add (mq, msg, SVM_Q_WAIT))
    {
      clib_warning ("msg q add returned");
      return -1;
    }
  return 0;
}
+
/**
 * Notify the app's worker that rx data is available on session @a s.
 *
 * Builtin apps are called back synchronously; external apps get a
 * FIFO_EVENT_APP_RX message on the worker's event queue. Returns 0 on
 * success or when no notification is needed, -1 on queue/ring full.
 */
static inline int
app_send_io_evt_rx (app_worker_t * app_wrk, stream_session_t * s, u8 lock)
{
  session_event_t *evt;
  svm_msg_q_msg_t msg;
  svm_msg_q_t *mq;

  if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY
                     && s->session_state != SESSION_STATE_LISTENING))
    {
      /* Session is closed so app will never clean up. Flush rx fifo */
      if (s->session_state == SESSION_STATE_CLOSED)
        svm_fifo_dequeue_drop_all (s->server_rx_fifo);
      return 0;
    }

  if (app_worker_application_is_builtin (app_wrk))
    {
      /* Builtin apps are notified via direct callback, no queue involved */
      application_t *app = application_get (app_wrk->app_index);
      return app->cb_fns.builtin_app_rx_callback (s);
    }

  /* An event is already pending for this fifo, or there is nothing to
   * read: no new notification needed */
  if (svm_fifo_has_event (s->server_rx_fifo)
      || svm_fifo_is_empty (s->server_rx_fifo))
    return 0;

  mq = app_wrk->event_queue;
  if (lock)
    svm_msg_q_lock (mq);

  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      clib_warning ("evt q rings full");
      if (lock)
        svm_msg_q_unlock (mq);
      return -1;
    }

  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  ASSERT (!svm_msg_q_msg_is_invalid (&msg));

  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->fifo = s->server_rx_fifo;
  evt->event_type = FIFO_EVENT_APP_RX;

  if (app_enqueue_evt (mq, &msg, lock))
    return -1;
  /* Mark the fifo only after the event was actually enqueued */
  (void) svm_fifo_set_event (s->server_rx_fifo);
  return 0;
}
+
/**
 * Notify the app's worker that tx space is available on session @a s.
 *
 * Builtin apps need no notification. External apps get a
 * FIFO_EVENT_APP_TX message on the worker's event queue. Returns 0 on
 * success, -1 on queue/ring full.
 */
static inline int
app_send_io_evt_tx (app_worker_t * app_wrk, stream_session_t * s, u8 lock)
{
  svm_msg_q_t *mq;
  session_event_t *evt;
  svm_msg_q_msg_t msg;

  if (app_worker_application_is_builtin (app_wrk))
    return 0;

  mq = app_wrk->event_queue;
  if (lock)
    svm_msg_q_lock (mq);

  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      clib_warning ("evt q rings full");
      if (lock)
        svm_msg_q_unlock (mq);
      return -1;
    }

  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  ASSERT (!svm_msg_q_msg_is_invalid (&msg));

  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->event_type = FIFO_EVENT_APP_TX;
  evt->fifo = s->server_tx_fifo;

  /* app_enqueue_evt releases the lock (when held) on all paths */
  return app_enqueue_evt (mq, &msg, lock);
}
+
/* *INDENT-OFF* */
/** Signature shared by the io event senders above. */
typedef int (app_send_evt_handler_fn) (app_worker_t *app,
                                       stream_session_t *s,
                                       u8 lock);
/* Dispatch table: rx handler in slot 0, tx handler in slot 2 — presumably
 * indexed by the FIFO_EVENT_* type; confirm against its definition. */
static app_send_evt_handler_fn * const app_send_evt_handler_fns[3] = {
  app_send_io_evt_rx,
  0,
  app_send_io_evt_tx,
};
/* *INDENT-ON* */
+
+/**
+ * Send event to application
+ *
+ * Logic from queue perspective is non-blocking. That is, if there's
+ * not enough space to enqueue a message, we return. However, if the lock
+ * flag is set, we do wait for queue mutex.