+ s->session_state = SESSION_STATE_CLOSED;
+
+  /* Delete from main lookup table before we axe the transport */
+ session_lookup_del_session (s);
+ tp_vfts[session_get_transport_proto (s)].cleanup (s->connection_index,
+ s->thread_index);
+ /* Since we called cleanup, no delete notification will come. So, make
+ * sure the session is properly freed. */
+ segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo,
+ s->server_tx_fifo);
+ session_free (s);
+}
+
+/**
+ * Return the service type of the transport protocol backing @a s.
+ */
+transport_service_type_t
+session_transport_service_type (stream_session_t * s)
+{
+  return transport_protocol_service_type (session_get_transport_proto (s));
+}
+
+/**
+ * Return the tx function type of the transport protocol backing @a s.
+ */
+transport_tx_fn_type_t
+session_transport_tx_fn_type (stream_session_t * s)
+{
+  return transport_protocol_tx_fn_type (session_get_transport_proto (s));
+}
+
+/**
+ * Check whether the session's transport sends datagrams rather than a
+ * byte stream.
+ */
+u8
+session_tx_is_dgram (stream_session_t * s)
+{
+  transport_tx_fn_type_t tx_type = session_transport_tx_fn_type (s);
+  return tx_type == TRANSPORT_TX_DGRAM ? 1 : 0;
+}
+
+/**
+ * Allocate event queues in the shared-memory segment
+ *
+ * That can either be a newly created memfd segment, that will need to be
+ * mapped by all stack users, or the binary api's svm region. The latter is
+ * assumed to be already mapped. NOTE that this assumption DOES NOT hold if
+ * api clients bootstrap shm api over sockets (i.e. use memfd segments) and
+ * vpp uses api svm region for event queues.
+ *
+ * One message queue is allocated per vpp worker (one slot per entry in
+ * smm->vpp_event_queues). If the memfd segment cannot be initialized, the
+ * function warns and returns without allocating any queues.
+ */
+void
+session_vpp_event_queues_allocate (session_manager_main_t * smm)
+{
+  u32 evt_q_length = 2048, evt_size = sizeof (session_event_t);
+  ssvm_private_t *eqs = &smm->evt_qs_segment;
+  api_main_t *am = &api_main;
+  u64 eqs_size = 64 << 20;	/* default 64MB segment */
+  pid_t vpp_pid = getpid ();
+  void *oldheap;
+  int i;
+
+  if (smm->configured_event_queue_length)
+    evt_q_length = smm->configured_event_queue_length;
+
+  if (smm->evt_qs_use_memfd_seg)
+    {
+      if (smm->evt_qs_segment_size)
+	eqs_size = smm->evt_qs_segment_size;
+
+      eqs->ssvm_size = eqs_size;
+      eqs->i_am_master = 1;
+      eqs->my_pid = vpp_pid;
+      eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
+      eqs->requested_va = smm->session_baseva;
+
+      if (ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD))
+	{
+	  clib_warning ("failed to initialize queue segment");
+	  return;
+	}
+    }
+
+  /* Queues are carved either out of the memfd segment or out of the binary
+   * api svm region, so push the corresponding heap while allocating. */
+  if (smm->evt_qs_use_memfd_seg)
+    oldheap = ssvm_push_heap (eqs->sh);
+  else
+    oldheap = svm_push_data_heap (am->vlib_rp);
+
+  for (i = 0; i < vec_len (smm->vpp_event_queues); i++)
+    {
+      svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
+      u32 notif_q_size = clib_max (16, evt_q_length >> 4);
+      svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
+	{evt_q_length, evt_size, 0}
+	,
+	{notif_q_size, 256, 0}
+      };
+      cfg->consumer_pid = 0;
+      /* Keep the ring count tied to the rc dimension so the two cannot
+       * silently diverge if SESSION_MQ_N_RINGS changes. */
+      cfg->n_rings = SESSION_MQ_N_RINGS;
+      cfg->q_nitems = evt_q_length;
+      cfg->ring_cfgs = rc;
+      smm->vpp_event_queues[i] = svm_msg_q_alloc (cfg);
+      if (smm->evt_qs_use_memfd_seg)
+	{
+	  if (svm_msg_q_alloc_consumer_eventfd (smm->vpp_event_queues[i]))
+	    clib_warning ("failed to allocate consumer eventfd");
+	}
+    }
+
+  if (smm->evt_qs_use_memfd_seg)
+    ssvm_pop_heap (oldheap);
+  else
+    svm_pop_heap (oldheap);
+}
+
+ssvm_private_t *
+session_manager_get_evt_q_segment (void)
+{
+ session_manager_main_t *smm = &session_manager_main;
+ if (smm->evt_qs_use_memfd_seg)
+ return &smm->evt_qs_segment;
+ return 0;