+ if (smm->evt_qs_segment_size)
+ eqs_size = smm->evt_qs_segment_size;
+
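+ /* Set up the event queue segment (memfd backed): size, owner pid, name and base va */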
+ eqs->ssvm_size = eqs_size;
+ eqs->i_am_master = 1;
+ eqs->my_pid = vpp_pid;
+ eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
+ eqs->requested_va = smm->session_baseva;
+
+ if (ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD))
+ {
+ clib_warning ("failed to initialize queue segment");
+ return;
+ }
+ }
+
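+ /* Allocate the queues on the memfd segment heap if one is in use, otherwise on the binary api shared-memory heap */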
+ if (smm->evt_qs_use_memfd_seg)
+ oldheap = ssvm_push_heap (eqs->sh);
+ else
+ oldheap = svm_push_data_heap (am->vlib_rp);
+
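+ /* Allocate a vpp event message queue per worker, with two rings of different element sizes */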
+ for (i = 0; i < vec_len (smm->wrk); i++)
+ {
+ svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
+ svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
+ {evt_q_length, evt_size, 0},
+ {evt_q_length << 1, 256, 0}
+ };
+ cfg->consumer_pid = 0;
+ cfg->n_rings = SESSION_MQ_N_RINGS;
+ cfg->q_nitems = evt_q_length;
+ cfg->ring_cfgs = rc;
+ smm->wrk[i].vpp_event_queue = svm_msg_q_alloc (cfg);
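+ /* Queues placed in the memfd segment use eventfds for consumer notification */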
+ if (smm->evt_qs_use_memfd_seg)
+ {
+ if (svm_msg_q_alloc_consumer_eventfd (smm->wrk[i].vpp_event_queue))
+ clib_warning ("eventfd returned");
+ }
+ }
+
+ if (smm->evt_qs_use_memfd_seg)
+ ssvm_pop_heap (oldheap);
+ else
+ svm_pop_heap (oldheap);
+}
+
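+/**
+ * Return the event queue segment if queues are allocated in a memfd segment,
+ * 0 otherwise.
+ */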
+ssvm_private_t *
+session_manager_get_evt_q_segment (void)
+{
+ session_manager_main_t *smm = &session_manager_main;
+ if (smm->evt_qs_use_memfd_seg)
+ return &smm->evt_qs_segment;
+ return 0;
+}
+
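+/* Session tx dispatch functions, indexed by transport tx function type */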
+/* *INDENT-OFF* */
+static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = {
+ session_tx_fifo_peek_and_snd,
+ session_tx_fifo_dequeue_and_snd,
+ session_tx_fifo_dequeue_internal,
+ session_tx_fifo_dequeue_and_snd
+};
+/* *INDENT-ON* */
+
+/**
+ * Initialize session layer for given transport proto and ip version
+ *
+ * Allocates per session type (transport proto + ip version) data structures
+ * and adds an arc from the session queue node to the session type output node.
+ */
+void
+session_register_transport (transport_proto_t transport_proto,
+ const transport_proto_vft_t * vft, u8 is_ip4,
+ u32 output_node)
+{
+ session_manager_main_t *smm = &session_manager_main;
+ session_type_t session_type;
+ u32 next_index = ~0;