* will receive data, etc.
*/
txf->shr->master_session_index = s->session_index;
- txf->master_thread_index = s->thread_index;
+ txf->vpp_sh = s->handle;
/*
* Account for the active-open session's use of the fifos
if (svm_fifo_set_event (proxy_tx_fifo))
{
u8 thread_index = proxy_tx_fifo->master_thread_index;
- u32 session_index = proxy_tx_fifo->shr->master_session_index;
+ u32 session_index = proxy_tx_fifo->vpp_session_index;
return session_send_io_evt_to_thread_custom (&session_index,
thread_index,
SESSION_IO_EVT_TX);
int rv;
if (!svm_fifo_set_event (s->rx_fifo))
return;
- if ((rv = app_send_io_evt_to_vpp (s->vpp_evt_q,
- s->rx_fifo->shr->master_session_index,
- SESSION_IO_EVT_RX, SVM_Q_WAIT)))
+ if ((rv =
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo->vpp_session_index,
+ SESSION_IO_EVT_RX, SVM_Q_WAIT)))
ECHO_FAIL (ECHO_FAIL_SEND_IO_EVT, "app_send_io_evt_to_vpp errored %d",
rv);
svm_fifo_clear_deq_ntf (s->rx_fifo);
s->tx_fifo = fifo_segment_alloc_fifo_w_offset (fs, txf_offset);
s->rx_fifo->segment_index = fs_index;
s->tx_fifo->segment_index = fs_index;
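+  /* Cache vpp and app session indices in the private fifo struct; the io
+   * event and lookup paths touched by this change read these instead of
+   * the shared header fields */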
+ s->rx_fifo->vpp_session_index = s->rx_fifo->shr->master_session_index;
+ s->tx_fifo->vpp_session_index = s->tx_fifo->shr->master_session_index;
+ s->rx_fifo->app_session_index = s->session_index;
+ s->tx_fifo->app_session_index = s->session_index;
s->rx_fifo->shr->client_session_index = s->session_index;
s->tx_fifo->shr->client_session_index = s->session_index;
if (svm_fifo_has_event (s->rx_fifo))
return 0;
- app_session = s->rx_fifo->shr->client_session_index;
+ app_session = s->rx_fifo->app_session_index;
mq = app_wrk->event_queue;
rv = test_mq_try_lock_and_alloc_msg (mq, SESSION_MQ_IO_EVT_RING, mq_msg);
if (CLIB_DEBUG)
{
sf->master_session_index = ~0;
+ f->vpp_session_index = ~0;
f->master_thread_index = ~0;
}
pfss_fifo_del_active_list (pfss, of);
/* Collect chunks that were provided in return for those detached */
- fsh_slice_collect_chunks (fs->h, fss, of->chunks_at_attach);
- of->chunks_at_attach = 0;
+ fsh_slice_collect_chunks (fs->h, fss, svm_fifo_chunks_at_attach (of));
+ svm_fifo_chunks_at_attach (of) = 0;
/* Collect hdr that was provided in return for the detached */
- fss_fifo_free_list_push (fs->h, fss, of->hdr_at_attach);
- of->hdr_at_attach = 0;
+ fss_fifo_free_list_push (fs->h, fss, svm_fifo_hdr_at_attach (of));
+ svm_fifo_hdr_at_attach (of) = 0;
clib_mem_bulk_free (pfss->fifos, *f);
*f = 0;
/* Allocate shared hdr and chunks to be collected at detach in return
* for those that are being attached now */
of = *f;
- of->hdr_at_attach = fsh_try_alloc_fifo_hdr (fs->h, fss);
+ svm_fifo_hdr_at_attach (of) = fsh_try_alloc_fifo_hdr (fs->h, fss);
c = fs_chunk_ptr (fs->h, nf->shr->start_chunk);
- of->chunks_at_attach = pc = fsh_try_alloc_chunk (fs->h, fss, c->length);
+ svm_fifo_chunks_at_attach (of) = pc =
+ fsh_try_alloc_chunk (fs->h, fss, c->length);
while ((c = fs_chunk_ptr (fs->h, c->next)))
{
u32 ooos_newest; /**< Last segment to have been updated */
u8 flags; /**< fifo flags */
- u8 master_thread_index; /**< session layer thread index */
- u8 client_thread_index; /**< app worker index */
i8 refcnt; /**< reference count */
- u32 segment_manager; /**< session layer segment manager index */
- u32 segment_index; /**< segment index in segment manager */
+ u8 client_thread_index; /**< app worker index */
+ u32 app_session_index; /**< app session index */
+ union
+ {
+ struct
+ {
+ u32 vpp_session_index; /**< session layer session index */
+ u32 master_thread_index; /**< session layer thread index */
+ };
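+    /* Aliases the pair above as a vpp session handle; assumes little-endian
+     * hosts, with the session index in the low 32 bits and the thread index
+     * in the high 32 bits */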
+ u64 vpp_sh;
+ };
+ u32 segment_manager; /**< session layer segment manager index */
+ u32 segment_index; /**< segment index in segment manager */
struct _svm_fifo *next; /**< next in active chain */
struct _svm_fifo *prev; /**< prev in active chain */
- svm_fifo_chunk_t *chunks_at_attach; /**< chunks to be accounted at detach */
- svm_fifo_shared_t *hdr_at_attach; /**< hdr to be freed at detach */
-
#if SVM_FIFO_TRACE
svm_fifo_trace_elem_t *trace;
#endif
} svm_fifo_t;
+/* To minimize the size of svm_fifo_t, reuse the ooo pointers for tracking
+ * chunks and hdr at attach/detach. A fifo being migrated should not receive
+ * new data */
+#define svm_fifo_chunks_at_attach(f) f->ooo_deq
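+/* ooo_deq already holds a chunk pointer, so it can track the attach chunks
+ * directly; ooo_enq is type-punned below to stash the shared hdr pointer */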
+#define svm_fifo_hdr_at_attach(f) \
+ ((union { \
+ svm_fifo_shared_t *hdr; \
+ svm_fifo_chunk_t *ooo_enq; \
+ } *) &f->ooo_enq) \
+ ->hdr
+
typedef struct fifo_segment_slice_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline);
if (verbose > 1)
s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
- format_white_space, indent, f->shr->master_session_index,
- f->master_thread_index, f->shr->client_session_index,
+ format_white_space, indent, f->vpp_session_index,
+ f->master_thread_index, f->app_session_index,
f->client_thread_index);
if (verbose)
if (!is_ct)
{
+ rxf->vpp_session_index = rxf->shr->master_session_index;
+ txf->vpp_session_index = txf->shr->master_session_index;
rxf->shr->client_session_index = s->session_index;
txf->shr->client_session_index = s->session_index;
+ rxf->app_session_index = s->session_index;
+ txf->app_session_index = s->session_index;
rxf->client_thread_index = vcl_get_worker_index ();
txf->client_thread_index = vcl_get_worker_index ();
s->rx_fifo = rxf;
/* Generate new tx event if we have outstanding data */
if (svm_fifo_has_event (s->tx_fifo))
- app_send_io_evt_to_vpp (s->vpp_evt_q,
- s->tx_fifo->shr->master_session_index,
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->tx_fifo->vpp_session_index,
SESSION_IO_EVT_TX, SVM_Q_WAIT);
VDBG (0, "Migrated 0x%lx to thread %u 0x%lx", mp->handle,
if (PREDICT_FALSE (svm_fifo_needs_deq_ntf (rx_fifo, n_read)))
{
svm_fifo_clear_deq_ntf (rx_fifo);
- app_send_io_evt_to_vpp (s->vpp_evt_q,
- s->rx_fifo->shr->master_session_index,
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo->vpp_session_index,
SESSION_IO_EVT_RX, SVM_Q_WAIT);
}
}
if (svm_fifo_set_event (s->tx_fifo))
- app_send_io_evt_to_vpp (
- s->vpp_evt_q, s->tx_fifo->shr->master_session_index, et, SVM_Q_WAIT);
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->tx_fifo->vpp_session_index, et,
+ SVM_Q_WAIT);
/* The underlying fifo segment can run out of memory */
if (PREDICT_FALSE (n_write < 0))
if (do_evt)
{
if (svm_fifo_set_event (f))
- app_send_io_evt_to_vpp (vpp_evt_q, f->shr->master_session_index,
- evt_type, noblock);
+ app_send_io_evt_to_vpp (vpp_evt_q, f->vpp_session_index, evt_type,
+ noblock);
}
return len;
}
if (do_evt)
{
if (rv > 0 && svm_fifo_set_event (f))
- app_send_io_evt_to_vpp (vpp_evt_q, f->shr->master_session_index,
- evt_type, noblock);
+ app_send_io_evt_to_vpp (vpp_evt_q, f->vpp_session_index, evt_type,
+ noblock);
}
return rv;
}
ls->rx_fifo->shr->master_session_index = ls->session_index;
ls->tx_fifo->shr->master_session_index = ls->session_index;
- ls->rx_fifo->master_thread_index = ls->thread_index;
- ls->tx_fifo->master_thread_index = ls->thread_index;
+ ls->rx_fifo->vpp_sh = ls->handle;
+ ls->tx_fifo->vpp_sh = ls->handle;
seg_handle = segment_manager_segment_handle (sm, fs);
segment_manager_segment_reader_unlock (sm);
return rv;
rx_fifo->shr->master_session_index = s->session_index;
- rx_fifo->master_thread_index = s->thread_index;
+ rx_fifo->vpp_sh = s->handle;
tx_fifo->shr->master_session_index = s->session_index;
- tx_fifo->master_thread_index = s->thread_index;
+ tx_fifo->vpp_sh = s->handle;
s->rx_fifo = rx_fifo;
s->tx_fifo = tx_fifo;
&tx_fifo);
rx_fifo->shr->master_session_index = s->session_index;
- rx_fifo->master_thread_index = s->thread_index;
+ rx_fifo->vpp_sh = s->handle;
tx_fifo->shr->master_session_index = s->session_index;
- tx_fifo->master_thread_index = s->thread_index;
+ tx_fifo->vpp_sh = s->handle;
s->rx_fifo = rx_fifo;
s->tx_fifo = tx_fifo;
*/
while (f)
{
- session = session_get_if_valid (f->shr->master_session_index,
+ session = session_get_if_valid (f->vpp_session_index,
f->master_thread_index);
if (session)
vec_add1 (handles, session_handle (session));
f = fifo_segment_get_slice_fifo_list (fs, slice_index);
while (f)
{
- session = session_get_if_valid (f->shr->master_session_index,
+ session = session_get_if_valid (f->vpp_session_index,
f->master_thread_index);
if (session)
{
segment_manager_segment_reader_unlock (sm);
(*f)->shr->master_session_index = s->session_index;
- (*f)->master_thread_index = s->thread_index;
+ (*f)->vpp_sh = s->handle;
}
u32
u32 session_index, thread_index;
session_t *session;
- session_index = f->shr->master_session_index;
+ session_index = f->vpp_session_index;
thread_index = f->master_thread_index;
session = session_get (session_index, thread_index);
int
session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
{
- return session_send_evt_to_thread (&f->shr->master_session_index, 0,
+ return session_send_evt_to_thread (&f->vpp_session_index, 0,
f->master_thread_index, evt_type);
}
}
/* Setup client session index in advance, in case data arrives
- * before the app processes message and updates it */
+ * before the app processes the message and updates it.
+ * Maybe this needs to be done via a reply message from the app */
s->rx_fifo->shr->client_session_index = api_context;
s->tx_fifo->shr->client_session_index = api_context;
+ s->rx_fifo->app_session_index = api_context;
+ s->tx_fifo->app_session_index = api_context;
snd_msg:
mq_evt = svm_msg_q_msg_data (mq, &mq_msg);
mq_evt->event_type = SESSION_IO_EVT_RX;
- mq_evt->session_index = s->rx_fifo->shr->client_session_index;
+ mq_evt->session_index = s->rx_fifo->app_session_index;
(void) svm_fifo_set_event (s->rx_fifo);
mq_evt = svm_msg_q_msg_data (mq, &mq_msg);
mq_evt->event_type = SESSION_IO_EVT_TX;
- mq_evt->session_index = s->tx_fifo->shr->client_session_index;
+ mq_evt->session_index = s->tx_fifo->app_session_index;
svm_msg_q_add_raw (mq, &mq_msg);
case SESSION_IO_EVT_BUILTIN_RX:
case SESSION_IO_EVT_TX_MAIN:
case SESSION_IO_EVT_TX_FLUSH:
- if (e->session_index == f->shr->master_session_index)
+ if (e->session_index == f->vpp_session_index)
return 1;
break;
case SESSION_CTRL_EVT_CLOSE:
return;
}
+ /* TODO(fcoras) This needs to be part of the reply message */
+ s->rx_fifo->app_session_index = s->rx_fifo->shr->client_session_index;
+ s->tx_fifo->app_session_index = s->tx_fifo->shr->client_session_index;
+
/* Special handling for cut-through sessions */
if (!session_has_transport (s))
{