#ifndef __included_uri_h__
#define __included_uri_h__
-#include <vlib/vlib.h>
-#include <vnet/vnet.h>
#include <svm/svm_fifo_segment.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
_(ACCEPT_REDIRECT, "Use FIFO with redirects") \
_(ADD_SEGMENT, "Add segment and signal app if needed") \
_(IS_BUILTIN, "Application is builtin") \
- _(IS_PROXY, "Application is proxying") \
+ _(IS_PROXY, "Application is proxying") \
_(USE_GLOBAL_SCOPE, "App can use global session scope") \
- _(USE_LOCAL_SCOPE, "App can use local session scope")
+ _(USE_LOCAL_SCOPE, "App can use local session scope") \
+ _(USE_MQ_FOR_CTRL_MSGS, "Use message queue for ctrl msgs")
typedef enum _app_options
{
{
ip46_address_t rmt_ip; /**< remote ip */
ip46_address_t lcl_ip; /**< local ip */
- u16 rmt_port; /**< remote port */
- u16 lcl_port; /**< local port */
+ u16 rmt_port; /**< remote port (network order) */
+ u16 lcl_port; /**< local port (network order) */
u8 is_ip4; /**< set if uses ip4 networking */
} app_session_transport_t;
-typedef struct app_session_
+/** X-macro listing every app_session_t field, so other structs can embed
+ * exactly the same leading layout by expanding the same list. */
+#define foreach_app_session_field \
+ _(svm_fifo_t, *rx_fifo) /**< rx fifo */ \
+ _(svm_fifo_t, *tx_fifo) /**< tx fifo */ \
+ _(session_type_t, session_type) /**< session type */ \
+ _(volatile u8, session_state) /**< session state */ \
+ _(u32, session_index) /**< index in owning pool */ \
+ _(app_session_transport_t, transport) /**< transport info */ \
+ _(svm_msg_q_t, *vpp_evt_q) /**< vpp event queue */ \
+ _(u8, is_dgram) /**< flag for dgram mode */ \
+
+typedef struct
{
- svm_fifo_t *rx_fifo; /**< rx fifo */
- svm_fifo_t *tx_fifo; /**< tx fifo */
- session_type_t session_type; /**< session type */
- volatile u8 session_state; /**< session state */
- u32 session_index; /**< index in owning pool */
- app_session_transport_t transport; /**< transport info */
- svm_queue_t *vpp_evt_q; /**< vpp event queue for session */
- u8 is_dgram; /**< set if it works in dgram mode */
+/* Expand the x-macro above into plain struct members. */
+#define _(type, name) type name;
+ foreach_app_session_field
+#undef _
} app_session_t;
+/** Notification that a new session was accepted on a listener; carries
+ * handles plus fifo/event-queue addresses the app needs to attach. */
+typedef struct session_accepted_msg_
+{
+ u32 context;
+ u64 listener_handle;
+ u64 handle;
+ u64 server_rx_fifo;
+ u64 server_tx_fifo;
+ u64 vpp_event_queue_address;
+ u64 server_event_queue_address;
+ u16 port;
+ u8 is_ip4;
+ u8 ip[16];
+} session_accepted_msg_t;
+
+/** Reply to a session accepted notification; handle identifies the
+ * session, retval reports acceptance status. */
+typedef struct session_accepted_reply_msg_
+{
+ u32 context;
+ i32 retval;
+ u64 handle;
+} session_accepted_reply_msg_t;
+
+/* Make sure this is not too large, otherwise it won't fit when dequeued in
+ * the session queue node */
+STATIC_ASSERT (sizeof (session_accepted_reply_msg_t) <= 16, "accept reply");
+
+/** Result of a connect request: on success carries the session handle,
+ * fifo/event-queue addresses, segment info and the local endpoint. */
+typedef struct session_connected_msg_
+{
+ u32 context;
+ i32 retval;
+ u64 handle;
+ u64 server_rx_fifo;
+ u64 server_tx_fifo;
+ u64 vpp_event_queue_address;
+ u64 client_event_queue_address;
+ u32 segment_size;
+ u8 segment_name_length;
+ u8 segment_name[64];
+ u8 lcl_ip[16];
+ u8 is_ip4;
+ u16 lcl_port;
+} session_connected_msg_t;
+
+/** Session disconnect notification. */
+typedef struct session_disconnected_msg_
+{
+ u32 client_index;
+ u32 context;
+ u64 handle;
+} session_disconnected_msg_t;
+
+/** Reply to a session disconnect notification. */
+typedef struct session_disconnected_reply_msg_
+{
+ u32 context;
+ i32 retval;
+ u64 handle;
+} session_disconnected_reply_msg_t;
+
+/** Session reset notification. */
+typedef struct session_reset_msg_
+{
+ u32 client_index;
+ u32 context;
+ u64 handle;
+} session_reset_msg_t;
+
+/** Reply to a session reset notification. */
+typedef struct session_reset_reply_msg_
+{
+ u32 client_index;
+ u32 context;
+ i32 retval;
+ u64 handle;
+} session_reset_reply_msg_t;
+
+/** Pairs an allocated message-queue slot with the session event payload
+ * it holds, so alloc and send can be done in two steps. */
+typedef struct app_session_event_
+{
+ svm_msg_q_msg_t msg;
+ session_event_t *evt;
+} app_session_evt_t;
+
+/**
+ * Allocate a ctrl-ring message from @a mq and initialize its event.
+ *
+ * Blocks (SVM_Q_WAIT) until a ring slot is free. The queue lock is
+ * released before the event data is written: the slot is already
+ * reserved, so only the allocation itself needs the lock.
+ */
+static inline void
+app_alloc_ctrl_evt_to_vpp (svm_msg_q_t * mq, app_session_evt_t * app_evt,
+ u8 evt_type)
+{
+ svm_msg_q_lock_and_alloc_msg_w_ring (mq,
+ SESSION_MQ_CTRL_EVT_RING,
+ SVM_Q_WAIT, &app_evt->msg);
+ svm_msg_q_unlock (mq);
+ app_evt->evt = svm_msg_q_msg_data (mq, &app_evt->msg);
+ memset (app_evt->evt, 0, sizeof (*app_evt->evt));
+ app_evt->evt->event_type = evt_type;
+}
+
+/** Enqueue a ctrl event previously allocated with
+ * app_alloc_ctrl_evt_to_vpp(); blocks until the queue has room. */
+static inline void
+app_send_ctrl_evt_to_vpp (svm_msg_q_t * mq, app_session_evt_t * app_evt)
+{
+ svm_msg_q_add (mq, &app_evt->msg, SVM_Q_WAIT);
+}
+
+/**
+ * Send fifo io event to vpp worker thread
+ *
+ * Because there may be multiple writers to one of vpp's queues, this
+ * protects message allocation and enqueueing.
+ *
+ * @param mq vpp message queue
+ * @param f fifo for which the event is sent
+ * @param evt_type type of event
+ * @param noblock flag to indicate if request is blocking or not
+ * @return 0 if success, negative integer otherwise
+ */
+static inline int
+app_send_io_evt_to_vpp (svm_msg_q_t * mq, svm_fifo_t * f, u8 evt_type,
+ u8 noblock)
+{
+ session_event_t *evt;
+ svm_msg_q_msg_t msg;
+
+ if (noblock)
+ {
+ /* Best-effort path: bail out instead of waiting on lock or space. */
+ if (svm_msg_q_try_lock (mq))
+ return -1;
+ if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
+ {
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
+ {
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt->fifo = f;
+ evt->event_type = evt_type;
+ svm_msg_q_add_and_unlock (mq, &msg);
+ return 0;
+ }
+ else
+ {
+ svm_msg_q_lock (mq);
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ while (svm_msg_q_msg_is_invalid (&msg))
+ {
+ svm_msg_q_wait (mq);
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ }
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt->fifo = f;
+ evt->event_type = evt_type;
+ /* Waiting drops the queue lock, so another producer may refill the
+ * queue before we wake: recheck fullness in a loop, not once. */
+ while (svm_msg_q_is_full (mq))
+ svm_msg_q_wait (mq);
+ svm_msg_q_add_and_unlock (mq, &msg);
+ return 0;
+ }
+}
+
always_inline int
app_send_dgram_raw (svm_fifo_t * f, app_session_transport_t * at,
- svm_queue_t * vpp_evt_q, u8 * data, u32 len, u8 noblock)
+ svm_msg_q_t * vpp_evt_q, u8 * data, u32 len, u8 noblock)
{
u32 max_enqueue, actual_write;
session_dgram_hdr_t hdr;
- session_fifo_event_t evt;
int rv;
max_enqueue = svm_fifo_max_enqueue (f);
- if (svm_fifo_max_enqueue (f) <= sizeof (session_dgram_hdr_t))
+ if (max_enqueue <= sizeof (session_dgram_hdr_t))
return 0;
max_enqueue -= sizeof (session_dgram_hdr_t);
clib_memcpy (&hdr.lcl_ip, &at->lcl_ip, sizeof (ip46_address_t));
hdr.lcl_port = at->lcl_port;
rv = svm_fifo_enqueue_nowait (f, sizeof (hdr), (u8 *) & hdr);
- if (rv <= 0)
- return 0;
-
+/* The early-return above guarantees room for the header, so this enqueue
+ * cannot come up short; the old rv <= 0 check was dead code. */
ASSERT (rv == sizeof (hdr));
+/* NOTE(review): actual_write and hdr's rmt_ip/rmt_port/data_length are not
+ * assigned anywhere in this hunk — presumably set in elided context lines
+ * (e.g. actual_write = clib_min (max_enqueue, len)); confirm in full file. */
if ((rv = svm_fifo_enqueue_nowait (f, actual_write, data)) > 0)
{
if (svm_fifo_set_event (f))
- {
- evt.fifo = f;
- evt.event_type = FIFO_EVENT_APP_TX;
- svm_queue_add (vpp_evt_q, (u8 *) & evt, noblock);
- }
+ app_send_io_evt_to_vpp (vpp_evt_q, f, FIFO_EVENT_APP_TX, noblock);
}
+ ASSERT (rv);
return rv;
}
}
+/**
+ * Enqueue stream data into @a f; if svm_fifo_set_event indicates no event
+ * was pending, raise FIFO_EVENT_APP_TX to vpp.
+ *
+ * @return enqueue result: bytes written if positive, otherwise the
+ * non-positive value from svm_fifo_enqueue_nowait.
+ */
always_inline int
-app_send_stream_raw (svm_fifo_t * f, svm_queue_t * vpp_evt_q, u8 * data,
+app_send_stream_raw (svm_fifo_t * f, svm_msg_q_t * vpp_evt_q, u8 * data,
u32 len, u8 noblock)
{
- session_fifo_event_t evt;
int rv;
if ((rv = svm_fifo_enqueue_nowait (f, len, data)) > 0)
{
if (svm_fifo_set_event (f))
- {
- evt.fifo = f;
- evt.event_type = FIFO_EVENT_APP_TX;
- svm_queue_add (vpp_evt_q, (u8 *) & evt, noblock);
- }
+ app_send_io_evt_to_vpp (vpp_evt_q, f, FIFO_EVENT_APP_TX, noblock);
}
return rv;
}