/*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 */
#include <vnet/session/session.h>
-#include <vlibmemory/api.h>
+#include <vnet/session/session_debug.h>
+#include <vnet/session/application.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/ip4_fib.h>
-#include <vnet/session/application.h>
-#include <vnet/tcp/tcp.h>
-
-/**
- * Per-type vector of transport protocol virtual function tables
- */
-static transport_proto_vft_t *tp_vfts;
session_manager_main_t session_manager_main;
-/*
- * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type)
- * Value: (owner thread index << 32 | session_index);
- */
-static void
-stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc,
- u64 value)
+static inline int
+session_send_evt_to_thread (void *data, void *args, u32 thread_index,
+ session_evt_type_t evt_type)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv4_t kv4;
- session_kv6_t kv6;
+ session_event_t *evt;
+ svm_msg_q_msg_t msg;
+ svm_msg_q_t *mq;
+ u32 tries = 0, max_tries;
- switch (sst)
+ mq = session_manager_get_vpp_event_queue (thread_index);
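+  /* Grab the mq lock. A vlib process can afford to spin for a long
+   * time, while input nodes must give up quickly to avoid stalling
+   * the graph */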
+ while (svm_msg_q_try_lock (mq))
+ {
+ max_tries = vlib_get_current_process (vlib_get_main ())? 1e6 : 3;
+ if (tries++ == max_tries)
+ {
+ SESSION_DBG ("failed to enqueue evt");
+ return -1;
+ }
+ }
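+  /* Give up if the IO event ring has no free message slots */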
+ if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
{
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv_from_tc (&kv4, tc);
- kv4.value = value;
- clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, 1 /* is_add */ );
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
+ if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
+ {
+ svm_msg_q_unlock (mq);
+ return -2;
+ }
+ evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
+ evt->event_type = evt_type;
+ switch (evt_type)
+ {
+ case FIFO_EVENT_RPC:
+ evt->rpc_args.fp = data;
+ evt->rpc_args.arg = args;
break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv_from_tc (&kv6, tc);
- kv6.value = value;
- clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, 1 /* is_add */ );
+ case FIFO_EVENT_APP_TX:
+ case SESSION_IO_EVT_TX_FLUSH:
+ case FIFO_EVENT_BUILTIN_RX:
+ evt->fifo = data;
+ break;
+ case FIFO_EVENT_BUILTIN_TX:
+ case FIFO_EVENT_DISCONNECT:
+ evt->session_handle = session_handle ((session_t *) data);
break;
default:
- clib_warning ("Session type not supported");
- ASSERT (0);
+ clib_warning ("evt unhandled!");
+ svm_msg_q_unlock (mq);
+ return -1;
}
+
+ svm_msg_q_add_and_unlock (mq, &msg);
+ return 0;
}
-void
-stream_session_table_add (session_manager_main_t * smm, stream_session_t * s,
- u64 value)
+int
+session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
{
- transport_connection_t *tc;
-
- tc = tp_vfts[s->session_type].get_connection (s->connection_index,
- s->thread_index);
- stream_session_table_add_for_tc (s->session_type, tc, value);
+ return session_send_evt_to_thread (f, 0, f->master_thread_index, evt_type);
}
-static void
-stream_session_half_open_table_add (u8 sst, transport_connection_t * tc,
- u64 value)
+int
+session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
+ session_evt_type_t evt_type)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv4_t kv4;
- session_kv6_t kv6;
-
- switch (sst)
- {
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv_from_tc (&kv4, tc);
- kv4.value = value;
- clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
- 1 /* is_add */ );
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv_from_tc (&kv6, tc);
- kv6.value = value;
- clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
- 1 /* is_add */ );
- break;
- default:
- clib_warning ("Session type not supported");
- ASSERT (0);
- }
+ return session_send_evt_to_thread (data, 0, thread_index, evt_type);
}
-static int
-stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst,
- transport_connection_t * tc)
+int
+session_send_ctrl_evt_to_thread (session_t * s, session_evt_type_t evt_type)
{
- session_kv4_t kv4;
- session_kv6_t kv6;
+ /* only event supported for now is disconnect */
+ ASSERT (evt_type == FIFO_EVENT_DISCONNECT);
+ return session_send_evt_to_thread (s, 0, s->thread_index,
+ FIFO_EVENT_DISCONNECT);
+}
- switch (sst)
+void
+session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args)
+{
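+  /* Deliver as an rpc event on the target thread's mq, unless the
+   * target is the current thread, in which case the function is
+   * invoked directly */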
+ if (thread_index != vlib_get_thread_index ())
+ session_send_evt_to_thread (fp, rpc_args, thread_index, FIFO_EVENT_RPC);
+ else
{
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv_from_tc (&kv4, tc);
- return clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4,
- 0 /* is_add */ );
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv_from_tc (&kv6, tc);
- return clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6,
- 0 /* is_add */ );
- break;
- default:
- clib_warning ("Session type not supported");
- ASSERT (0);
+ void (*fnp) (void *) = fp;
+ fnp (rpc_args);
}
-
- return 0;
}
-static int
-stream_session_table_del (session_manager_main_t * smm, stream_session_t * s)
+static void
+session_program_transport_close (session_t * s)
{
- transport_connection_t *ts;
+ u32 thread_index = vlib_get_thread_index ();
+ session_manager_worker_t *wrk;
+ session_event_t *evt;
- ts = tp_vfts[s->session_type].get_connection (s->connection_index,
- s->thread_index);
- return stream_session_table_del_for_tc (smm, s->session_type, ts);
+ /* If we are in the handler thread, or being called with the worker barrier
+ * held, just append a new event to pending disconnects vector. */
+ if (vlib_thread_is_main_w_barrier () || thread_index == s->thread_index)
+ {
+ wrk = session_manager_get_worker (s->thread_index);
+ vec_add2 (wrk->pending_disconnects, evt, 1);
+ clib_memset (evt, 0, sizeof (*evt));
+ evt->session_handle = session_handle (s);
+ evt->event_type = FIFO_EVENT_DISCONNECT;
+ }
+ else
+ session_send_ctrl_evt_to_thread (s, FIFO_EVENT_DISCONNECT);
}
-static void
-stream_session_half_open_table_del (session_manager_main_t * smm, u8 sst,
- transport_connection_t * tc)
+session_t *
+session_alloc (u32 thread_index)
{
- session_kv4_t kv4;
- session_kv6_t kv6;
-
- switch (sst)
+ session_manager_worker_t *wrk = &session_manager_main.wrk[thread_index];
+ session_t *s;
+ u8 will_expand = 0;
+ pool_get_aligned_will_expand (wrk->sessions, will_expand,
+ CLIB_CACHE_LINE_BYTES);
+ /* If we have peekers, let them finish */
+ if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
{
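+      /* pool_get below may reallocate the pool; take the writer lock
+       * so peekers holding reader locks don't dereference stale
+       * session pointers */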
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv_from_tc (&kv4, tc);
- clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
- 0 /* is_add */ );
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv_from_tc (&kv6, tc);
- clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
- 0 /* is_add */ );
- break;
- default:
- clib_warning ("Session type not supported");
- ASSERT (0);
+ clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
+ pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
+ clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
+ }
+ else
+ {
+ pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
}
+ clib_memset (s, 0, sizeof (*s));
+ s->session_index = s - wrk->sessions;
+ s->thread_index = thread_index;
+ return s;
}
-stream_session_t *
-stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto)
+void
+session_free (session_t * s)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv4_t kv4;
- int rv;
-
- make_v4_listener_kv (&kv4, lcl, lcl_port, proto);
- rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
- if (rv == 0)
- return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value);
-
- /* Zero out the lcl ip */
- kv4.key[0] = 0;
- rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
- if (rv == 0)
- return pool_elt_at_index (smm->listen_sessions[proto], kv4.value);
+ pool_put (session_manager_main.wrk[s->thread_index].sessions, s);
+ if (CLIB_DEBUG)
+ clib_memset (s, 0xFA, sizeof (*s));
+}
- return 0;
+void
+session_free_w_fifos (session_t * s)
+{
+ segment_manager_dealloc_fifos (s->svm_segment_index, s->rx_fifo,
+ s->tx_fifo);
+ session_free (s);
}
-/** Looks up a session based on the 5-tuple passed as argument.
+/**
+ * Cleans up session and lookup table.
*
- * First it tries to find an established session, if this fails, it tries
- * finding a listener session if this fails, it tries a lookup with a
- * wildcarded local source (listener bound to all interfaces)
+ * Transport connection must still be valid.
*/
-stream_session_t *
-stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+static void
+session_delete (session_t * s)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv4_t kv4;
int rv;
- /* Lookup session amongst established ones */
- make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
- if (rv == 0)
- return stream_session_get_tsi (kv4.value, my_thread_index);
+ /* Delete from the main lookup table. */
+ if ((rv = session_lookup_del_session (s)))
+ clib_warning ("hash delete error, rv %d", rv);
- /* If nothing is found, check if any listener is available */
- return stream_session_lookup_listener4 (lcl, lcl_port, proto);
+ session_free_w_fifos (s);
}
-stream_session_t *
-stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto)
+int
+session_alloc_fifos (segment_manager_t * sm, session_t * s)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv6_t kv6;
+ svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0;
+ u32 fifo_segment_index;
int rv;
- make_v6_listener_kv (&kv6, lcl, lcl_port, proto);
- rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
- if (rv == 0)
- return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);
+ if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo,
+ &server_tx_fifo,
+ &fifo_segment_index)))
+ return rv;
+ /* Initialize backpointers */
+ server_rx_fifo->master_session_index = s->session_index;
+ server_rx_fifo->master_thread_index = s->thread_index;
- /* Zero out the lcl ip */
- kv6.key[0] = kv6.key[1] = 0;
- rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
- if (rv == 0)
- return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);
+ server_tx_fifo->master_session_index = s->session_index;
+ server_tx_fifo->master_thread_index = s->thread_index;
+ s->rx_fifo = server_rx_fifo;
+ s->tx_fifo = server_tx_fifo;
+ s->svm_segment_index = fifo_segment_index;
return 0;
}
-/* Looks up a session based on the 5-tuple passed as argument.
- * First it tries to find an established session, if this fails, it tries
- * finding a listener session if this fails, it tries a lookup with a
- * wildcarded local source (listener bound to all interfaces) */
-stream_session_t *
-stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+static session_t *
+session_alloc_for_connection (transport_connection_t * tc)
{
- session_manager_main_t *smm = vnet_get_session_manager_main ();
- session_kv6_t kv6;
- int rv;
+ session_t *s;
+ u32 thread_index = tc->thread_index;
- make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
- if (rv == 0)
- return stream_session_get_tsi (kv6.value, my_thread_index);
+ ASSERT (thread_index == vlib_get_thread_index ()
+ || transport_protocol_is_cl (tc->proto));
- /* If nothing is found, check if any listener is available */
- return stream_session_lookup_listener6 (lcl, lcl_port, proto);
-}
+ s = session_alloc (thread_index);
+ s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
+ s->enqueue_epoch = (u64) ~ 0;
+ s->session_state = SESSION_STATE_CLOSED;
-stream_session_t *
-stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto)
-{
- switch (proto)
- {
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto);
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto);
- break;
- }
- return 0;
+ /* Attach transport to session and vice versa */
+ s->connection_index = tc->c_index;
+ tc->s_index = s->session_index;
+ return s;
}
-static u64
-stream_session_half_open_lookup (session_manager_main_t * smm,
- ip46_address_t * lcl, ip46_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+static int
+session_alloc_and_init (segment_manager_t * sm, transport_connection_t * tc,
+ u8 alloc_fifos, session_t ** ret_s)
{
- session_kv4_t kv4;
- session_kv6_t kv6;
+ session_t *s;
int rv;
- switch (proto)
+ s = session_alloc_for_connection (tc);
+ if (alloc_fifos && (rv = session_alloc_fifos (sm, s)))
{
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);
-
- if (rv == 0)
- return kv4.value;
-
- return (u64) ~ 0;
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
+ session_free (s);
+ *ret_s = 0;
+ return rv;
+ }
- if (rv == 0)
- return kv6.value;
+ /* Add to the main lookup table */
+ session_lookup_add_connection (tc, session_handle (s));
- return (u64) ~ 0;
- break;
- }
+ *ret_s = s;
return 0;
}
-transport_connection_t *
-stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+/**
+ * Discards bytes from a buffer chain
+ *
+ * Drops n_bytes_to_drop starting at the first buffer after chain_b
+ */
+always_inline void
+session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b,
+ vlib_buffer_t ** chain_b,
+ u32 n_bytes_to_drop)
{
- session_manager_main_t *smm = &session_manager_main;
- session_kv4_t kv4;
- stream_session_t *s;
- int rv;
-
- /* Lookup session amongst established ones */
- make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
- if (rv == 0)
+ vlib_buffer_t *next = *chain_b;
+ u32 to_drop = n_bytes_to_drop;
+ ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+ while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT))
{
- s = stream_session_get_tsi (kv4.value, my_thread_index);
-
- return tp_vfts[s->session_type].get_connection (s->connection_index,
- my_thread_index);
+ next = vlib_get_buffer (vm, next->next_buffer);
+ if (next->current_length > to_drop)
+ {
+ vlib_buffer_advance (next, to_drop);
+ to_drop = 0;
+ }
+ else
+ {
+ to_drop -= next->current_length;
+ next->current_length = 0;
+ }
}
+ *chain_b = next;
- /* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener4 (lcl, lcl_port, proto);
- if (s)
- return tp_vfts[s->session_type].get_listener (s->connection_index);
-
- /* Finally, try half-open connections */
- rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);
- if (rv == 0)
- return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF);
-
- return 0;
+ if (to_drop == 0)
+ b->total_length_not_including_first_buffer -= n_bytes_to_drop;
}
-transport_connection_t *
-stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+/**
+ * Enqueue buffer chain tail
+ */
+always_inline int
+session_enqueue_chain_tail (session_t * s, vlib_buffer_t * b,
+ u32 offset, u8 is_in_order)
{
- session_manager_main_t *smm = &session_manager_main;
- stream_session_t *s;
- session_kv6_t kv6;
- int rv;
+ vlib_buffer_t *chain_b;
+ u32 chain_bi, len, diff;
+ vlib_main_t *vm = vlib_get_main ();
+ u8 *data;
+ u32 written = 0;
+ int rv = 0;
+
+ if (is_in_order && offset)
+ {
+ diff = offset - b->current_length;
+ if (diff > b->total_length_not_including_first_buffer)
+ return 0;
+ chain_b = b;
+ session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff);
+ chain_bi = vlib_get_buffer_index (vm, chain_b);
+ }
+ else
+ chain_bi = b->next_buffer;
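+
+  /* Walk the rest of the chain, enqueueing each buffer's payload */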
- make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
- if (rv == 0)
+ do
{
- s = stream_session_get_tsi (kv6.value, my_thread_index);
+ chain_b = vlib_get_buffer (vm, chain_bi);
+ data = vlib_buffer_get_current (chain_b);
+ len = chain_b->current_length;
+ if (!len)
+ continue;
+ if (is_in_order)
+ {
+ rv = svm_fifo_enqueue_nowait (s->rx_fifo, len, data);
+ if (rv == len)
+ {
+ written += rv;
+ }
+ else if (rv < len)
+ {
+ return (rv > 0) ? (written + rv) : written;
+ }
+ else if (rv > len)
+ {
+ written += rv;
- return tp_vfts[s->session_type].get_connection (s->connection_index,
- my_thread_index);
- }
+ /* written more than what was left in chain */
+ if (written > b->total_length_not_including_first_buffer)
+ return written;
- /* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener6 (lcl, lcl_port, proto);
- if (s)
- return tp_vfts[s->session_type].get_listener (s->connection_index);
+ /* drop the bytes that have already been delivered */
+ session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len);
+ }
+ }
+ else
+ {
+ rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset, len, data);
+ if (rv)
+ {
+ clib_warning ("failed to enqueue multi-buffer seg");
+ return -1;
+ }
+ offset += len;
+ }
+ }
+ while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ ? chain_b->next_buffer : 0));
- /* Finally, try half-open connections */
- rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
- if (rv == 0)
- return tp_vfts[s->session_type].get_half_open (kv6.value & 0xFFFFFFFF);
+ if (is_in_order)
+ return written;
return 0;
}
-/**
- * Allocate vpp event queue (once) per worker thread
+/*
+ * Enqueue data for delivery to the session peer. Does not notify the peer of
+ * the enqueue event but, on request, can queue notification events for later
+ * delivery by calling session_manager_flush_enqueue_events().
+ *
+ * @param tc Transport connection for which the data is to be enqueued
+ * @param b Buffer to be enqueued
+ * @param offset Offset at which to start enqueueing if out-of-order
+ * @param queue_event Flag to indicate if peer is to be notified or if event
+ * is to be queued. The former is useful when more data is
+ * enqueued and only one event is to be generated.
+ * @param is_in_order Flag to indicate if data is in order
+ * @return Number of bytes enqueued or a negative value if enqueueing failed.
*/
-void
-vpp_session_event_queue_allocate (session_manager_main_t * smm,
- u32 thread_index)
+int
+session_enqueue_stream_connection (transport_connection_t * tc,
+ vlib_buffer_t * b, u32 offset,
+ u8 queue_event, u8 is_in_order)
{
- api_main_t *am = &api_main;
- void *oldheap;
+ session_t *s;
+ int enqueued = 0, rv, in_order_off;
- if (smm->vpp_event_queues[thread_index] == 0)
+ s = session_get (tc->s_index, tc->thread_index);
+
+ if (is_in_order)
{
- /* Allocate event fifo in the /vpe-api shared-memory segment */
- oldheap = svm_push_data_heap (am->vlib_rp);
-
- smm->vpp_event_queues[thread_index] =
- unix_shared_memory_queue_init (2048 /* nels $$$$ config */ ,
- sizeof (session_fifo_event_t),
- 0 /* consumer pid */ ,
- 0
- /* (do not) send signal when queue non-empty */
- );
-
- svm_pop_heap (oldheap);
+ enqueued = svm_fifo_enqueue_nowait (s->rx_fifo,
+ b->current_length,
+ vlib_buffer_get_current (b));
+ if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ && enqueued >= 0))
+ {
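+          /* The fifo may accept more than current_length if the write
+           * completed previously buffered ooo segments; skip the extra
+           * bytes when enqueueing the chain tail */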
+ in_order_off = enqueued > b->current_length ? enqueued : 0;
+ rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
+ if (rv > 0)
+ enqueued += rv;
+ }
+ }
+ else
+ {
+ rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset,
+ b->current_length,
+ vlib_buffer_get_current (b));
+ if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv))
+ session_enqueue_chain_tail (s, b, offset + b->current_length, 0);
+ /* if something was enqueued, report even this as success for ooo
+ * segment handling */
+ return rv;
}
-}
-void
-session_manager_get_segment_info (u32 index, u8 ** name, u32 * size)
-{
- svm_fifo_segment_private_t *s;
- s = svm_fifo_get_segment (index);
- *name = s->h->segment_name;
- *size = s->ssvm.ssvm_size;
+ if (queue_event)
+ {
+      /* Queue RX event on this fifo. Eventually these will need to be flushed
+       * by calling session_manager_flush_enqueue_events () */
+ session_manager_worker_t *wrk;
+
+ wrk = session_manager_get_worker (s->thread_index);
+ if (s->enqueue_epoch != wrk->current_enqueue_epoch[tc->proto])
+ {
+ s->enqueue_epoch = wrk->current_enqueue_epoch[tc->proto];
+ vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
+ }
+ }
+
+ return enqueued;
}
-always_inline int
-session_manager_add_segment_i (session_manager_main_t * smm,
- session_manager_t * sm,
- u32 segment_size, u8 * segment_name)
+int
+session_enqueue_dgram_connection (session_t * s,
+ session_dgram_hdr_t * hdr,
+ vlib_buffer_t * b, u8 proto, u8 queue_event)
{
- svm_fifo_segment_create_args_t _ca, *ca = &_ca;
- int rv;
-
- memset (ca, 0, sizeof (*ca));
+ int enqueued = 0, rv, in_order_off;
- ca->segment_name = (char *) segment_name;
- ca->segment_size = segment_size;
+ ASSERT (svm_fifo_max_enqueue (s->rx_fifo)
+ >= b->current_length + sizeof (*hdr));
- rv = svm_fifo_segment_create (ca);
- if (rv)
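+  /* Write the dgram header first; the reader dequeues it to find the
+   * length and endpoints of the data that follows */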
+ svm_fifo_enqueue_nowait (s->rx_fifo, sizeof (session_dgram_hdr_t),
+ (u8 *) hdr);
+ enqueued = svm_fifo_enqueue_nowait (s->rx_fifo, b->current_length,
+ vlib_buffer_get_current (b));
+ if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
{
- clib_warning ("svm_fifo_segment_create ('%s', %d) failed",
- ca->segment_name, ca->segment_size);
- vec_free (segment_name);
- return -1;
+ in_order_off = enqueued > b->current_length ? enqueued : 0;
+ rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
+ if (rv > 0)
+ enqueued += rv;
}
+ if (queue_event)
+ {
+      /* Queue RX event on this fifo. Eventually these will need to be flushed
+       * by calling session_manager_flush_enqueue_events () */
+ session_manager_worker_t *wrk;
- vec_add1 (sm->segment_indices, ca->new_segment_index);
+ wrk = session_manager_get_worker (s->thread_index);
+ if (s->enqueue_epoch != wrk->current_enqueue_epoch[proto])
+ {
+ s->enqueue_epoch = wrk->current_enqueue_epoch[proto];
+ vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
+ }
+ }
+ return enqueued;
+}
+
+/** Check if rx fifo lacks the space to push data_len more bytes */
+u8
+stream_session_no_space (transport_connection_t * tc, u32 thread_index,
+ u16 data_len)
+{
+ session_t *s = session_get (tc->s_index, thread_index);
+
+ if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
+ return 1;
+
+ if (data_len > svm_fifo_max_enqueue (s->rx_fifo))
+ return 1;
return 0;
}
-static int
-session_manager_add_segment (session_manager_main_t * smm,
- session_manager_t * sm)
+u32
+session_tx_fifo_max_dequeue (transport_connection_t * tc)
{
- u8 *segment_name;
- svm_fifo_segment_create_args_t _ca, *ca = &_ca;
- u32 add_segment_size;
- u32 default_segment_size = 128 << 10;
-
- memset (ca, 0, sizeof (*ca));
- segment_name = format (0, "%d-%d%c", getpid (),
- smm->unique_segment_name_counter++, 0);
- add_segment_size =
- sm->add_segment_size ? sm->add_segment_size : default_segment_size;
-
- return session_manager_add_segment_i (smm, sm, add_segment_size,
- segment_name);
+ session_t *s = session_get (tc->s_index, tc->thread_index);
+ if (!s->tx_fifo)
+ return 0;
+ return svm_fifo_max_dequeue (s->tx_fifo);
}
int
-session_manager_add_first_segment (session_manager_main_t * smm,
- session_manager_t * sm, u32 segment_size,
- u8 ** segment_name)
+stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
+ u32 offset, u32 max_bytes)
{
- svm_fifo_segment_create_args_t _ca, *ca = &_ca;
- memset (ca, 0, sizeof (*ca));
- *segment_name = format (0, "%d-%d%c", getpid (),
- smm->unique_segment_name_counter++, 0);
- return session_manager_add_segment_i (smm, sm, segment_size, *segment_name);
+ session_t *s = session_get (tc->s_index, tc->thread_index);
+ return svm_fifo_peek (s->tx_fifo, offset, max_bytes, buffer);
}
-void
-session_manager_del (session_manager_main_t * smm, session_manager_t * sm)
+u32
+stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
{
- u32 *deleted_sessions = 0;
- u32 *deleted_thread_indices = 0;
- int i, j;
-
- /* Across all fifo segments used by the server */
- for (j = 0; j < vec_len (sm->segment_indices); j++)
- {
- svm_fifo_segment_private_t *fifo_segment;
- svm_fifo_t **fifos;
- /* Vector of fifos allocated in the segment */
- fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]);
- fifos = (svm_fifo_t **) fifo_segment->h->fifos;
-
- /*
- * Remove any residual sessions from the session lookup table
- * Don't bother deleting the individual fifos, we're going to
- * throw away the fifo segment in a minute.
- */
- for (i = 0; i < vec_len (fifos); i++)
- {
- svm_fifo_t *fifo;
- u32 session_index, thread_index;
- stream_session_t *session;
+ session_t *s = session_get (tc->s_index, tc->thread_index);
+ return svm_fifo_dequeue_drop (s->tx_fifo, max_bytes);
+}
- fifo = fifos[i];
- session_index = fifo->server_session_index;
- thread_index = fifo->server_thread_index;
+static inline int
+session_notify_subscribers (u32 app_index, session_t * s,
+ svm_fifo_t * f, session_evt_type_t evt_type)
+{
+ app_worker_t *app_wrk;
+ application_t *app;
+ int i;
- session = pool_elt_at_index (smm->sessions[thread_index],
- session_index);
+ app = application_get (app_index);
+ if (!app)
+ return -1;
- /* Add to the deleted_sessions vector (once!) */
- if (!session->is_deleted)
- {
- session->is_deleted = 1;
- vec_add1 (deleted_sessions,
- session - smm->sessions[thread_index]);
- vec_add1 (deleted_thread_indices, thread_index);
- }
- }
+ for (i = 0; i < f->n_subscribers; i++)
+ {
+ app_wrk = application_get_worker (app, f->subscribers[i]);
+ if (!app_wrk)
+ continue;
+ if (app_worker_lock_and_send_event (app_wrk, s, evt_type))
+ return -1;
+ }
- for (i = 0; i < vec_len (deleted_sessions); i++)
- {
- stream_session_t *session;
+ return 0;
+}
- session =
- pool_elt_at_index (smm->sessions[deleted_thread_indices[i]],
- deleted_sessions[i]);
+/**
+ * Notify session peer that new data has been enqueued.
+ *
+ * @param s Stream session for which the event is to be generated.
+ *
+ * @return 0 on success or negative number if failed to send notification.
+ */
+static inline int
+session_enqueue_notify (session_t * s)
+{
+ app_worker_t *app_wrk;
- /* Instead of directly removing the session call disconnect */
- stream_session_disconnect (session);
+ app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+ if (PREDICT_FALSE (!app_wrk))
+ {
+ SESSION_DBG ("invalid s->app_index = %d", s->app_wrk_index);
+ return 0;
+ }
- /*
- stream_session_table_del (smm, session);
- pool_put(smm->sessions[deleted_thread_indices[i]], session);
- */
- }
+ /* *INDENT-OFF* */
+ SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({
+ ed->data[0] = FIFO_EVENT_APP_RX;
+ ed->data[1] = svm_fifo_max_dequeue (s->rx_fifo);
+ }));
+ /* *INDENT-ON* */
- vec_reset_length (deleted_sessions);
- vec_reset_length (deleted_thread_indices);
+ if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
+ FIFO_EVENT_APP_RX)))
+ return -1;
- /* Instead of removing the segment, test when removing the session if
- * the segment can be removed
- */
- /* svm_fifo_segment_delete (fifo_segment); */
- }
+ if (PREDICT_FALSE (svm_fifo_n_subscribers (s->rx_fifo)))
+ return session_notify_subscribers (app_wrk->app_index, s,
+ s->rx_fifo, FIFO_EVENT_APP_RX);
- vec_free (deleted_sessions);
- vec_free (deleted_thread_indices);
+ return 0;
}
int
-session_manager_allocate_session_fifos (session_manager_main_t * smm,
- session_manager_t * sm,
- svm_fifo_t ** server_rx_fifo,
- svm_fifo_t ** server_tx_fifo,
- u32 * fifo_segment_index,
- u8 * added_a_segment)
+session_dequeue_notify (session_t * s)
{
- svm_fifo_segment_private_t *fifo_segment;
- u32 fifo_size, default_fifo_size = 128 << 10; /* TODO config */
- int i;
-
- *added_a_segment = 0;
-
- /* Allocate svm fifos */
- ASSERT (vec_len (sm->segment_indices));
-
-again:
- for (i = 0; i < vec_len (sm->segment_indices); i++)
- {
- *fifo_segment_index = sm->segment_indices[i];
- fifo_segment = svm_fifo_get_segment (*fifo_segment_index);
-
- fifo_size = sm->rx_fifo_size;
- fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size;
- *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size);
+ app_worker_t *app_wrk;
- fifo_size = sm->tx_fifo_size;
- fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size;
- *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size);
+ app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+ if (PREDICT_FALSE (!app_wrk))
+ return -1;
- if (*server_rx_fifo == 0)
- {
- /* This would be very odd, but handle it... */
- if (*server_tx_fifo != 0)
- {
- svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo);
- *server_tx_fifo = 0;
- }
- continue;
- }
- if (*server_tx_fifo == 0)
- {
- if (*server_rx_fifo != 0)
- {
- svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo);
- *server_rx_fifo = 0;
- }
- continue;
- }
- break;
- }
+ if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
+ FIFO_EVENT_APP_TX)))
+ return -1;
- /* See if we're supposed to create another segment */
- if (*server_rx_fifo == 0)
- {
- if (sm->add_segment)
- {
- if (*added_a_segment)
- {
- clib_warning ("added a segment, still cant allocate a fifo");
- return SESSION_ERROR_NEW_SEG_NO_SPACE;
- }
+ if (PREDICT_FALSE (s->tx_fifo->n_subscribers))
+ return session_notify_subscribers (app_wrk->app_index, s,
+ s->tx_fifo, FIFO_EVENT_APP_TX);
- if (session_manager_add_segment (smm, sm))
- return VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
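+  /* App was notified; reset the fifo's tx notification request */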
+ svm_fifo_clear_tx_ntf (s->tx_fifo);
- *added_a_segment = 1;
- goto again;
- }
- else
- return SESSION_ERROR_NO_SPACE;
- }
return 0;
}
+/**
+ * Flushes queue of sessions that are to be notified of new data
+ * enqueued events.
+ *
+ * @param thread_index Thread index for which the flush is to be performed.
+ * @return 0 on success or a positive number indicating the number of
+ * sessions that could not be notified.
+ */
int
-stream_session_create_i (session_manager_main_t * smm, application_t * app,
- transport_connection_t * tc,
- stream_session_t ** ret_s)
+session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index)
{
- int rv;
- svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0;
- u32 fifo_segment_index;
- u32 pool_index, seg_size;
- stream_session_t *s;
- u64 value;
- u32 thread_index = tc->thread_index;
- session_manager_t *sm;
- u8 segment_added;
- u8 *seg_name;
-
- sm = session_manager_get (app->session_manager_index);
-
- /* Check the API queue */
- if (app->mode == APP_SERVER && application_api_queue_is_full (app))
- return SESSION_ERROR_API_QUEUE_FULL;
+ session_manager_worker_t *wrk = session_manager_get_worker (thread_index);
+ session_t *s;
+ int i, errors = 0;
+ u32 *indices;
- if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo,
- &server_tx_fifo,
- &fifo_segment_index,
- &segment_added)))
- return rv;
+ indices = wrk->session_to_enqueue[transport_proto];
- if (segment_added && app->mode == APP_SERVER)
+ for (i = 0; i < vec_len (indices); i++)
{
- /* Send an API message to the external server, to map new segment */
- ASSERT (app->cb_fns.add_segment_callback);
-
- session_manager_get_segment_info (fifo_segment_index, &seg_name,
- &seg_size);
- if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name,
- seg_size))
- return VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
+ s = session_get_if_valid (indices[i], thread_index);
+ if (PREDICT_FALSE (!s))
+ {
+ errors++;
+ continue;
+ }
+ if (PREDICT_FALSE (session_enqueue_notify (s)))
+ errors++;
}
- /* Create the session */
- pool_get (smm->sessions[thread_index], s);
- memset (s, 0, sizeof (*s));
-
- /* Initialize backpointers */
- pool_index = s - smm->sessions[thread_index];
- server_rx_fifo->server_session_index = pool_index;
- server_rx_fifo->server_thread_index = thread_index;
-
- server_tx_fifo->server_session_index = pool_index;
- server_tx_fifo->server_thread_index = thread_index;
-
- s->server_rx_fifo = server_rx_fifo;
- s->server_tx_fifo = server_tx_fifo;
+ vec_reset_length (indices);
+ wrk->session_to_enqueue[transport_proto] = indices;
+ wrk->current_enqueue_epoch[transport_proto]++;
- /* Initialize state machine, such as it is... */
- s->session_type = app->session_type;
- s->session_state = SESSION_STATE_CONNECTING;
- s->app_index = application_get_index (app);
- s->server_segment_index = fifo_segment_index;
- s->thread_index = thread_index;
- s->session_index = pool_index;
-
- /* Attach transport to session */
- s->connection_index = tc->c_index;
-
- /* Attach session to transport */
- tc->s_index = s->session_index;
-
- /* Add to the main lookup table */
- value = (((u64) thread_index) << 32) | (u64) s->session_index;
- stream_session_table_add_for_tc (app->session_type, tc, value);
-
- *ret_s = s;
+ return errors;
+}
- return 0;
+int
+session_manager_flush_all_enqueue_events (u8 transport_proto)
+{
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ int i, errors = 0;
+ for (i = 0; i < 1 + vtm->n_threads; i++)
+ errors += session_manager_flush_enqueue_events (transport_proto, i);
+ return errors;
}
-/*
- * Enqueue data for delivery to session peer. Does not notify peer of enqueue
- * event but on request can queue notification events for later delivery by
- * calling stream_server_flush_enqueue_events().
+/**
+ * Init fifo tail and head pointers
*
- * @param tc Transport connection which is to be enqueued data
- * @param data Data to be enqueued
- * @param len Length of data to be enqueued
- * @param queue_event Flag to indicate if peer is to be notified or if event
- * is to be queued. The former is useful when more data is
- * enqueued and only one event is to be generated.
- * @return Number of bytes enqueued or a negative value if enqueueing failed.
+ * Useful if transport uses absolute offsets for tracking ooo segments.
*/
-int
-stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len,
- u8 queue_event)
+void
+stream_session_init_fifos_pointers (transport_connection_t * tc,
+ u32 rx_pointer, u32 tx_pointer)
{
- stream_session_t *s;
- int enqueued;
+ session_t *s;
+ s = session_get (tc->s_index, tc->thread_index);
+ svm_fifo_init_pointers (s->rx_fifo, rx_pointer);
+ svm_fifo_init_pointers (s->tx_fifo, tx_pointer);
+}
- s = stream_session_get (tc->s_index, tc->thread_index);
+int
+session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
+{
+ u32 opaque = 0, new_ti, new_si;
+ session_t *new_s = 0;
+ segment_manager_t *sm;
+ app_worker_t *app_wrk;
+ application_t *app;
+ u8 alloc_fifos;
+ int error = 0;
+ u64 handle;
+
+ /*
+   * Find connection handle and clean up half-open table
+ */
+ handle = session_lookup_half_open_handle (tc);
+ if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
+ {
+ SESSION_DBG ("half-open was removed!");
+ return -1;
+ }
+ session_lookup_del_half_open (tc);
- /* Make sure there's enough space left. We might've filled the pipes */
- if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo)))
+ /* Get the app's index from the handle we stored when opening connection
+ * and the opaque (api_context for external apps) from transport session
+ * index */
+ app_wrk = app_worker_get_if_valid (handle >> 32);
+ if (!app_wrk)
return -1;
+ opaque = tc->s_index;
+ app = application_get (app_wrk->app_index);
- enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data);
-
- if (queue_event)
+ /*
+ * Allocate new session with fifos (svm segments are allocated if needed)
+ */
+ if (!is_fail)
{
- /* Queue RX event on this fifo. Eventually these will need to be flushed
- * by calling stream_server_flush_enqueue_events () */
- session_manager_main_t *smm = vnet_get_session_manager_main ();
- u32 thread_index = s->thread_index;
- u32 my_enqueue_epoch = smm->current_enqueue_epoch[thread_index];
+ sm = app_worker_get_connect_segment_manager (app_wrk);
+ alloc_fifos = !application_is_builtin_proxy (app);
+ if (session_alloc_and_init (sm, tc, alloc_fifos, &new_s))
+ {
+ is_fail = 1;
+ error = -1;
+ }
+ else
+ {
+ new_s->session_state = SESSION_STATE_CONNECTING;
+ new_s->app_wrk_index = app_wrk->wrk_index;
+ new_si = new_s->session_index;
+ new_ti = new_s->thread_index;
+ }
+ }
- if (s->enqueue_epoch != my_enqueue_epoch)
+ /*
+ * Notify client application
+ */
+ if (app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque,
+ new_s, is_fail))
+ {
+ SESSION_DBG ("failed to notify app");
+ if (!is_fail)
{
- s->enqueue_epoch = my_enqueue_epoch;
- vec_add1 (smm->session_indices_to_enqueue_by_thread[thread_index],
- s - smm->sessions[thread_index]);
+ new_s = session_get (new_si, new_ti);
+ session_transport_close (new_s);
+ }
+ }
+ else
+ {
+ if (!is_fail)
+ {
+ new_s = session_get (new_si, new_ti);
+ new_s->session_state = SESSION_STATE_READY;
}
}
- return enqueued;
+ return error;
}
-/** Check if we have space in rx fifo to push more bytes */
-u8
-stream_session_no_space (transport_connection_t * tc, u32 thread_index,
- u16 data_len)
+typedef struct _session_switch_pool_args
{
- stream_session_t *s = stream_session_get (tc->c_index, thread_index);
-
- if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
- return 1;
-
- if (data_len > svm_fifo_max_enqueue (s->server_rx_fifo))
- return 1;
+ u32 session_index;
+ u32 thread_index;
+ u32 new_thread_index;
+ u32 new_session_index;
+} session_switch_pool_args_t;
- return 0;
+static void
+session_switch_pool (void *cb_args)
+{
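+  /* Runs via rpc on the thread owning the old session: repoint the tx
+   * fifo to the new session, then clean up the old session */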
+ session_switch_pool_args_t *args = (session_switch_pool_args_t *) cb_args;
+ session_t *s;
+ ASSERT (args->thread_index == vlib_get_thread_index ());
+ s = session_get (args->session_index, args->thread_index);
+ s->tx_fifo->master_session_index = args->new_session_index;
+ s->tx_fifo->master_thread_index = args->new_thread_index;
+ transport_cleanup (session_get_transport_proto (s), s->connection_index,
+ s->thread_index);
+ session_free (s);
+ clib_mem_free (cb_args);
}
-u32
-stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
- u32 offset, u32 max_bytes)
+/**
+ * Move dgram session to the right thread
+ */
+int
+session_dgram_connect_notify (transport_connection_t * tc,
+ u32 old_thread_index, session_t ** new_session)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
- return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer);
+ session_t *new_s;
+ session_switch_pool_args_t *rpc_args;
+
+ /*
+ * Clone half-open session to the right thread.
+ */
+ new_s = session_clone_safe (tc->s_index, old_thread_index);
+ new_s->connection_index = tc->c_index;
+ new_s->rx_fifo->master_session_index = new_s->session_index;
+ new_s->rx_fifo->master_thread_index = new_s->thread_index;
+ new_s->session_state = SESSION_STATE_READY;
+ session_lookup_add_connection (tc, session_handle (new_s));
+
+ /*
+ * Ask thread owning the old session to clean it up and make us the tx
+ * fifo owner
+ */
+ rpc_args = clib_mem_alloc (sizeof (*rpc_args));
+ rpc_args->new_session_index = new_s->session_index;
+ rpc_args->new_thread_index = new_s->thread_index;
+ rpc_args->session_index = tc->s_index;
+ rpc_args->thread_index = old_thread_index;
+ session_send_rpc_evt_to_thread (rpc_args->thread_index, session_switch_pool,
+ rpc_args);
+
+ tc->s_index = new_s->session_index;
+ new_s->connection_index = tc->c_index;
+ *new_session = new_s;
+ return 0;
}
-u32
-stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
+int
+stream_session_accept_notify (transport_connection_t * tc)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
- return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes);
+ app_worker_t *app_wrk;
+ application_t *app;
+ session_t *s;
+
+ s = session_get (tc->s_index, tc->thread_index);
+ app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+ if (!app_wrk)
+ return -1;
+ s->session_state = SESSION_STATE_ACCEPTING;
+ app = application_get (app_wrk->app_index);
+ return app->cb_fns.session_accept_callback (s);
}
/**
- * Notify session peer that new data has been enqueued.
- *
- * @param s Stream session for which the event is to be generated.
- * @param block Flag to indicate if call should block if event queue is full.
+ * Notification from transport that connection is being closed.
*
- * @return 0 on succes or negative number if failed to send notification.
+ * A disconnect is sent to application but state is not removed. Once
+ * disconnect is acknowledged by application, session disconnect is called.
+ * Ultimately this leads to close being called on transport (passive close).
*/
-static int
-stream_session_enqueue_notify (stream_session_t * s, u8 block)
+void
+session_transport_closing_notify (transport_connection_t * tc)
{
+ app_worker_t *app_wrk;
application_t *app;
- session_fifo_event_t evt;
- unix_shared_memory_queue_t *q;
- static u32 serial_number;
+ session_t *s;
- if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED))
- return 0;
-
- /* Get session's server */
- app = application_get (s->app_index);
+ s = session_get (tc->s_index, tc->thread_index);
+ if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+ return;
+ s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
+ app_wrk = app_worker_get_if_valid (s->app_wrk_index);
+ if (!app_wrk)
+ return;
+ app = application_get (app_wrk->app_index);
+ app->cb_fns.session_disconnect_callback (s);
+}
- /* Fabricate event */
- evt.fifo = s->server_rx_fifo;
- evt.event_type = FIFO_EVENT_SERVER_RX;
- evt.event_id = serial_number++;
- evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo);
+/**
+ * Notification from transport that connection is being deleted
+ *
+ * This removes the session if it is still valid. It should be called only on
+ * previously fully established sessions. For instance, failed connects
+ * should call session_stream_connect_notify and indicate that the connect
+ * has failed.
+ */
+void
+session_transport_delete_notify (transport_connection_t * tc)
+{
+ session_t *s;
- /* Add event to server's event queue */
- q = app->event_queue;
+ /* App might've been removed already */
+ if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
+ return;
- /* Based on request block (or not) for lack of space */
- if (block || PREDICT_TRUE (q->cursize < q->maxsize))
- unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
- 0 /* do wait for mutex */ );
- else
- return -1;
+ /* Make sure we don't try to send anything more */
+ svm_fifo_dequeue_drop_all (s->tx_fifo);
- if (1)
+ switch (s->session_state)
{
- ELOG_TYPE_DECLARE (e) =
- {
- .format = "evt-enqueue: id %d length %d",.format_args = "i4i4",};
- struct
- {
- u32 data[2];
- } *ed;
- ed = ELOG_DATA (&vlib_global_main.elog_main, e);
- ed->data[0] = evt.event_id;
- ed->data[1] = evt.enqueue_length;
+ case SESSION_STATE_ACCEPTING:
+ case SESSION_STATE_TRANSPORT_CLOSING:
+ /* If transport finishes or times out before we get a reply
+ * from the app, mark transport as closed and wait for reply
+ * before removing the session. Cleanup session table in advance
+ * because transport will soon be closed and closed sessions
+ * are assumed to have been removed from the lookup table */
+ session_lookup_del_session (s);
+ s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
+ break;
+ case SESSION_STATE_CLOSING:
+ case SESSION_STATE_CLOSED_WAITING:
+ /* Cleanup lookup table as transport needs to still be valid.
+ * Program transport close to ensure that all session events
+ * have been cleaned up. Once transport close is called, the
+ * session is just removed because both transport and app have
+       * confirmed the close */
+ session_lookup_del_session (s);
+ s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
+ session_program_transport_close (s);
+ break;
+ case SESSION_STATE_TRANSPORT_CLOSED:
+ break;
+ case SESSION_STATE_CLOSED:
+ session_delete (s);
+ break;
+ default:
+ clib_warning ("session state %u", s->session_state);
+ session_delete (s);
+ break;
}
-
- return 0;
}
/**
- * Flushes queue of sessions that are to be notified of new data
- * enqueued events.
- *
- * @param thread_index Thread index for which the flush is to be performed.
- * @return 0 on success or a positive number indicating the number of
- * failures due to API queue being full.
+ * Notification from transport that session can be closed
+ *
+ * Should be called by transport only if it was closed with non-empty
+ * tx fifo and once it decides to begin the closing procedure prior to
+ * issuing a delete notify. This gives the session layer a chance to
+ * clean up any outstanding events.
*/
-int
-session_manager_flush_enqueue_events (u32 thread_index)
+void
+session_transport_closed_notify (transport_connection_t * tc)
{
- session_manager_main_t *smm = &session_manager_main;
- u32 *session_indices_to_enqueue;
- int i, errors = 0;
+ session_t *s;
- session_indices_to_enqueue =
- smm->session_indices_to_enqueue_by_thread[thread_index];
+ if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
+ return;
- for (i = 0; i < vec_len (session_indices_to_enqueue); i++)
+ /* If app close has not been received or has not yet resulted in
+ * a transport close, only mark the session transport as closed */
+ if (s->session_state <= SESSION_STATE_CLOSING)
{
- stream_session_t *s0;
-
- /* Get session */
- s0 = stream_session_get (session_indices_to_enqueue[i], thread_index);
- if (stream_session_enqueue_notify (s0, 0 /* don't block */ ))
- {
- errors++;
- }
+ session_lookup_del_session (s);
+ s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
}
+ else
+ s->session_state = SESSION_STATE_CLOSED;
+}
- vec_reset_length (session_indices_to_enqueue);
-
- smm->session_indices_to_enqueue_by_thread[thread_index] =
- session_indices_to_enqueue;
-
- /* Increment enqueue epoch for next round */
- smm->current_enqueue_epoch[thread_index]++;
-
- return errors;
+/**
+ * Notify application that connection has been reset.
+ */
+void
+session_transport_reset_notify (transport_connection_t * tc)
+{
+ session_t *s;
+ app_worker_t *app_wrk;
+ application_t *app;
+ s = session_get (tc->s_index, tc->thread_index);
+ svm_fifo_dequeue_drop_all (s->tx_fifo);
+ if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
+ return;
+ s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
+ app_wrk = app_worker_get (s->app_wrk_index);
+ app = application_get (app_wrk->app_index);
+ app->cb_fns.session_reset_callback (s);
}
-/*
- * Start listening on server's ip/port pair for requested transport.
- *
- * Creates a 'dummy' stream session with state LISTENING to be used in session
- * lookups, prior to establishing connection. Requests transport to build
- * it's own specific listening connection.
+/**
+ * Accept a stream session. Optionally ping the server by callback.
*/
int
-stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port)
+stream_session_accept (transport_connection_t * tc, u32 listener_index,
+ u8 notify)
{
- session_manager_main_t *smm = &session_manager_main;
- stream_session_t *s;
- transport_connection_t *tc;
- application_t *srv;
- u32 tci;
-
- srv = application_get (server_index);
-
- pool_get (smm->listen_sessions[srv->session_type], s);
- memset (s, 0, sizeof (*s));
-
- s->session_type = srv->session_type;
- s->session_state = SESSION_STATE_LISTENING;
- s->session_index = s - smm->listen_sessions[srv->session_type];
- s->app_index = srv->index;
+ session_t *s, *listener;
+ app_worker_t *app_wrk;
+ segment_manager_t *sm;
+ int rv;
- /* Transport bind/listen */
- tci = tp_vfts[srv->session_type].bind (smm->vlib_main, s->session_index, ip,
- port);
+ /* Find the server */
+ listener = listen_session_get (listener_index);
+ app_wrk = application_listener_select_worker (listener, 0);
- /* Attach transport to session */
- s->connection_index = tci;
- tc = tp_vfts[srv->session_type].get_listener (tci);
+ sm = app_worker_get_listen_segment_manager (app_wrk, listener);
+ if ((rv = session_alloc_and_init (sm, tc, 1, &s)))
+ return rv;
- srv->session_index = s->session_index;
+ s->app_wrk_index = app_wrk->wrk_index;
+ s->listener_index = listener_index;
- /* Add to the main lookup table */
- stream_session_table_add_for_tc (s->session_type, tc, s->session_index);
+ /* Shoulder-tap the server */
+ if (notify)
+ {
+ application_t *app = application_get (app_wrk->app_index);
+ return app->cb_fns.session_accept_callback (s);
+ }
return 0;
}
-void
-stream_session_stop_listen (u32 server_index)
+int
+session_open_cl (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
{
- session_manager_main_t *smm = &session_manager_main;
- stream_session_t *listener;
transport_connection_t *tc;
- application_t *srv;
-
- srv = application_get (server_index);
- listener = pool_elt_at_index (smm->listen_sessions[srv->session_type],
- srv->session_index);
-
- tc = tp_vfts[srv->session_type].get_listener (listener->connection_index);
- stream_session_table_del_for_tc (smm, listener->session_type, tc);
+ transport_endpoint_cfg_t *tep;
+ segment_manager_t *sm;
+ app_worker_t *app_wrk;
+ session_t *s;
+ application_t *app;
+ int rv;
- tp_vfts[srv->session_type].unbind (smm->vlib_main,
- listener->connection_index);
- pool_put (smm->listen_sessions[srv->session_type], listener);
-}
+ tep = session_endpoint_to_transport_cfg (rmt);
+ rv = transport_connect (rmt->transport_proto, tep);
+ if (rv < 0)
+ {
+ SESSION_DBG ("Transport failed to open connection.");
+ return VNET_API_ERROR_SESSION_CONNECT;
+ }
-int
-connect_server_add_segment_cb (application_t * ss, char *segment_name,
- u32 segment_size)
-{
- /* Does exactly nothing, but die */
- ASSERT (0);
- return 0;
-}
+ tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
-void
-connects_session_manager_init (session_manager_main_t * smm, u8 session_type)
-{
- session_manager_t *sm;
- u32 connect_fifo_size = 256 << 10; /* Config? */
- u32 default_segment_size = 1 << 20;
+  /* For dgram type of service, allocate session and fifos now */
+ app_wrk = app_worker_get (app_wrk_index);
+ sm = app_worker_get_connect_segment_manager (app_wrk);
- pool_get (smm->session_managers, sm);
- memset (sm, 0, sizeof (*sm));
+ if (session_alloc_and_init (sm, tc, 1, &s))
+ return -1;
+ s->app_wrk_index = app_wrk->wrk_index;
+ s->session_state = SESSION_STATE_OPENED;
- sm->add_segment_size = default_segment_size;
- sm->rx_fifo_size = connect_fifo_size;
- sm->tx_fifo_size = connect_fifo_size;
- sm->add_segment = 1;
+ /* Tell the app about the new event fifo for this session */
+ app = application_get (app_wrk->app_index);
+ app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque, s, 0);
- session_manager_add_segment (smm, sm);
- smm->connect_manager_index[session_type] = sm - smm->session_managers;
+ return 0;
}
-void
-stream_session_connect_notify (transport_connection_t * tc, u8 sst,
- u8 is_fail)
+int
+session_open_vc (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
{
- session_manager_main_t *smm = &session_manager_main;
- application_t *app;
- stream_session_t *new_s = 0;
- u64 value;
+ transport_connection_t *tc;
+ transport_endpoint_cfg_t *tep;
+ u64 handle;
+ int rv;
- value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip,
- tc->lcl_port, tc->rmt_port,
- tc->proto);
- if (value == HALF_OPEN_LOOKUP_INVALID_VALUE)
+ tep = session_endpoint_to_transport_cfg (rmt);
+ rv = transport_connect (rmt->transport_proto, tep);
+ if (rv < 0)
{
- clib_warning ("This can't be good!");
- return;
+ SESSION_DBG ("Transport failed to open connection.");
+ return VNET_API_ERROR_SESSION_CONNECT;
}
- app = application_get (value >> 32);
-
- if (!is_fail)
- {
- /* Create new session (server segments are allocated if needed) */
- if (stream_session_create_i (smm, app, tc, &new_s))
- return;
-
- app->session_index = stream_session_get_index (new_s);
- app->thread_index = new_s->thread_index;
+ tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
+
+ /* If transport offers a stream service, only allocate session once the
+ * connection has been established.
+ * Add connection to half-open table and save app and tc index. The
+ * latter is needed to help establish the connection while the former
+ * is needed when the connect notify comes and we have to notify the
+ * external app
+ */
+ handle = (((u64) app_wrk_index) << 32) | (u64) tc->c_index;
+ session_lookup_add_half_open (tc, handle);
+
+ /* Store api_context (opaque) for when the reply comes. Not the nicest
+ * thing but better than allocating a separate half-open pool.
+ */
+ tc->s_index = opaque;
+ return 0;
+}
- /* Allocate vpp event queue for this thread if needed */
- vpp_session_event_queue_allocate (smm, tc->thread_index);
- }
+int
+session_open_app (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
+{
+ session_endpoint_cfg_t *sep = (session_endpoint_cfg_t *) rmt;
+ transport_endpoint_cfg_t *tep_cfg = session_endpoint_to_transport_cfg (sep);
- /* Notify client */
- app->cb_fns.session_connected_callback (app->api_client_index, new_s,
- is_fail);
+ sep->app_wrk_index = app_wrk_index;
+ sep->opaque = opaque;
- /* Cleanup session lookup */
- stream_session_half_open_table_del (smm, sst, tc);
+ return transport_connect (rmt->transport_proto, tep_cfg);
}
-void
-stream_session_accept_notify (transport_connection_t * tc)
-{
- application_t *server;
- stream_session_t *s;
+typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32);
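+
+/* Connect handlers indexed by transport service type: vc (connection
+ * oriented), cl (connectionless) and app transports */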
- s = stream_session_get (tc->s_index, tc->thread_index);
- server = application_get (s->app_index);
- server->cb_fns.session_accept_callback (s);
-}
+/* *INDENT-OFF* */
+static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = {
+ session_open_vc,
+ session_open_cl,
+ session_open_app,
+};
+/* *INDENT-ON* */
/**
- * Notification from transport that connection is being closed.
+ * Ask transport to open connection to remote transport endpoint.
*
- * A disconnect is sent to application but state is not removed. Once
- * disconnect is acknowledged by application, session disconnect is called.
- * Ultimately this leads to close being called on transport (passive close).
+ * Stores handle for matching request with reply since the call can be
+ * asynchronous. For instance, for TCP the 3-way handshake must complete
+ * before reply comes. Session is only created once connection is established.
+ *
+ * @param app_wrk_index Index of the app worker requesting the connect
+ * @param rmt Remote transport endpoint
+ * @param opaque Opaque data (typically, api_context) the application expects
+ * on open completion.
*/
-void
-stream_session_disconnect_notify (transport_connection_t * tc)
+int
+session_open (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
{
- application_t *server;
- stream_session_t *s;
-
- s = stream_session_get (tc->s_index, tc->thread_index);
- server = application_get (s->app_index);
- server->cb_fns.session_disconnect_callback (s);
+ transport_service_type_t tst;
+ tst = transport_protocol_service_type (rmt->transport_proto);
+ return session_open_srv_fns[tst] (app_wrk_index, rmt, opaque);
}
/**
- * Cleans up session and associated app if needed.
+ * Ask transport to listen on session endpoint.
+ *
+ * @param ls Session for which listen will be called. Note that unlike
+ * established sessions, listen sessions are not associated to a
+ * thread.
+ * @param sep Local endpoint to be listened on.
*/
-void
-stream_session_delete (stream_session_t * s)
+int
+session_listen (session_t * ls, session_endpoint_cfg_t * sep)
{
- session_manager_main_t *smm = vnet_get_session_manager_main ();
- svm_fifo_segment_private_t *fifo_segment;
- application_t *app;
- int rv;
+ transport_connection_t *tc;
+ transport_endpoint_t *tep;
+ u32 tc_index, s_index;
- /* delete from the main lookup table */
- rv = stream_session_table_del (smm, s);
+ /* Transport bind/listen */
+ tep = session_endpoint_to_transport (sep);
+ s_index = ls->session_index;
+ tc_index = transport_start_listen (sep->transport_proto, s_index, tep);
- if (rv)
- clib_warning ("hash delete error, rv %d", rv);
+ if (tc_index == (u32) ~ 0)
+ return -1;
- /* Cleanup fifo segments */
- fifo_segment = svm_fifo_get_segment (s->server_segment_index);
- svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo);
- svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo);
+ /* Attach transport to session */
+ ls = listen_session_get (s_index);
+ ls->connection_index = tc_index;
- app = application_get_if_valid (s->app_index);
+ /* Add to the main lookup table after transport was initialized */
+ tc = transport_get_listener (sep->transport_proto, tc_index);
+ session_lookup_add_connection (tc, s_index);
+ return 0;
+}
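+
+/* Illustrative call sequence (a sketch only; the helper names below are
+ * hypothetical, not part of this patch):
+ *
+ *   session_t *ls = alloc_listen_session ();	// hypothetical helper
+ *   if (session_listen (ls, sep))
+ *     free_listen_session (ls);		// hypothetical helper
+ */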
- /* No app. A possibility: after disconnect application called unbind */
- if (!app)
- return;
+/**
+ * Ask transport to stop listening on local transport endpoint.
+ *
+ * @param s Session to stop listening on. It must be in state LISTENING.
+ */
+int
+session_stop_listen (session_t * s)
+{
+ transport_proto_t tp = session_get_transport_proto (s);
+ transport_connection_t *tc;
- if (app->mode == APP_CLIENT)
- {
- /* Cleanup app if client */
- application_del (app);
- }
- else if (app->mode == APP_SERVER)
- {
- session_manager_t *sm;
- svm_fifo_segment_private_t *fifo_segment;
- svm_fifo_t **fifos;
- u32 fifo_index;
+ if (s->session_state != SESSION_STATE_LISTENING)
+ return -1;
- /* For server, see if any segments can be removed */
- sm = session_manager_get (app->session_manager_index);
+ tc = transport_get_listener (tp, s->connection_index);
+ if (!tc)
+ return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
- /* Delete fifo */
- fifo_segment = svm_fifo_get_segment (s->server_segment_index);
- fifos = (svm_fifo_t **) fifo_segment->h->fifos;
+ session_lookup_del_connection (tc);
+ transport_stop_listen (tp, s->connection_index);
+ return 0;
+}
- fifo_index = svm_fifo_segment_index (fifo_segment);
+/**
+ * Initialize session closing procedure.
+ *
+ * Request is always sent to session node to ensure that all outstanding
+ * requests are served before transport is notified.
+ */
+void
+session_close (session_t * s)
+{
+ if (!s)
+ return;
- /* Remove segment only if it holds no fifos and not the first */
- if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0)
- svm_fifo_segment_delete (fifo_segment);
+ if (s->session_state >= SESSION_STATE_CLOSING)
+ {
+ /* Session will only be removed once both app and transport
+ * acknowledge the close */
+ if (s->session_state == SESSION_STATE_TRANSPORT_CLOSED)
+ session_program_transport_close (s);
+
+ /* Session already closed. Clear the tx fifo */
+ if (s->session_state == SESSION_STATE_CLOSED)
+ svm_fifo_dequeue_drop_all (s->tx_fifo);
+ return;
}
- pool_put (smm->sessions[s->thread_index], s);
+ s->session_state = SESSION_STATE_CLOSING;
+ session_program_transport_close (s);
}
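+
+/* Summary of the close flow implemented above: the first session_close
+ * moves the session to CLOSING and programs a transport close on the
+ * session's thread. Subsequent calls are no-ops, except that a session
+ * whose transport already closed gets the transport close re-programmed,
+ * and a fully CLOSED session has its tx fifo drained. */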
/**
- * Notification from transport that connection is being deleted
+ * Notify transport the session can be disconnected. This should eventually
+ * result in a delete notification that allows us to cleanup session state.
+ * Called for both active/passive disconnects.
*
- * This should be called only on previously fully established sessions. For
- * instance failed connects should call stream_session_connect_notify and
- * indicate that the connect has failed.
+ * Must be called from the session's thread.
*/
void
-stream_session_delete_notify (transport_connection_t * tc)
+session_transport_close (session_t * s)
{
- stream_session_t *s;
-
- /* App might've been removed already */
- s = stream_session_get_if_valid (tc->s_index, tc->thread_index);
- if (!s)
+ /* If transport is already closed, just free the session */
+ if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
{
+ session_free_w_fifos (s);
return;
}
- stream_session_delete (s);
+
+  /* If the tx queue wasn't drained, move to a closed state that waits on
+   * the transport. This way the transport, if it so wishes, can keep trying
+   * to send the outstanding data (in CLOSED state it cannot). It MUST,
+   * however, at some point, either after sending everything or after a
+   * timeout, call delete notify. That finally leads to the complete cleanup
+   * of the session.
+   */
+ if (svm_fifo_max_dequeue (s->tx_fifo))
+ s->session_state = SESSION_STATE_CLOSED_WAITING;
+ else
+ s->session_state = SESSION_STATE_CLOSED;
+
+ transport_close (session_get_transport_proto (s), s->connection_index,
+ s->thread_index);
}
/**
- * Notify application that connection has been reset.
+ * Cleanup transport and session state.
+ *
+ * Notify transport of the cleanup and free the session. This should
+ * be called only if transport reported some error and is already
+ * closed.
*/
void
-stream_session_reset_notify (transport_connection_t * tc)
+session_transport_cleanup (session_t * s)
{
- stream_session_t *s;
- application_t *app;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s->session_state = SESSION_STATE_CLOSED;
- app = application_get (s->app_index);
- app->cb_fns.session_reset_callback (s);
+  /* Delete from main lookup table before we axe the transport */
+ session_lookup_del_session (s);
+ transport_cleanup (session_get_transport_proto (s), s->connection_index,
+ s->thread_index);
+ /* Since we called cleanup, no delete notification will come. So, make
+ * sure the session is properly freed. */
+ session_free_w_fifos (s);
}
/**
- * Accept a stream session. Optionally ping the server by callback.
+ * Allocate event queues in the shared-memory segment
+ *
+ * The segment can either be a newly created memfd segment, which will need
+ * to be mapped by all stack users, or the binary api's svm region. The
+ * latter is assumed to be already mapped. NOTE that this assumption DOES NOT
+ * hold if api clients bootstrap shm api over sockets (i.e., use memfd
+ * segments) and vpp uses the api svm region for event queues.
*/
-int
-stream_session_accept (transport_connection_t * tc, u32 listener_index,
- u8 sst, u8 notify)
+void
+session_vpp_event_queues_allocate (session_manager_main_t * smm)
{
- session_manager_main_t *smm = &session_manager_main;
- application_t *server;
- stream_session_t *s, *listener;
+ u32 evt_q_length = 2048, evt_size = sizeof (session_event_t);
+ ssvm_private_t *eqs = &smm->evt_qs_segment;
+ api_main_t *am = &api_main;
+ u64 eqs_size = 64 << 20;
+ pid_t vpp_pid = getpid ();
+ void *oldheap;
+ int i;
- int rv;
+ if (smm->configured_event_queue_length)
+ evt_q_length = smm->configured_event_queue_length;
- /* Find the server */
- listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index);
- server = application_get (listener->app_index);
+ if (smm->evt_qs_use_memfd_seg)
+ {
+ if (smm->evt_qs_segment_size)
+ eqs_size = smm->evt_qs_segment_size;
- if ((rv = stream_session_create_i (smm, server, tc, &s)))
- return rv;
+ eqs->ssvm_size = eqs_size;
+ eqs->i_am_master = 1;
+ eqs->my_pid = vpp_pid;
+ eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
+ eqs->requested_va = smm->session_baseva;
+
+ if (ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD))
+ {
+ clib_warning ("failed to initialize queue segment");
+ return;
+ }
+ }
- /* Allocate vpp event queue for this thread if needed */
- vpp_session_event_queue_allocate (smm, tc->thread_index);
+ if (smm->evt_qs_use_memfd_seg)
+ oldheap = ssvm_push_heap (eqs->sh);
+ else
+ oldheap = svm_push_data_heap (am->vlib_rp);
- /* Shoulder-tap the server */
- if (notify)
+ for (i = 0; i < vec_len (smm->wrk); i++)
{
- server->cb_fns.session_accept_callback (s);
+ svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
+      svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
+	{evt_q_length, evt_size, 0},
+	{evt_q_length << 1, 256, 0}
+      };
+ cfg->consumer_pid = 0;
+ cfg->n_rings = 2;
+ cfg->q_nitems = evt_q_length;
+ cfg->ring_cfgs = rc;
+ smm->wrk[i].vpp_event_queue = svm_msg_q_alloc (cfg);
+ if (smm->evt_qs_use_memfd_seg)
+ {
+ if (svm_msg_q_alloc_consumer_eventfd (smm->wrk[i].vpp_event_queue))
+	    clib_warning ("failed to allocate consumer eventfd");
+ }
}
- return 0;
+ if (smm->evt_qs_use_memfd_seg)
+ ssvm_pop_heap (oldheap);
+ else
+ svm_pop_heap (oldheap);
}
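+
+/* Sizing note for the defaults above: with evt_q_length = 2048, the
+ * io-event ring holds 2048 session_event_t sized messages and the second
+ * ring holds 4096 256-byte messages, all carved out of either the memfd
+ * segment or the binary api svm region. */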
-int
-stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order,
- u32 app_index)
+ssvm_private_t *
+session_manager_get_evt_q_segment (void)
{
- transport_connection_t *tc;
- u32 tci;
- u64 value;
- int rv;
+ session_manager_main_t *smm = &session_manager_main;
+ if (smm->evt_qs_use_memfd_seg)
+ return &smm->evt_qs_segment;
+ return 0;
+}
- /* Ask transport to open connection */
- rv = tp_vfts[sst].open (addr, port_host_byte_order);
- if (rv < 0)
- {
- clib_warning ("Transport failed to open connection.");
- return VNET_API_ERROR_SESSION_CONNECT_FAIL;
- }
+/* *INDENT-OFF* */
+static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = {
+ session_tx_fifo_peek_and_snd,
+ session_tx_fifo_dequeue_and_snd,
+ session_tx_fifo_dequeue_internal,
+ session_tx_fifo_dequeue_and_snd
+};
+/* *INDENT-ON* */
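+
+/* The array above is indexed by the transport vft's tx_type. A reliable,
+ * retransmitting transport (e.g., TCP) uses the peek-and-send variant so
+ * data stays in the fifo until acknowledged, whereas datagram style
+ * transports dequeue and send in one step. */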
- tci = rv;
+/**
+ * Initialize session layer for given transport proto and ip version
+ *
+ * Allocates per session type (transport proto + ip version) data structures
+ * and adds arc from session queue node to session type output node.
+ */
+void
+session_register_transport (transport_proto_t transport_proto,
+ const transport_proto_vft_t * vft, u8 is_ip4,
+ u32 output_node)
+{
+ session_manager_main_t *smm = &session_manager_main;
+ session_type_t session_type;
+ u32 next_index = ~0;
- /* Get transport connection */
- tc = tp_vfts[sst].get_half_open (tci);
+ session_type = session_type_from_proto_and_ip (transport_proto, is_ip4);
- /* Store api_client_index and transport connection index */
- value = (((u64) app_index) << 32) | (u64) tc->c_index;
+ vec_validate (smm->session_type_to_next, session_type);
+ vec_validate (smm->session_tx_fns, session_type);
- /* Add to the half-open lookup table */
- stream_session_half_open_table_add (sst, tc, value);
+ /* *INDENT-OFF* */
+ if (output_node != ~0)
+ {
+ foreach_vlib_main (({
+ next_index = vlib_node_add_next (this_vlib_main,
+ session_queue_node.index,
+ output_node);
+ }));
+ }
+ /* *INDENT-ON* */
- return 0;
+ smm->session_type_to_next[session_type] = next_index;
+ smm->session_tx_fns[session_type] = session_tx_fns[vft->tx_type];
}
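+
+/* Illustrative registration (a sketch; the vft and output node names are
+ * assumptions, not part of this patch). A transport typically registers
+ * once per ip version at init time:
+ *
+ *   // is_ip4 = 1
+ *   session_register_transport (TRANSPORT_PROTO_TCP, &tcp_proto_vft, 1,
+ *				 tcp4_output_node.index);
+ */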
-/**
- * Disconnect session and propagate to transport. This should eventually
- * result in a delete notification that allows us to cleanup session state.
- * Called for both active/passive disconnects.
- */
-void
-stream_session_disconnect (stream_session_t * s)
+transport_connection_t *
+session_get_transport (session_t * s)
{
- tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
- s->session_state = SESSION_STATE_CLOSED;
+ if (s->session_state != SESSION_STATE_LISTENING)
+ return transport_get_connection (session_get_transport_proto (s),
+ s->connection_index, s->thread_index);
+ else
+ return transport_get_listener (session_get_transport_proto (s),
+ s->connection_index);
}
-/**
- * Cleanup transport and session state.
- */
-void
-stream_session_cleanup (stream_session_t * s)
+transport_connection_t *
+listen_session_get_transport (session_t * s)
{
- tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
- stream_session_delete (s);
+ return transport_get_listener (session_get_transport_proto (s),
+ s->connection_index);
}
-void
-session_register_transport (u8 type, const transport_proto_vft_t * vft)
+int
+listen_session_get_local_session_endpoint (session_t * listener,
+ session_endpoint_t * sep)
{
- session_manager_main_t *smm = vnet_get_session_manager_main ();
-
- vec_validate (tp_vfts, type);
- tp_vfts[type] = *vft;
+ transport_connection_t *tc;
+ tc = listen_session_get_transport (listener);
+ if (!tc)
+ {
+      clib_warning ("listener has no transport");
+ return -1;
+ }
- /* If an offset function is provided, then peek instead of dequeue */
- smm->session_rx_fns[type] =
- (vft->rx_fifo_offset) ? session_fifo_rx_peek : session_fifo_rx_dequeue;
+  /* N.B. The ip is not copied because this is the local endpoint */
+ sep->port = tc->lcl_port;
+ sep->transport_proto = tc->proto;
+ sep->is_ip4 = tc->is_ip4;
+ return 0;
}
-transport_proto_vft_t *
-session_get_transport_vft (u8 type)
+void
+session_flush_frames_main_thread (vlib_main_t * vm)
{
- if (type >= vec_len (tp_vfts))
- return 0;
- return &tp_vfts[type];
+ ASSERT (vlib_get_thread_index () == 0);
+ vlib_process_signal_event_mt (vm, session_queue_process_node.index,
+ SESSION_Q_PROCESS_FLUSH_FRAMES, 0);
}
static clib_error_t *
session_manager_main_enable (vlib_main_t * vm)
{
+ segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
session_manager_main_t *smm = &session_manager_main;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
- u32 num_threads;
- int i;
+ u32 num_threads, preallocated_sessions_per_worker;
+ session_manager_worker_t *wrk;
+ int i, j;
num_threads = 1 /* main thread */ + vtm->n_threads;
if (num_threads < 1)
return clib_error_return (0, "n_thread_stacks not set");
- /* $$$ config parameters */
- svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ ,
- 20 /* timeout in seconds */ );
-
- /* configure per-thread ** vectors */
- vec_validate (smm->sessions, num_threads - 1);
- vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1);
- vec_validate (smm->tx_buffers, num_threads - 1);
- vec_validate (smm->fifo_events, num_threads - 1);
- vec_validate (smm->evts_partially_read, num_threads - 1);
- vec_validate (smm->current_enqueue_epoch, num_threads - 1);
- vec_validate (smm->vpp_event_queues, num_threads - 1);
-
- /* $$$$ preallocate hack config parameter */
- for (i = 0; i < 200000; i++)
+ /* Allocate cache line aligned worker contexts */
+ vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES);
+
+ for (i = 0; i < TRANSPORT_N_PROTO; i++)
{
- stream_session_t *ss;
- pool_get (smm->sessions[0], ss);
- memset (ss, 0, sizeof (*ss));
+ for (j = 0; j < num_threads; j++)
+ smm->wrk[j].current_enqueue_epoch[i] = 1;
}
- for (i = 0; i < 200000; i++)
- pool_put_index (smm->sessions[0], i);
+ for (i = 0; i < num_threads; i++)
+ {
+ wrk = &smm->wrk[i];
+ vec_validate (wrk->free_event_vector, 128);
+ _vec_len (wrk->free_event_vector) = 0;
+ vec_validate (wrk->pending_event_vector, 128);
+ _vec_len (wrk->pending_event_vector) = 0;
+ vec_validate (wrk->pending_disconnects, 128);
+ _vec_len (wrk->pending_disconnects) = 0;
+ vec_validate (wrk->postponed_event_vector, 128);
+ _vec_len (wrk->postponed_event_vector) = 0;
+
+ wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
+ wrk->dispatch_period = 500e-6;
+
+ if (num_threads > 1)
+ clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);
+ }
- clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table",
- 200000 /* $$$$ config parameter nbuckets */ ,
- (64 << 20) /*$$$ config parameter table size */ );
- clib_bihash_init_48_8 (&smm->v6_session_hash, "v6 session table",
- 200000 /* $$$$ config parameter nbuckets */ ,
- (64 << 20) /*$$$ config parameter table size */ );
+#if SESSION_DEBUG
+ vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
+#endif
- clib_bihash_init_16_8 (&smm->v4_half_open_hash, "v4 half-open table",
- 200000 /* $$$$ config parameter nbuckets */ ,
- (64 << 20) /*$$$ config parameter table size */ );
- clib_bihash_init_48_8 (&smm->v6_half_open_hash, "v6 half-open table",
- 200000 /* $$$$ config parameter nbuckets */ ,
- (64 << 20) /*$$$ config parameter table size */ );
+ /* Allocate vpp event queues segment and queue */
+ session_vpp_event_queues_allocate (smm);
- for (i = 0; i < SESSION_N_TYPES; i++)
- smm->connect_manager_index[i] = INVALID_INDEX;
+ /* Initialize fifo segment main baseva and timeout */
+ sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
+ sm_args->size = smm->session_va_space_size;
+ segment_manager_main_init (sm_args);
- smm->is_enabled = 1;
+ /* Preallocate sessions */
+ if (smm->preallocated_sessions)
+ {
+ if (num_threads == 1)
+ {
+ pool_init_fixed (smm->wrk[0].sessions, smm->preallocated_sessions);
+ }
+ else
+ {
+ preallocated_sessions_per_worker =
+ (1.1 * (f64) smm->preallocated_sessions /
+ (f64) (num_threads - 1));
- /* Enable TCP transport */
- vnet_tcp_enable_disable (vm, 1);
+ for (j = 1; j < num_threads; j++)
+ {
+ pool_init_fixed (smm->wrk[j].sessions,
+ preallocated_sessions_per_worker);
+ }
+ }
+ }
+ session_lookup_init ();
+ app_namespaces_init ();
+ transport_init ();
+
+ smm->is_enabled = 1;
+
+ /* Enable transports */
+ transport_enable_disable (vm, 1);
+ transport_init_tx_pacers_period ();
return 0;
}
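+
+/**
+ * Enable/disable session layer input nodes.
+ *
+ * When worker threads are present, the main thread runs the session queue
+ * process node while the workers poll the session queue node; otherwise the
+ * main thread polls the session queue node itself.
+ */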
+void
+session_node_enable_disable (u8 is_en)
+{
+ u8 state = is_en ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ u8 have_workers = vtm->n_threads != 0;
+
+ /* *INDENT-OFF* */
+ foreach_vlib_main (({
+ if (have_workers && ii == 0)
+ {
+ vlib_node_set_state (this_vlib_main, session_queue_process_node.index,
+ state);
+ if (is_en)
+ {
+ vlib_node_t *n = vlib_get_node (this_vlib_main,
+ session_queue_process_node.index);
+ vlib_start_process (this_vlib_main, n->runtime_index);
+ }
+ else
+ {
+ vlib_process_signal_event_mt (this_vlib_main,
+ session_queue_process_node.index,
+ SESSION_Q_PROCESS_STOP, 0);
+ }
+
+ continue;
+ }
+ vlib_node_set_state (this_vlib_main, session_queue_node.index,
+ state);
+ }));
+ /* *INDENT-ON* */
+}
+
clib_error_t *
vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
{
+ clib_error_t *error = 0;
if (is_en)
{
if (session_manager_main.is_enabled)
return 0;
- vlib_node_set_state (vm, session_queue_node.index,
- VLIB_NODE_STATE_POLLING);
-
- return session_manager_main_enable (vm);
+ session_node_enable_disable (is_en);
+ error = session_manager_main_enable (vm);
}
else
{
session_manager_main.is_enabled = 0;
- vlib_node_set_state (vm, session_queue_node.index,
- VLIB_NODE_STATE_DISABLED);
+ session_node_enable_disable (is_en);
}
- return 0;
+ return error;
}
clib_error_t *
session_manager_main_init (vlib_main_t * vm)
{
session_manager_main_t *smm = &session_manager_main;
-
- smm->vlib_main = vm;
- smm->vnet_main = vnet_get_main ();
+ smm->session_baseva = HIGH_SEGMENT_BASEVA;
+#if (HIGH_SEGMENT_BASEVA > (4ULL << 30))
+ smm->session_va_space_size = 128ULL << 30;
+ smm->evt_qs_segment_size = 64 << 20;
+#else
+ smm->session_va_space_size = 128 << 20;
+ smm->evt_qs_segment_size = 1 << 20;
+#endif
smm->is_enabled = 0;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (session_manager_main_init);
+
+static clib_error_t *
+session_config_fn (vlib_main_t * vm, unformat_input_t * input)
+{
+ session_manager_main_t *smm = &session_manager_main;
+ u32 nitems;
+ uword tmp;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "event-queue-length %d", &nitems))
+ {
+ if (nitems >= 2048)
+ smm->configured_event_queue_length = nitems;
+ else
+ clib_warning ("event queue length %d too small, ignored", nitems);
+ }
+ else if (unformat (input, "preallocated-sessions %d",
+ &smm->preallocated_sessions))
+ ;
+ else if (unformat (input, "v4-session-table-buckets %d",
+ &smm->configured_v4_session_table_buckets))
+ ;
+ else if (unformat (input, "v4-halfopen-table-buckets %d",
+ &smm->configured_v4_halfopen_table_buckets))
+ ;
+ else if (unformat (input, "v6-session-table-buckets %d",
+ &smm->configured_v6_session_table_buckets))
+ ;
+ else if (unformat (input, "v6-halfopen-table-buckets %d",
+ &smm->configured_v6_halfopen_table_buckets))
+ ;
+ else if (unformat (input, "v4-session-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ smm->configured_v4_session_table_memory = tmp;
+ }
+ else if (unformat (input, "v4-halfopen-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ smm->configured_v4_halfopen_table_memory = tmp;
+ }
+ else if (unformat (input, "v6-session-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ smm->configured_v6_session_table_memory = tmp;
+ }
+ else if (unformat (input, "v6-halfopen-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ smm->configured_v6_halfopen_table_memory = tmp;
+ }
+ else if (unformat (input, "local-endpoints-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ smm->local_endpoints_table_memory = tmp;
+ }
+ else if (unformat (input, "local-endpoints-table-buckets %d",
+ &smm->local_endpoints_table_buckets))
+ ;
+ else if (unformat (input, "evt_qs_memfd_seg"))
+ smm->evt_qs_use_memfd_seg = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
return 0;
}
-VLIB_INIT_FUNCTION (session_manager_main_init)
+VLIB_CONFIG_FUNCTION (session_config_fn, "session");
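+
+/* Example startup.conf stanza accepted by the parser above (values are
+ * illustrative only):
+ *
+ *   session {
+ *     event-queue-length 4096
+ *     preallocated-sessions 100000
+ *     v4-session-table-buckets 100000
+ *     v4-session-table-memory 512m
+ *     evt_qs_memfd_seg
+ *   }
+ */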
+
/*
* fd.io coding-style-patch-verification: ON
*