X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fsession%2Fsession.c;h=dfc967b12dc28acb83e0a793b2f68ea436087751;hb=7fb0fe1;hp=06e2a09af31a7af83ba04d32edf923d25727d002;hpb=e69f4954a9de40a47f0bc27cdab0ba44e6985dac;p=vpp.git

diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index 06e2a09af31..dfc967b12dc 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -18,688 +18,270 @@
  */
 #include <vnet/session/session.h>
+#include <vnet/session/session_debug.h>
+#include <vnet/session/application.h>
 #include <vlibmemory/api.h>
 #include <vnet/dpo/load_balance.h>
 #include <vnet/fib/ip4_fib.h>
-#include <vnet/session/application.h>
-#include <vnet/tcp/tcp.h>
-#include <vnet/session/session_debug.h>
-
-/**
- * Per-type vector of transport protocol virtual function tables
- */
-static transport_proto_vft_t *tp_vfts;
 session_manager_main_t session_manager_main;
-
-/*
- * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type)
- * Value: (owner thread index << 32 | session_index);
- */
-static void
-stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc,
-                                 u64 value)
-{
-  session_manager_main_t *smm = &session_manager_main;
-  session_kv4_t kv4;
-  session_kv6_t kv6;
-
-  switch (sst)
-    {
-    case SESSION_TYPE_IP4_UDP:
-    case SESSION_TYPE_IP4_TCP:
-      make_v4_ss_kv_from_tc (&kv4, tc);
-      kv4.value = value;
-      clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, 1 /* is_add */ );
-      break;
-    case SESSION_TYPE_IP6_UDP:
-    case SESSION_TYPE_IP6_TCP:
-      make_v6_ss_kv_from_tc (&kv6, tc);
-      kv6.value = value;
-      clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, 1 /* is_add */ );
-      break;
-    default:
-      clib_warning ("Session type not supported");
-      ASSERT (0);
-    }
-}
-
-void
-stream_session_table_add (session_manager_main_t * smm, stream_session_t * s,
-                          u64 value)
-{
-  transport_connection_t *tc;
-
-  tc = tp_vfts[s->session_type].get_connection (s->connection_index,
-                                                s->thread_index);
-  stream_session_table_add_for_tc (s->session_type, tc, value);
-}
+extern transport_proto_vft_t *tp_vfts;
 
 static void
-stream_session_half_open_table_add (u8 sst, transport_connection_t * tc,
-                                    u64 value)
+session_send_evt_to_thread (u64 session_handle, fifo_event_type_t evt_type,
+                            u32 thread_index, void *fp, void *rpc_args)
 {
-  session_manager_main_t *smm = &session_manager_main;
-  session_kv4_t kv4;
-  session_kv6_t kv6;
+  u32 tries = 0;
+  session_fifo_event_t evt = { {0}, };
+  svm_queue_t *q;
 
-  switch (sst)
+  evt.event_type = evt_type;
+  if (evt_type == FIFO_EVENT_RPC)
     {
-    case SESSION_TYPE_IP4_UDP:
-    case SESSION_TYPE_IP4_TCP:
-      make_v4_ss_kv_from_tc (&kv4, tc);
-      kv4.value = value;
-      clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
-                                1 /* is_add */ );
-      break;
-    case SESSION_TYPE_IP6_UDP:
-    case SESSION_TYPE_IP6_TCP:
-      make_v6_ss_kv_from_tc (&kv6, tc);
-      kv6.value = value;
-      clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
-                                1 /* is_add */ );
-      break;
-    default:
-      clib_warning ("Session type not supported");
-      ASSERT (0);
+      evt.rpc_args.fp = fp;
+      evt.rpc_args.arg = rpc_args;
     }
-}
-
-static int
-stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst,
-                                 transport_connection_t * tc)
-{
-  session_kv4_t kv4;
-  session_kv6_t kv6;
+  else
+    evt.session_handle = session_handle;
 
-  switch (sst)
+  q = session_manager_get_vpp_event_queue (thread_index);
+  while (svm_queue_add (q, (u8 *) & evt, 1))
     {
-    case SESSION_TYPE_IP4_UDP:
-    case SESSION_TYPE_IP4_TCP:
-      make_v4_ss_kv_from_tc (&kv4, tc);
-      return clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4,
-                                       0 /* is_add */ );
-      break;
-    case SESSION_TYPE_IP6_UDP:
-    case SESSION_TYPE_IP6_TCP:
-      make_v6_ss_kv_from_tc (&kv6, tc);
-      return clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6,
-                                       0 /* 
is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); + if (tries++ == 3) + { + SESSION_DBG ("failed to enqueue evt"); + break; + } } - - return 0; } -static int -stream_session_table_del (session_manager_main_t * smm, stream_session_t * s) +void +session_send_session_evt_to_thread (u64 session_handle, + fifo_event_type_t evt_type, + u32 thread_index) { - transport_connection_t *ts; - - ts = tp_vfts[s->session_type].get_connection (s->connection_index, - s->thread_index); - return stream_session_table_del_for_tc (smm, s->session_type, ts); + session_send_evt_to_thread (session_handle, evt_type, thread_index, 0, 0); } -static void -stream_session_half_open_table_del (session_manager_main_t * smm, u8 sst, - transport_connection_t * tc) +void +session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args) { - session_kv4_t kv4; - session_kv6_t kv6; - - switch (sst) + if (thread_index != vlib_get_thread_index ()) + session_send_evt_to_thread (0, FIFO_EVENT_RPC, thread_index, fp, + rpc_args); + else { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: - make_v4_ss_kv_from_tc (&kv4, tc); - clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4, - 0 /* is_add */ ); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: - make_v6_ss_kv_from_tc (&kv6, tc); - clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6, - 0 /* is_add */ ); - break; - default: - clib_warning ("Session type not supported"); - ASSERT (0); + void (*fnp) (void *) = fp; + fnp (rpc_args); } } stream_session_t * -stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto) -{ - session_manager_main_t *smm = &session_manager_main; - session_kv4_t kv4; - int rv; - - make_v4_listener_kv (&kv4, lcl, lcl_port, proto); - rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); - if (rv == 0) - return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value); - - /* Zero out the lcl ip */ - kv4.key[0] = 0; - rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); - if (rv == 0) - return pool_elt_at_index (smm->listen_sessions[proto], kv4.value); - - return 0; -} - -/** Looks up a session based on the 5-tuple passed as argument. 
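- * The 5-tuple is (lcl ip, rmt ip, lcl port, rmt port, proto), the same key
- * layout make_v4_ss_kv packs into the v4 session hash.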
- * - * First it tries to find an established session, if this fails, it tries - * finding a listener session if this fails, it tries a lookup with a - * wildcarded local source (listener bound to all interfaces) - */ -stream_session_t * -stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +session_alloc (u32 thread_index) { session_manager_main_t *smm = &session_manager_main; - session_kv4_t kv4; - int rv; - - /* Lookup session amongst established ones */ - make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); - if (rv == 0) - return stream_session_get_tsi (kv4.value, my_thread_index); - - /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener4 (lcl, lcl_port, proto); + stream_session_t *s; + u8 will_expand = 0; + pool_get_aligned_will_expand (smm->sessions[thread_index], will_expand, + CLIB_CACHE_LINE_BYTES); + /* If we have peekers, let them finish */ + if (PREDICT_FALSE (will_expand && vlib_num_workers ())) + { + clib_rwlock_writer_lock (&smm->peekers_rw_locks[thread_index]); + pool_get_aligned (session_manager_main.sessions[thread_index], s, + CLIB_CACHE_LINE_BYTES); + clib_rwlock_writer_unlock (&smm->peekers_rw_locks[thread_index]); + } + else + { + pool_get_aligned (session_manager_main.sessions[thread_index], s, + CLIB_CACHE_LINE_BYTES); + } + memset (s, 0, sizeof (*s)); + s->session_index = s - session_manager_main.sessions[thread_index]; + s->thread_index = thread_index; + return s; } -stream_session_t * -stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto) +void +session_free (stream_session_t * s) { - session_manager_main_t *smm = &session_manager_main; - session_kv6_t kv6; - int rv; - - make_v6_listener_kv (&kv6, lcl, lcl_port, proto); - rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); - if (rv == 0) - return pool_elt_at_index (smm->listen_sessions[proto], kv6.value); - - /* Zero out the lcl ip */ - kv6.key[0] = kv6.key[1] = 0; - rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); - if (rv == 0) - return pool_elt_at_index (smm->listen_sessions[proto], kv6.value); - - return 0; + pool_put (session_manager_main.sessions[s->thread_index], s); + if (CLIB_DEBUG) + memset (s, 0xFA, sizeof (*s)); } -/* Looks up a session based on the 5-tuple passed as argument. 
- * First it tries to find an established session, if this fails, it tries - * finding a listener session if this fails, it tries a lookup with a - * wildcarded local source (listener bound to all interfaces) */ -stream_session_t * -stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +int +session_alloc_fifos (segment_manager_t * sm, stream_session_t * s) { - session_manager_main_t *smm = vnet_get_session_manager_main (); - session_kv6_t kv6; + svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; + u32 fifo_segment_index; int rv; - make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); - if (rv == 0) - return stream_session_get_tsi (kv6.value, my_thread_index); - - /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener6 (lcl, lcl_port, proto); -} - -stream_session_t * -stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto) -{ - switch (proto) - { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: - return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto); - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: - return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto); - break; - } - return 0; -} + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, + &server_tx_fifo, + &fifo_segment_index))) + return rv; + /* Initialize backpointers */ + server_rx_fifo->master_session_index = s->session_index; + server_rx_fifo->master_thread_index = s->thread_index; -static u64 -stream_session_half_open_lookup (session_manager_main_t * smm, - ip46_address_t * lcl, ip46_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto) -{ - session_kv4_t kv4; - session_kv6_t kv6; - int rv; + server_tx_fifo->master_session_index = s->session_index; + server_tx_fifo->master_thread_index = s->thread_index; - switch (proto) - { - case SESSION_TYPE_IP4_UDP: - case SESSION_TYPE_IP4_TCP: - make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); - - if (rv == 0) - return kv4.value; - - return (u64) ~ 0; - break; - case SESSION_TYPE_IP6_UDP: - case SESSION_TYPE_IP6_TCP: - make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); - - if (rv == 0) - return kv6.value; - - return (u64) ~ 0; - break; - } + s->server_rx_fifo = server_rx_fifo; + s->server_tx_fifo = server_tx_fifo; + s->svm_segment_index = fifo_segment_index; return 0; } -transport_connection_t * -stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +static stream_session_t * +session_alloc_for_connection (transport_connection_t * tc) { - session_manager_main_t *smm = &session_manager_main; - session_kv4_t kv4; stream_session_t *s; - int rv; - - /* Lookup session amongst established ones */ - make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4); - if (rv == 0) - { - s = stream_session_get_tsi (kv4.value, my_thread_index); - - return tp_vfts[s->session_type].get_connection (s->connection_index, - my_thread_index); - } + u32 thread_index = tc->thread_index; - /* If nothing is found, check if any listener is available */ - s = stream_session_lookup_listener4 (lcl, lcl_port, 
proto); - if (s) - return tp_vfts[s->session_type].get_listener (s->connection_index); + ASSERT (thread_index == vlib_get_thread_index ()); - /* Finally, try half-open connections */ - rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); - if (rv == 0) - return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF); + s = session_alloc (thread_index); + s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4); + s->session_state = SESSION_STATE_CONNECTING; + s->enqueue_epoch = ~0; - return 0; + /* Attach transport to session and vice versa */ + s->connection_index = tc->c_index; + tc->s_index = s->session_index; + return s; } -transport_connection_t * -stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) +static int +session_alloc_and_init (segment_manager_t * sm, transport_connection_t * tc, + u8 alloc_fifos, stream_session_t ** ret_s) { - session_manager_main_t *smm = &session_manager_main; stream_session_t *s; - session_kv6_t kv6; int rv; - make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); - rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); - if (rv == 0) + s = session_alloc_for_connection (tc); + if (alloc_fifos && (rv = session_alloc_fifos (sm, s))) { - s = stream_session_get_tsi (kv6.value, my_thread_index); - - return tp_vfts[s->session_type].get_connection (s->connection_index, - my_thread_index); + session_free (s); + *ret_s = 0; + return rv; } - /* If nothing is found, check if any listener is available */ - s = stream_session_lookup_listener6 (lcl, lcl_port, proto); - if (s) - return tp_vfts[s->session_type].get_listener (s->connection_index); - - /* Finally, try half-open connections */ - rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); - if (rv == 0) - return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF); + /* Add to the main lookup table */ + session_lookup_add_connection (tc, session_handle (s)); + *ret_s = s; return 0; } /** - * Allocate vpp event queue (once) per worker thread + * Discards bytes from buffer chain + * + * It discards n_bytes_to_drop starting at first buffer after chain_b */ -void -vpp_session_event_queue_allocate (session_manager_main_t * smm, - u32 thread_index) -{ - api_main_t *am = &api_main; - void *oldheap; - - if (smm->vpp_event_queues[thread_index] == 0) - { - /* Allocate event fifo in the /vpe-api shared-memory segment */ - oldheap = svm_push_data_heap (am->vlib_rp); - - smm->vpp_event_queues[thread_index] = - unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) send signal when queue non-empty */ - ); - - svm_pop_heap (oldheap); - } -} - -void -session_manager_get_segment_info (u32 index, u8 ** name, u32 * size) -{ - svm_fifo_segment_private_t *s; - s = svm_fifo_get_segment (index); - *name = s->h->segment_name; - *size = s->ssvm.ssvm_size; -} - -always_inline int -session_manager_add_segment_i (session_manager_main_t * smm, - session_manager_t * sm, - u32 segment_size, u8 * segment_name) +always_inline void +session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b, + vlib_buffer_t ** chain_b, + u32 n_bytes_to_drop) { - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - int rv; - - memset (ca, 0, sizeof (*ca)); - - ca->segment_name = (char *) segment_name; - ca->segment_size = segment_size; - - rv = svm_fifo_segment_create (ca); - if (rv) + vlib_buffer_t *next = *chain_b; + u32 
to_drop = n_bytes_to_drop; + ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT); + while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT)) { - clib_warning ("svm_fifo_segment_create ('%s', %d) failed", - ca->segment_name, ca->segment_size); - vec_free (segment_name); - return -1; - } - - vec_add1 (sm->segment_indices, ca->new_segment_index); - - return 0; -} - -static int -session_manager_add_segment (session_manager_main_t * smm, - session_manager_t * sm) -{ - u8 *segment_name; - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - u32 add_segment_size; - u32 default_segment_size = 128 << 10; - - memset (ca, 0, sizeof (*ca)); - segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - add_segment_size = - sm->add_segment_size ? sm->add_segment_size : default_segment_size; - - return session_manager_add_segment_i (smm, sm, add_segment_size, - segment_name); -} - -int -session_manager_add_first_segment (session_manager_main_t * smm, - session_manager_t * sm, u32 segment_size, - u8 ** segment_name) -{ - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - memset (ca, 0, sizeof (*ca)); - *segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - return session_manager_add_segment_i (smm, sm, segment_size, *segment_name); -} - -void -session_manager_del (session_manager_main_t * smm, session_manager_t * sm) -{ - u32 *deleted_sessions = 0; - u32 *deleted_thread_indices = 0; - int i, j; - - /* Across all fifo segments used by the server */ - for (j = 0; j < vec_len (sm->segment_indices); j++) - { - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - /* Vector of fifos allocated in the segment */ - fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - /* - * Remove any residual sessions from the session lookup table - * Don't bother deleting the individual fifos, we're going to - * throw away the fifo segment in a minute. - */ - for (i = 0; i < vec_len (fifos); i++) + next = vlib_get_buffer (vm, next->next_buffer); + if (next->current_length > to_drop) { - svm_fifo_t *fifo; - u32 session_index, thread_index; - stream_session_t *session; - - fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; - - session = pool_elt_at_index (smm->sessions[thread_index], - session_index); - - /* Add to the deleted_sessions vector (once!) 
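	     (a session's rx and tx fifos both appear in the segment's fifo
	     vector, so the is_deleted flag keeps it from being queued twice)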
*/ - if (!session->is_deleted) - { - session->is_deleted = 1; - vec_add1 (deleted_sessions, - session - smm->sessions[thread_index]); - vec_add1 (deleted_thread_indices, thread_index); - } + vlib_buffer_advance (next, to_drop); + to_drop = 0; } - - for (i = 0; i < vec_len (deleted_sessions); i++) + else { - stream_session_t *session; - - session = - pool_elt_at_index (smm->sessions[deleted_thread_indices[i]], - deleted_sessions[i]); - - /* Instead of directly removing the session call disconnect */ - stream_session_disconnect (session); - - /* - stream_session_table_del (smm, session); - pool_put(smm->sessions[deleted_thread_indices[i]], session); - */ + to_drop -= next->current_length; + next->current_length = 0; } - - vec_reset_length (deleted_sessions); - vec_reset_length (deleted_thread_indices); - - /* Instead of removing the segment, test when removing the session if - * the segment can be removed - */ - /* svm_fifo_segment_delete (fifo_segment); */ } + *chain_b = next; - vec_free (deleted_sessions); - vec_free (deleted_thread_indices); + if (to_drop == 0) + b->total_length_not_including_first_buffer -= n_bytes_to_drop; } -int -session_manager_allocate_session_fifos (session_manager_main_t * smm, - session_manager_t * sm, - svm_fifo_t ** server_rx_fifo, - svm_fifo_t ** server_tx_fifo, - u32 * fifo_segment_index, - u8 * added_a_segment) +/** + * Enqueue buffer chain tail + */ +always_inline int +session_enqueue_chain_tail (stream_session_t * s, vlib_buffer_t * b, + u32 offset, u8 is_in_order) { - svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 128 << 10; /* TODO config */ - int i; - - *added_a_segment = 0; - - /* Allocate svm fifos */ - ASSERT (vec_len (sm->segment_indices)); - -again: - for (i = 0; i < vec_len (sm->segment_indices); i++) + vlib_buffer_t *chain_b; + u32 chain_bi, len, diff; + vlib_main_t *vm = vlib_get_main (); + u8 *data; + u32 written = 0; + int rv = 0; + + if (is_in_order && offset) { - *fifo_segment_index = sm->segment_indices[i]; - fifo_segment = svm_fifo_get_segment (*fifo_segment_index); - - fifo_size = sm->rx_fifo_size; - fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); - - fifo_size = sm->tx_fifo_size; - fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + diff = offset - b->current_length; + if (diff > b->total_length_not_including_first_buffer) + return 0; + chain_b = b; + session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff); + chain_bi = vlib_get_buffer_index (vm, chain_b); + } + else + chain_bi = b->next_buffer; - if (*server_rx_fifo == 0) + do + { + chain_b = vlib_get_buffer (vm, chain_bi); + data = vlib_buffer_get_current (chain_b); + len = chain_b->current_length; + if (!len) + continue; + if (is_in_order) { - /* This would be very odd, but handle it... */ - if (*server_tx_fifo != 0) + rv = svm_fifo_enqueue_nowait (s->server_rx_fifo, len, data); + if (rv == len) { - svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); - *server_tx_fifo = 0; + written += rv; } - continue; - } - if (*server_tx_fifo == 0) - { - if (*server_rx_fifo != 0) + else if (rv < len) { - svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); - *server_rx_fifo = 0; + return (rv > 0) ? 
(written + rv) : written; } - continue; - } - break; - } - - /* See if we're supposed to create another segment */ - if (*server_rx_fifo == 0) - { - if (sm->add_segment) - { - if (*added_a_segment) + else if (rv > len) { - clib_warning ("added a segment, still cant allocate a fifo"); - return SESSION_ERROR_NEW_SEG_NO_SPACE; - } + written += rv; - if (session_manager_add_segment (smm, sm)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + /* written more than what was left in chain */ + if (written > b->total_length_not_including_first_buffer) + return written; - *added_a_segment = 1; - goto again; + /* drop the bytes that have already been delivered */ + session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len); + } } else { - clib_warning ("No space to allocate fifos!"); - return SESSION_ERROR_NO_SPACE; + rv = svm_fifo_enqueue_with_offset (s->server_rx_fifo, offset, len, + data); + if (rv) + { + clib_warning ("failed to enqueue multi-buffer seg"); + return -1; + } + offset += len; } } - return 0; -} - -int -stream_session_create_i (session_manager_main_t * smm, application_t * app, - transport_connection_t * tc, - stream_session_t ** ret_s) -{ - int rv; - svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; - u32 fifo_segment_index; - u32 pool_index, seg_size; - stream_session_t *s; - u64 value; - u32 thread_index = tc->thread_index; - session_manager_t *sm; - u8 segment_added; - u8 *seg_name; - - sm = session_manager_get (app->session_manager_index); - - /* Check the API queue */ - if (app->mode == APP_SERVER && application_api_queue_is_full (app)) - return SESSION_ERROR_API_QUEUE_FULL; - - if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo, - &server_tx_fifo, - &fifo_segment_index, - &segment_added))) - return rv; - - if (segment_added && app->mode == APP_SERVER) - { - /* Send an API message to the external server, to map new segment */ - ASSERT (app->cb_fns.add_segment_callback); - - session_manager_get_segment_info (fifo_segment_index, &seg_name, - &seg_size); - if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name, - seg_size)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - } - - /* Create the session */ - pool_get (smm->sessions[thread_index], s); - memset (s, 0, sizeof (*s)); - - /* Initialize backpointers */ - pool_index = s - smm->sessions[thread_index]; - server_rx_fifo->server_session_index = pool_index; - server_rx_fifo->server_thread_index = thread_index; - - server_tx_fifo->server_session_index = pool_index; - server_tx_fifo->server_thread_index = thread_index; + while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT) + ? chain_b->next_buffer : 0)); - s->server_rx_fifo = server_rx_fifo; - s->server_tx_fifo = server_tx_fifo; - - /* Initialize state machine, such as it is... 
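	     (session type, CONNECTING state, app index and fifo
	     segment/thread back-references)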
*/ - s->session_type = app->session_type; - s->session_state = SESSION_STATE_CONNECTING; - s->app_index = application_get_index (app); - s->server_segment_index = fifo_segment_index; - s->thread_index = thread_index; - s->session_index = pool_index; - - /* Attach transport to session */ - s->connection_index = tc->c_index; - - /* Attach session to transport */ - tc->s_index = s->session_index; - - /* Add to the main lookup table */ - value = (((u64) thread_index) << 32) | (u64) s->session_index; - stream_session_table_add_for_tc (app->session_type, tc, value); - - *ret_s = s; + if (is_in_order) + return written; return 0; } @@ -710,27 +292,49 @@ stream_session_create_i (session_manager_main_t * smm, application_t * app, * calling stream_server_flush_enqueue_events(). * * @param tc Transport connection which is to be enqueued data - * @param data Data to be enqueued - * @param len Length of data to be enqueued + * @param b Buffer to be enqueued + * @param offset Offset at which to start enqueueing if out-of-order * @param queue_event Flag to indicate if peer is to be notified or if event * is to be queued. The former is useful when more data is * enqueued and only one event is to be generated. + * @param is_in_order Flag to indicate if data is in order * @return Number of bytes enqueued or a negative value if enqueueing failed. */ int -stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, - u8 queue_event) +session_enqueue_stream_connection (transport_connection_t * tc, + vlib_buffer_t * b, u32 offset, + u8 queue_event, u8 is_in_order) { stream_session_t *s; - int enqueued; + int enqueued = 0, rv, in_order_off; - s = stream_session_get (tc->s_index, tc->thread_index); + s = session_get (tc->s_index, tc->thread_index); - /* Make sure there's enough space left. We might've filled the pipes */ - if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo))) - return -1; - - enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data); + if (is_in_order) + { + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, + b->current_length, + vlib_buffer_get_current (b)); + if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) + && enqueued >= 0)) + { + in_order_off = enqueued > b->current_length ? 
enqueued : 0; + rv = session_enqueue_chain_tail (s, b, in_order_off, 1); + if (rv > 0) + enqueued += rv; + } + } + else + { + rv = svm_fifo_enqueue_with_offset (s->server_rx_fifo, offset, + b->current_length, + vlib_buffer_get_current (b)); + if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv)) + session_enqueue_chain_tail (s, b, offset + b->current_length, 0); + /* if something was enqueued, report even this as success for ooo + * segment handling */ + return rv; + } if (queue_event) { @@ -738,12 +342,12 @@ stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, * by calling stream_server_flush_enqueue_events () */ session_manager_main_t *smm = vnet_get_session_manager_main (); u32 thread_index = s->thread_index; - u32 my_enqueue_epoch = smm->current_enqueue_epoch[thread_index]; + u32 enqueue_epoch = smm->current_enqueue_epoch[tc->proto][thread_index]; - if (s->enqueue_epoch != my_enqueue_epoch) + if (s->enqueue_epoch != enqueue_epoch) { - s->enqueue_epoch = my_enqueue_epoch; - vec_add1 (smm->session_indices_to_enqueue_by_thread[thread_index], + s->enqueue_epoch = enqueue_epoch; + vec_add1 (smm->session_to_enqueue[tc->proto][thread_index], s - smm->sessions[thread_index]); } } @@ -751,12 +355,52 @@ stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, return enqueued; } + +int +session_enqueue_dgram_connection (stream_session_t * s, + session_dgram_hdr_t * hdr, + vlib_buffer_t * b, u8 proto, u8 queue_event) +{ + int enqueued = 0, rv, in_order_off; + + ASSERT (svm_fifo_max_enqueue (s->server_rx_fifo) + >= b->current_length + sizeof (*hdr)); + + svm_fifo_enqueue_nowait (s->server_rx_fifo, sizeof (session_dgram_hdr_t), + (u8 *) hdr); + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, b->current_length, + vlib_buffer_get_current (b)); + if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0)) + { + in_order_off = enqueued > b->current_length ? enqueued : 0; + rv = session_enqueue_chain_tail (s, b, in_order_off, 1); + if (rv > 0) + enqueued += rv; + } + if (queue_event) + { + /* Queue RX event on this fifo. 
Eventually these will need to be flushed + * by calling stream_server_flush_enqueue_events () */ + session_manager_main_t *smm = vnet_get_session_manager_main (); + u32 thread_index = s->thread_index; + u32 enqueue_epoch = smm->current_enqueue_epoch[proto][thread_index]; + + if (s->enqueue_epoch != enqueue_epoch) + { + s->enqueue_epoch = enqueue_epoch; + vec_add1 (smm->session_to_enqueue[proto][thread_index], + s - smm->sessions[thread_index]); + } + } + return enqueued; +} + /** Check if we have space in rx fifo to push more bytes */ u8 stream_session_no_space (transport_connection_t * tc, u32 thread_index, u16 data_len) { - stream_session_t *s = stream_session_get (tc->c_index, thread_index); + stream_session_t *s = session_get (tc->s_index, thread_index); if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY)) return 1; @@ -768,18 +412,27 @@ stream_session_no_space (transport_connection_t * tc, u32 thread_index, } u32 +stream_session_tx_fifo_max_dequeue (transport_connection_t * tc) +{ + stream_session_t *s = session_get (tc->s_index, tc->thread_index); + if (!s->server_tx_fifo) + return 0; + return svm_fifo_max_dequeue (s->server_tx_fifo); +} + +int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes) { - stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer); + stream_session_t *s = session_get (tc->s_index, tc->thread_index); + return svm_fifo_peek (s->server_tx_fifo, offset, max_bytes, buffer); } u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes) { - stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes); + stream_session_t *s = session_get (tc->s_index, tc->thread_index); + return svm_fifo_dequeue_drop (s->server_tx_fifo, max_bytes); } /** @@ -791,43 +444,59 @@ stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes) * @return 0 on succes or negative number if failed to send notification. */ static int -stream_session_enqueue_notify (stream_session_t * s, u8 block) +session_enqueue_notify (stream_session_t * s, u8 block) { application_t *app; session_fifo_event_t evt; - unix_shared_memory_queue_t *q; - static u32 serial_number; + svm_queue_t *q; if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED)) - return 0; + { + /* Session is closed so app will never clean up. Flush rx fifo */ + u32 to_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); + if (to_dequeue) + svm_fifo_dequeue_drop (s->server_rx_fifo, to_dequeue); + return 0; + } /* Get session's server */ - app = application_get (s->app_index); + app = application_get_if_valid (s->app_index); - /* Fabricate event */ - evt.fifo = s->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; - evt.event_id = serial_number++; - evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo); + if (PREDICT_FALSE (app == 0)) + { + clib_warning ("invalid s->app_index = %d", s->app_index); + return 0; + } - /* Built-in server? Hand event to the callback... */ - if (app->cb_fns.builtin_server_rx_callback) - return app->cb_fns.builtin_server_rx_callback (s, &evt); + /* Built-in app? Hand event to the callback... 
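     (built-in apps execute inside vpp, so rx notification is a direct call
     rather than an event on the app's shared-memory queue)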
*/ + if (app->cb_fns.builtin_app_rx_callback) + return app->cb_fns.builtin_app_rx_callback (s); - /* Add event to server's event queue */ - q = app->event_queue; + /* If no event, send one */ + if (svm_fifo_set_event (s->server_rx_fifo)) + { + /* Fabricate event */ + evt.fifo = s->server_rx_fifo; + evt.event_type = FIFO_EVENT_APP_RX; - /* Based on request block (or not) for lack of space */ - if (block || PREDICT_TRUE (q->cursize < q->maxsize)) - unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt, - 0 /* do wait for mutex */ ); - else - return -1; + /* Add event to server's event queue */ + q = app->event_queue; + + /* Based on request block (or not) for lack of space */ + if (block || PREDICT_TRUE (q->cursize < q->maxsize)) + svm_queue_add (app->event_queue, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + { + clib_warning ("fifo full"); + return -1; + } + } /* *INDENT-OFF* */ - SESSION_EVT_DBG(s, SESSION_EVT_ENQ, ({ - ed->data[0] = evt.event_id; - ed->data[1] = evt.enqueue_length; + SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({ + ed->data[0] = evt.event_type; + ed->data[1] = svm_fifo_max_dequeue (s->server_rx_fifo); })); /* *INDENT-ON* */ @@ -843,165 +512,190 @@ stream_session_enqueue_notify (stream_session_t * s, u8 block) * failures due to API queue being full. */ int -session_manager_flush_enqueue_events (u32 thread_index) +session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index) { session_manager_main_t *smm = &session_manager_main; - u32 *session_indices_to_enqueue; + u32 *indices; + stream_session_t *s; int i, errors = 0; - session_indices_to_enqueue = - smm->session_indices_to_enqueue_by_thread[thread_index]; + indices = smm->session_to_enqueue[transport_proto][thread_index]; - for (i = 0; i < vec_len (session_indices_to_enqueue); i++) + for (i = 0; i < vec_len (indices); i++) { - stream_session_t *s0; - - /* Get session */ - s0 = stream_session_get (session_indices_to_enqueue[i], thread_index); - if (stream_session_enqueue_notify (s0, 0 /* don't block */ )) - { - errors++; - } + s = session_get_if_valid (indices[i], thread_index); + if (s == 0 || session_enqueue_notify (s, 0 /* don't block */ )) + errors++; } - vec_reset_length (session_indices_to_enqueue); - - smm->session_indices_to_enqueue_by_thread[thread_index] = - session_indices_to_enqueue; - - /* Increment enqueue epoch for next round */ - smm->current_enqueue_epoch[thread_index]++; + vec_reset_length (indices); + smm->session_to_enqueue[transport_proto][thread_index] = indices; + smm->current_enqueue_epoch[transport_proto][thread_index]++; return errors; } -/* - * Start listening on server's ip/port pair for requested transport. - * - * Creates a 'dummy' stream session with state LISTENING to be used in session - * lookups, prior to establishing connection. Requests transport to build - * it's own specific listening connection. 
- */ int -stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port) +session_manager_flush_all_enqueue_events (u8 transport_proto) { - session_manager_main_t *smm = &session_manager_main; - stream_session_t *s; - transport_connection_t *tc; - application_t *srv; - u32 tci; - - srv = application_get (server_index); - - pool_get (smm->listen_sessions[srv->session_type], s); - memset (s, 0, sizeof (*s)); - - s->session_type = srv->session_type; - s->session_state = SESSION_STATE_LISTENING; - s->session_index = s - smm->listen_sessions[srv->session_type]; - s->app_index = srv->index; - - /* Transport bind/listen */ - tci = tp_vfts[srv->session_type].bind (s->session_index, ip, port); - - /* Attach transport to session */ - s->connection_index = tci; - tc = tp_vfts[srv->session_type].get_listener (tci); - - srv->session_index = s->session_index; - - /* Add to the main lookup table */ - stream_session_table_add_for_tc (s->session_type, tc, s->session_index); - - return 0; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + int i, errors = 0; + for (i = 0; i < 1 + vtm->n_threads; i++) + errors += session_manager_flush_enqueue_events (transport_proto, i); + return errors; } +/** + * Init fifo tail and head pointers + * + * Useful if transport uses absolute offsets for tracking ooo segments. + */ void -stream_session_stop_listen (u32 server_index) +stream_session_init_fifos_pointers (transport_connection_t * tc, + u32 rx_pointer, u32 tx_pointer) { - session_manager_main_t *smm = &session_manager_main; - stream_session_t *listener; - transport_connection_t *tc; - application_t *srv; - - srv = application_get (server_index); - listener = pool_elt_at_index (smm->listen_sessions[srv->session_type], - srv->session_index); - - tc = tp_vfts[srv->session_type].get_listener (listener->connection_index); - stream_session_table_del_for_tc (smm, listener->session_type, tc); - - tp_vfts[srv->session_type].unbind (listener->connection_index); - pool_put (smm->listen_sessions[srv->session_type], listener); + stream_session_t *s; + s = session_get (tc->s_index, tc->thread_index); + svm_fifo_init_pointers (s->server_rx_fifo, rx_pointer); + svm_fifo_init_pointers (s->server_tx_fifo, tx_pointer); } int -connect_server_add_segment_cb (application_t * ss, char *segment_name, - u32 segment_size) -{ - /* Does exactly nothing, but die */ - ASSERT (0); - return 0; -} - -void -connects_session_manager_init (session_manager_main_t * smm, u8 session_type) +session_stream_connect_notify (transport_connection_t * tc, u8 is_fail) { - session_manager_t *sm; - u32 connect_fifo_size = 256 << 10; /* Config? 
*/ - u32 default_segment_size = 1 << 20; - - pool_get (smm->session_managers, sm); - memset (sm, 0, sizeof (*sm)); - - sm->add_segment_size = default_segment_size; - sm->rx_fifo_size = connect_fifo_size; - sm->tx_fifo_size = connect_fifo_size; - sm->add_segment = 1; - - session_manager_add_segment (smm, sm); - smm->connect_manager_index[session_type] = sm - smm->session_managers; -} - -void -stream_session_connect_notify (transport_connection_t * tc, u8 sst, - u8 is_fail) -{ - session_manager_main_t *smm = &session_manager_main; - application_t *app; + u32 opaque = 0, new_ti, new_si; stream_session_t *new_s = 0; - u64 value; - - value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, - tc->lcl_port, tc->rmt_port, - tc->proto); - if (value == HALF_OPEN_LOOKUP_INVALID_VALUE) + segment_manager_t *sm; + application_t *app; + u8 alloc_fifos; + int error = 0; + u64 handle; + + /* + * Find connection handle and cleanup half-open table + */ + handle = session_lookup_half_open_handle (tc); + if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { - clib_warning ("This can't be good!"); - return; + SESSION_DBG ("half-open was removed!"); + return -1; } + session_lookup_del_half_open (tc); - app = application_get (value >> 32); + /* Get the app's index from the handle we stored when opening connection + * and the opaque (api_context for external apps) from transport session + * index */ + app = application_get_if_valid (handle >> 32); + if (!app) + return -1; + opaque = tc->s_index; + /* + * Allocate new session with fifos (svm segments are allocated if needed) + */ if (!is_fail) { - /* Create new session (server segments are allocated if needed) */ - if (stream_session_create_i (smm, app, tc, &new_s)) - return; + sm = application_get_connect_segment_manager (app); + alloc_fifos = !application_is_builtin_proxy (app); + if (session_alloc_and_init (sm, tc, alloc_fifos, &new_s)) + { + is_fail = 1; + error = -1; + } + else + { + new_s->app_index = app->index; + new_si = new_s->session_index; + new_ti = new_s->thread_index; + } + } - app->session_index = stream_session_get_index (new_s); - app->thread_index = new_s->thread_index; + /* + * Notify client application + */ + if (app->cb_fns.session_connected_callback (app->index, opaque, new_s, + is_fail)) + { + SESSION_DBG ("failed to notify app"); + if (!is_fail) + { + new_s = session_get (new_si, new_ti); + stream_session_disconnect_transport (new_s); + } + } + else + { + if (!is_fail) + { + new_s = session_get (new_si, new_ti); + new_s->session_state = SESSION_STATE_READY; + } + } + + return error; +} - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); - } +typedef struct _session_switch_pool_args +{ + u32 session_index; + u32 thread_index; + u32 new_thread_index; + u32 new_session_index; +} session_switch_pool_args_t; - /* Notify client */ - app->cb_fns.session_connected_callback (app->api_client_index, new_s, - is_fail); +static void +session_switch_pool (void *cb_args) +{ + session_switch_pool_args_t *args = (session_switch_pool_args_t *) cb_args; + transport_proto_t tp; + stream_session_t *s; + ASSERT (args->thread_index == vlib_get_thread_index ()); + s = session_get (args->session_index, args->thread_index); + s->server_tx_fifo->master_session_index = args->new_session_index; + s->server_tx_fifo->master_thread_index = args->new_thread_index; + tp = session_get_transport_proto (s); + tp_vfts[tp].cleanup (s->connection_index, s->thread_index); + session_free (s); + 
clib_mem_free (cb_args); +} - /* Cleanup session lookup */ - stream_session_half_open_table_del (smm, sst, tc); +/** + * Move dgram session to the right thread + */ +int +session_dgram_connect_notify (transport_connection_t * tc, + u32 old_thread_index, + stream_session_t ** new_session) +{ + stream_session_t *new_s; + session_switch_pool_args_t *rpc_args; + + /* + * Clone half-open session to the right thread. + */ + new_s = session_clone_safe (tc->s_index, old_thread_index); + new_s->connection_index = tc->c_index; + new_s->server_rx_fifo->master_session_index = new_s->session_index; + new_s->server_rx_fifo->master_thread_index = new_s->thread_index; + new_s->session_state = SESSION_STATE_READY; + session_lookup_add_connection (tc, session_handle (new_s)); + + /* + * Ask thread owning the old session to clean it up and make us the tx + * fifo owner + */ + rpc_args = clib_mem_alloc (sizeof (*rpc_args)); + rpc_args->new_session_index = new_s->session_index; + rpc_args->new_thread_index = new_s->thread_index; + rpc_args->session_index = tc->s_index; + rpc_args->thread_index = old_thread_index; + session_send_rpc_evt_to_thread (rpc_args->thread_index, session_switch_pool, + rpc_args); + + tc->s_index = new_s->session_index; + new_s->connection_index = tc->c_index; + *new_session = new_s; + return 0; } void @@ -1010,7 +704,7 @@ stream_session_accept_notify (transport_connection_t * tc) application_t *server; stream_session_t *s; - s = stream_session_get (tc->s_index, tc->thread_index); + s = session_get (tc->s_index, tc->thread_index); server = application_get (s->app_index); server->cb_fns.session_accept_callback (s); } @@ -1028,70 +722,36 @@ stream_session_disconnect_notify (transport_connection_t * tc) application_t *server; stream_session_t *s; - s = stream_session_get (tc->s_index, tc->thread_index); + s = session_get (tc->s_index, tc->thread_index); server = application_get (s->app_index); server->cb_fns.session_disconnect_callback (s); } /** - * Cleans up session and associated app if needed. + * Cleans up session and lookup table. */ void stream_session_delete (stream_session_t * s) { - session_manager_main_t *smm = vnet_get_session_manager_main (); - svm_fifo_segment_private_t *fifo_segment; - application_t *app; + int rv; /* Delete from the main lookup table. */ - stream_session_table_del (smm, s); + if ((rv = session_lookup_del_session (s))) + clib_warning ("hash delete error, rv %d", rv); /* Cleanup fifo segments */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); - svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); - - app = application_get_if_valid (s->app_index); - - /* No app. 
A possibility: after disconnect application called unbind */ - if (!app) - return; - - if (app->mode == APP_CLIENT) - { - /* Cleanup app if client */ - application_del (app); - } - else if (app->mode == APP_SERVER) - { - session_manager_t *sm; - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - u32 fifo_index; - - /* For server, see if any segments can be removed */ - sm = session_manager_get (app->session_manager_index); - - /* Delete fifo */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - fifo_index = svm_fifo_segment_index (fifo_segment); - - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0) - svm_fifo_segment_delete (fifo_segment); - } - - pool_put (smm->sessions[s->thread_index], s); + segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo, + s->server_tx_fifo); + session_free (s); } /** * Notification from transport that connection is being deleted * - * This should be called only on previously fully established sessions. For - * instance failed connects should call stream_session_connect_notify and - * indicate that the connect has failed. + * This removes the session if it is still valid. It should be called only on + * previously fully established sessions. For instance failed connects should + * call stream_session_connect_notify and indicate that the connect has + * failed. */ void stream_session_delete_notify (transport_connection_t * tc) @@ -1099,11 +759,9 @@ stream_session_delete_notify (transport_connection_t * tc) stream_session_t *s; /* App might've been removed already */ - s = stream_session_get_if_valid (tc->s_index, tc->thread_index); + s = session_get_if_valid (tc->s_index, tc->thread_index); if (!s) - { - return; - } + return; stream_session_delete (s); } @@ -1115,7 +773,7 @@ stream_session_reset_notify (transport_connection_t * tc) { stream_session_t *s; application_t *app; - s = stream_session_get (tc->s_index, tc->thread_index); + s = session_get (tc->s_index, tc->thread_index); app = application_get (s->app_index); app->cb_fns.session_reset_callback (s); @@ -1126,23 +784,24 @@ stream_session_reset_notify (transport_connection_t * tc) */ int stream_session_accept (transport_connection_t * tc, u32 listener_index, - u8 sst, u8 notify) + u8 notify) { - session_manager_main_t *smm = &session_manager_main; application_t *server; stream_session_t *s, *listener; - + segment_manager_t *sm; int rv; /* Find the server */ - listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index); + listener = listen_session_get (listener_index); server = application_get (listener->app_index); - if ((rv = stream_session_create_i (smm, server, tc, &s))) + sm = application_get_listen_segment_manager (server, listener); + if ((rv = session_alloc_and_init (sm, tc, 1, &s))) return rv; - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); + s->app_index = server->index; + s->listener_index = listener_index; + s->session_state = SESSION_STATE_ACCEPTING; /* Shoulder-tap the server */ if (notify) @@ -1154,46 +813,272 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, } int -stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, - u32 app_index) +session_open_cl (u32 app_index, session_endpoint_t * rmt, u32 opaque) { transport_connection_t *tc; - u32 tci; - u64 value; + transport_endpoint_t 
*tep; + segment_manager_t *sm; + stream_session_t *s; + application_t *app; + int rv; + + tep = session_endpoint_to_transport (rmt); + rv = tp_vfts[rmt->transport_proto].open (tep); + if (rv < 0) + { + SESSION_DBG ("Transport failed to open connection."); + return VNET_API_ERROR_SESSION_CONNECT; + } + + tc = tp_vfts[rmt->transport_proto].get_half_open ((u32) rv); + + /* For dgram type of service, allocate session and fifos now. + */ + app = application_get (app_index); + sm = application_get_connect_segment_manager (app); + + if (session_alloc_and_init (sm, tc, 1, &s)) + return -1; + s->app_index = app->index; + s->session_state = SESSION_STATE_OPENED; + + /* Tell the app about the new event fifo for this session */ + app->cb_fns.session_connected_callback (app->index, opaque, s, 0); + + return 0; +} + +int +session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque) +{ + transport_connection_t *tc; + transport_endpoint_t *tep; + u64 handle; int rv; - /* Ask transport to open connection */ - rv = tp_vfts[sst].open (addr, port_host_byte_order); + tep = session_endpoint_to_transport (rmt); + rv = tp_vfts[rmt->transport_proto].open (tep); if (rv < 0) { - clib_warning ("Transport failed to open connection."); - return VNET_API_ERROR_SESSION_CONNECT_FAIL; + SESSION_DBG ("Transport failed to open connection."); + return VNET_API_ERROR_SESSION_CONNECT; } - tci = rv; + tc = tp_vfts[rmt->transport_proto].get_half_open ((u32) rv); + + /* If transport offers a stream service, only allocate session once the + * connection has been established. + * Add connection to half-open table and save app and tc index. The + * latter is needed to help establish the connection while the former + * is needed when the connect notify comes and we have to notify the + * external app + */ + handle = (((u64) app_index) << 32) | (u64) tc->c_index; + session_lookup_add_half_open (tc, handle); + + /* Store api_context (opaque) for when the reply comes. Not the nicest + * thing but better than allocating a separate half-open pool. + */ + tc->s_index = opaque; + return 0; +} + +int +session_open_app (u32 app_index, session_endpoint_t * rmt, u32 opaque) +{ + session_endpoint_extended_t *sep = (session_endpoint_extended_t *) rmt; + sep->app_index = app_index; + sep->opaque = opaque; + + return tp_vfts[rmt->transport_proto].open ((transport_endpoint_t *) sep); +} + +typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32); + +/* *INDENT-OFF* */ +static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = { + session_open_vc, + session_open_cl, + session_open_app, +}; +/* *INDENT-ON* */ + +/** + * Ask transport to open connection to remote transport endpoint. + * + * Stores handle for matching request with reply since the call can be + * asynchronous. For instance, for TCP the 3-way handshake must complete + * before reply comes. Session is only created once connection is established. + * + * @param app_index Index of the application requesting the connect + * @param st Session type requested. + * @param tep Remote transport endpoint + * @param opaque Opaque data (typically, api_context) the application expects + * on open completion. 
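+ *
+ * (Dispatch is via the session_open_srv_fns table keyed by the transport's
+ * service type: session_open_vc, session_open_cl or session_open_app.)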
+ */ +int +session_open (u32 app_index, session_endpoint_t * rmt, u32 opaque) +{ + transport_service_type_t tst = tp_vfts[rmt->transport_proto].service_type; + return session_open_srv_fns[tst] (app_index, rmt, opaque); +} + +int +session_listen_vc (stream_session_t * s, session_endpoint_t * sep) +{ + transport_connection_t *tc; + u32 tci; + + /* Transport bind/listen */ + tci = tp_vfts[sep->transport_proto].bind (s->session_index, + session_endpoint_to_transport + (sep)); + + if (tci == (u32) ~ 0) + return -1; + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[sep->transport_proto].get_listener (tci); + + /* Weird but handle it ... */ + if (tc == 0) + return -1; + + /* Add to the main lookup table */ + session_lookup_add_connection (tc, s->session_index); + return 0; +} + +int +session_listen_cl (stream_session_t * s, session_endpoint_t * sep) +{ + transport_connection_t *tc; + application_t *server; + segment_manager_t *sm; + u32 tci; + + /* Transport bind/listen */ + tci = tp_vfts[sep->transport_proto].bind (s->session_index, + session_endpoint_to_transport + (sep)); + + if (tci == (u32) ~ 0) + return -1; + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[sep->transport_proto].get_listener (tci); + + /* Weird but handle it ... */ + if (tc == 0) + return -1; + + server = application_get (s->app_index); + sm = application_get_listen_segment_manager (server, s); + if (session_alloc_fifos (sm, s)) + return -1; + + /* Add to the main lookup table */ + session_lookup_add_connection (tc, s->session_index); + return 0; +} + +int +session_listen_app (stream_session_t * s, session_endpoint_t * sep) +{ + session_endpoint_extended_t esep; + clib_memcpy (&esep, sep, sizeof (*sep)); + esep.app_index = s->app_index; + + return tp_vfts[sep->transport_proto].bind (s->session_index, + (transport_endpoint_t *) & esep); +} - /* Get transport connection */ - tc = tp_vfts[sst].get_half_open (tci); +typedef int (*session_listen_service_fn) (stream_session_t *, + session_endpoint_t *); + +/* *INDENT-OFF* */ +static session_listen_service_fn +session_listen_srv_fns[TRANSPORT_N_SERVICES] = { + session_listen_vc, + session_listen_cl, + session_listen_app, +}; +/* *INDENT-ON* */ + +/** + * Ask transport to listen on local transport endpoint. + * + * @param s Session for which listen will be called. Note that unlike + * established sessions, listen sessions are not associated to a + * thread. + * @param tep Local endpoint to be listened on. + */ +int +stream_session_listen (stream_session_t * s, session_endpoint_t * sep) +{ + transport_service_type_t tst = tp_vfts[sep->transport_proto].service_type; + return session_listen_srv_fns[tst] (s, sep); +} - /* Store api_client_index and transport connection index */ - value = (((u64) app_index) << 32) | (u64) tc->c_index; +/** + * Ask transport to stop listening on local transport endpoint. + * + * @param s Session to stop listening on. It must be in state LISTENING. 
+ */ +int +stream_session_stop_listen (stream_session_t * s) +{ + transport_proto_t tp = session_get_transport_proto (s); + transport_connection_t *tc; + if (s->session_state != SESSION_STATE_LISTENING) + { + clib_warning ("not a listening session"); + return -1; + } - /* Add to the half-open lookup table */ - stream_session_half_open_table_add (sst, tc, value); + tc = tp_vfts[tp].get_listener (s->connection_index); + if (!tc) + { + clib_warning ("no transport"); + return VNET_API_ERROR_ADDRESS_NOT_IN_USE; + } + session_lookup_del_connection (tc); + tp_vfts[tp].unbind (s->connection_index); return 0; } /** - * Disconnect session and propagate to transport. This should eventually + * Initialize session disconnect. + * + * Request is always sent to session node to ensure that all outstanding + * requests are served before transport is notified. + */ +void +stream_session_disconnect (stream_session_t * s) +{ + if (!s || s->session_state == SESSION_STATE_CLOSED) + return; + s->session_state = SESSION_STATE_CLOSED; + session_send_session_evt_to_thread (session_handle (s), + FIFO_EVENT_DISCONNECT, s->thread_index); +} + +/** + * Notify transport the session can be disconnected. This should eventually * result in a delete notification that allows us to cleanup session state. * Called for both active/passive disconnects. + * + * Must be called from the session's thread. */ void -stream_session_disconnect (stream_session_t * s) +stream_session_disconnect_transport (stream_session_t * s) { s->session_state = SESSION_STATE_CLOSED; - tp_vfts[s->session_type].close (s->connection_index, s->thread_index); + tp_vfts[session_get_transport_proto (s)].close (s->connection_index, + s->thread_index); } /** @@ -1205,139 +1090,386 @@ stream_session_disconnect (stream_session_t * s) void stream_session_cleanup (stream_session_t * s) { - session_manager_main_t *smm = &session_manager_main; int rv; s->session_state = SESSION_STATE_CLOSED; /* Delete from the main lookup table to avoid more enqueues */ - rv = stream_session_table_del (smm, s); + rv = session_lookup_del_session (s); if (rv) clib_warning ("hash delete error, rv %d", rv); - tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index); + tp_vfts[session_get_transport_proto (s)].cleanup (s->connection_index, + s->thread_index); +} + +/** + * Allocate event queues in the shared-memory segment + * + * That can either be a newly created memfd segment, that will need to be + * mapped by all stack users, or the binary api's svm region. The latter is + * assumed to be already mapped. NOTE that this assumption DOES NOT hold if + * api clients bootstrap shm api over sockets (i.e. use memfd segments) and + * vpp uses api svm region for event queues. 
+ */ +void +session_vpp_event_queues_allocate (session_manager_main_t * smm) +{ + u32 evt_q_length = 2048, evt_size = sizeof (session_fifo_event_t); + ssvm_private_t *eqs = &smm->evt_qs_segment; + api_main_t *am = &api_main; + u64 eqs_size = 64 << 20; + pid_t vpp_pid = getpid (); + void *oldheap; + int i; + + if (smm->configured_event_queue_length) + evt_q_length = smm->configured_event_queue_length; + + if (smm->evt_qs_use_memfd_seg) + { + if (smm->evt_qs_segment_size) + eqs_size = smm->evt_qs_segment_size; + + eqs->ssvm_size = eqs_size; + eqs->i_am_master = 1; + eqs->my_pid = vpp_pid; + eqs->name = format (0, "%s%c", "evt-qs-segment", 0); + eqs->requested_va = smm->session_baseva; + + ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD); + } + + if (smm->evt_qs_use_memfd_seg) + oldheap = ssvm_push_heap (eqs->sh); + else + oldheap = svm_push_data_heap (am->vlib_rp); + + for (i = 0; i < vec_len (smm->vpp_event_queues); i++) + { + smm->vpp_event_queues[i] = svm_queue_init (evt_q_length, evt_size, + vpp_pid, 0); + } + + if (smm->evt_qs_use_memfd_seg) + ssvm_pop_heap (oldheap); + else + svm_pop_heap (oldheap); +} + +ssvm_private_t * +session_manager_get_evt_q_segment (void) +{ + session_manager_main_t *smm = &session_manager_main; + if (smm->evt_qs_use_memfd_seg) + return &smm->evt_qs_segment; + return 0; } +/* *INDENT-OFF* */ +static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = { + session_tx_fifo_peek_and_snd, + session_tx_fifo_dequeue_and_snd, + session_tx_fifo_dequeue_internal, + session_tx_fifo_dequeue_and_snd +}; +/* *INDENT-ON* */ + +/** + * Initialize session layer for given transport proto and ip version + * + * Allocates per session type (transport proto + ip version) data structures + * and adds arc from session queue node to session type output node. + */ void -session_register_transport (u8 type, const transport_proto_vft_t * vft) +session_register_transport (transport_proto_t transport_proto, + const transport_proto_vft_t * vft, u8 is_ip4, + u32 output_node) { - session_manager_main_t *smm = vnet_get_session_manager_main (); + session_manager_main_t *smm = &session_manager_main; + session_type_t session_type; + u32 next_index = ~0; + + session_type = session_type_from_proto_and_ip (transport_proto, is_ip4); - vec_validate (tp_vfts, type); - tp_vfts[type] = *vft; + vec_validate (smm->session_type_to_next, session_type); + vec_validate (smm->session_tx_fns, session_type); + + /* *INDENT-OFF* */ + if (output_node != ~0) + { + foreach_vlib_main (({ + next_index = vlib_node_add_next (this_vlib_main, + session_queue_node.index, + output_node); + })); + } + /* *INDENT-ON* */ - /* If an offset function is provided, then peek instead of dequeue */ - smm->session_tx_fns[type] = - (vft->tx_fifo_offset) ? 
 
-transport_proto_vft_t *
-session_get_transport_vft (u8 type)
+transport_connection_t *
+session_get_transport (stream_session_t * s)
 {
-  if (type >= vec_len (tp_vfts))
-    return 0;
-  return &tp_vfts[type];
+  transport_proto_t tp;
+  if (s->session_state != SESSION_STATE_LISTENING)
+    {
+      tp = session_get_transport_proto (s);
+      return tp_vfts[tp].get_connection (s->connection_index,
+                                         s->thread_index);
+    }
+  return 0;
+}
+
+transport_connection_t *
+listen_session_get_transport (stream_session_t * s)
+{
+  transport_proto_t tp = session_get_transport_proto (s);
+  return tp_vfts[tp].get_listener (s->connection_index);
+}
+
+int
+listen_session_get_local_session_endpoint (stream_session_t * listener,
+                                           session_endpoint_t * sep)
+{
+  transport_proto_t tp = session_get_transport_proto (listener);
+  transport_connection_t *tc;
+  tc = tp_vfts[tp].get_listener (listener->connection_index);
+  if (!tc)
+    {
+      clib_warning ("no transport");
+      return -1;
+    }
+
+  /* N.B. The ip should not be copied because this is the local endpoint */
+  sep->port = tc->lcl_port;
+  sep->transport_proto = tc->proto;
+  sep->is_ip4 = tc->is_ip4;
+  return 0;
 }
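[Editor's note] A small usage sketch for the endpoint accessor above; the `listener` pointer and the assumption that the transport stores the port in network byte order are illustrative context:

    session_endpoint_t sep = { 0 };
    if (!listen_session_get_local_session_endpoint (listener, &sep))
      /* Only port, proto and ip version are filled in; the local ip is
       * deliberately not copied (see the N.B. above). */
      clib_warning ("listener: proto %u is_ip4 %u port %u",
                    sep.transport_proto, sep.is_ip4,
                    clib_net_to_host_u16 (sep.port));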
 
 static clib_error_t *
 session_manager_main_enable (vlib_main_t * vm)
 {
+  segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
   session_manager_main_t *smm = &session_manager_main;
   vlib_thread_main_t *vtm = vlib_get_thread_main ();
-  u32 num_threads;
-  int i;
+  u32 num_threads, preallocated_sessions_per_worker;
+  int i, j;
 
   num_threads = 1 /* main thread */ + vtm->n_threads;
 
   if (num_threads < 1)
     return clib_error_return (0, "n_thread_stacks not set");
 
-  /* $$$ config parameters */
-  svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ ,
-                         20 /* timeout in seconds */ );
-
   /* configure per-thread ** vectors */
   vec_validate (smm->sessions, num_threads - 1);
-  vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1);
   vec_validate (smm->tx_buffers, num_threads - 1);
-  vec_validate (smm->fifo_events, num_threads - 1);
-  vec_validate (smm->evts_partially_read, num_threads - 1);
-  vec_validate (smm->current_enqueue_epoch, num_threads - 1);
+  vec_validate (smm->pending_event_vector, num_threads - 1);
+  vec_validate (smm->pending_disconnects, num_threads - 1);
+  vec_validate (smm->free_event_vector, num_threads - 1);
   vec_validate (smm->vpp_event_queues, num_threads - 1);
+  vec_validate (smm->peekers_rw_locks, num_threads - 1);
+
+  for (i = 0; i < TRANSPORT_N_PROTO; i++)
+    {
+      vec_validate (smm->current_enqueue_epoch[i], num_threads - 1);
+      vec_validate (smm->session_to_enqueue[i], num_threads - 1);
+      for (j = 0; j < num_threads; j++)
+        smm->current_enqueue_epoch[i][j] = 1;
+    }
 
-  /* $$$$ preallocate hack config parameter */
-  for (i = 0; i < 200000; i++)
+  for (i = 0; i < num_threads; i++)
     {
-      stream_session_t *ss;
-      pool_get (smm->sessions[0], ss);
-      memset (ss, 0, sizeof (*ss));
+      vec_validate (smm->free_event_vector[i], 0);
+      _vec_len (smm->free_event_vector[i]) = 0;
+      vec_validate (smm->pending_event_vector[i], 0);
+      _vec_len (smm->pending_event_vector[i]) = 0;
+      vec_validate (smm->pending_disconnects[i], 0);
+      _vec_len (smm->pending_disconnects[i]) = 0;
+      if (num_threads > 1)
+        clib_rwlock_init (&smm->peekers_rw_locks[i]);
     }
 
-  for (i = 0; i < 200000; i++)
-    pool_put_index (smm->sessions[0], i);
-
+#if SESSION_DBG
+  vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
+#endif
 
-  clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table",
-                         200000 /* $$$$ config parameter nbuckets */ ,
-                         (64 << 20) /*$$$ config parameter table size */ );
-  clib_bihash_init_48_8 (&smm->v6_session_hash, "v6 session table",
-                         200000 /* $$$$ config parameter nbuckets */ ,
-                         (64 << 20) /*$$$ config parameter table size */ );
+  /* Allocate vpp event queues segment and queue */
+  session_vpp_event_queues_allocate (smm);
+
+  /* Initialize fifo segment main baseva and timeout */
+  sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
+  sm_args->size = smm->session_va_space_size;
+  segment_manager_main_init (sm_args);
+
+  /* Preallocate sessions */
+  if (smm->preallocated_sessions)
+    {
+      if (num_threads == 1)
+        {
+          pool_init_fixed (smm->sessions[0], smm->preallocated_sessions);
+        }
+      else
+        {
+          int j;
+          preallocated_sessions_per_worker =
+            (1.1 * (f64) smm->preallocated_sessions /
+             (f64) (num_threads - 1));
 
-  clib_bihash_init_16_8 (&smm->v4_half_open_hash, "v4 half-open table",
-                         200000 /* $$$$ config parameter nbuckets */ ,
-                         (64 << 20) /*$$$ config parameter table size */ );
-  clib_bihash_init_48_8 (&smm->v6_half_open_hash, "v6 half-open table",
-                         200000 /* $$$$ config parameter nbuckets */ ,
-                         (64 << 20) /*$$$ config parameter table size */ );
+          for (j = 1; j < num_threads; j++)
+            {
+              pool_init_fixed (smm->sessions[j],
+                               preallocated_sessions_per_worker);
+            }
+        }
+    }
 
-  for (i = 0; i < SESSION_N_TYPES; i++)
-    smm->connect_manager_index[i] = INVALID_INDEX;
+  session_lookup_init ();
+  app_namespaces_init ();
+  transport_init ();
 
   smm->is_enabled = 1;
 
-  /* Enable TCP transport */
-  vnet_tcp_enable_disable (vm, 1);
+  /* Enable transports */
+  transport_enable_disable (vm, 1);
 
   return 0;
 }
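[Editor's note] A worked check of the 1.1 oversubscription factor above, with illustrative numbers:

    /* "preallocated-sessions 1000000" with 4 workers (num_threads == 5):
     * per-worker pool = 1.1 * 1000000 / 4 = 275000 fixed entries. The 10%
     * headroom tolerates an uneven spread of connections across workers;
     * with a single thread the whole amount goes into sessions[0] instead. */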
 
+void
+session_node_enable_disable (u8 is_en)
+{
+  u8 state = is_en ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED;
+  /* *INDENT-OFF* */
+  foreach_vlib_main (({
+    vlib_node_set_state (this_vlib_main, session_queue_node.index,
+                         state);
+  }));
+  /* *INDENT-ON* */
+}
+
 clib_error_t *
 vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
 {
+  clib_error_t *error = 0;
   if (is_en)
     {
       if (session_manager_main.is_enabled)
        return 0;
 
-      vlib_node_set_state (vm, session_queue_node.index,
-                           VLIB_NODE_STATE_POLLING);
-
-      return session_manager_main_enable (vm);
+      session_node_enable_disable (is_en);
+      error = session_manager_main_enable (vm);
     }
   else
     {
       session_manager_main.is_enabled = 0;
-      vlib_node_set_state (vm, session_queue_node.index,
-                           VLIB_NODE_STATE_DISABLED);
+      session_node_enable_disable (is_en);
    }
 
-  return 0;
+  return error;
 }
 
 clib_error_t *
 session_manager_main_init (vlib_main_t * vm)
 {
   session_manager_main_t *smm = &session_manager_main;
-
-  smm->vlib_main = vm;
-  smm->vnet_main = vnet_get_main ();
+  smm->session_baseva = 0x200000000ULL;
+  smm->session_va_space_size = (u64) 128 << 30;
+  smm->evt_qs_segment_size = 64 << 20;
   smm->is_enabled = 0;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (session_manager_main_init);
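[Editor's note] The handler that follows consumes the "session" stanza of startup.conf; a hedged example with illustrative values (the keywords are exactly those parsed below):

    /*
     * session {
     *   event-queue-length 4096              (values below 2048 are ignored)
     *   preallocated-sessions 500000
     *   v4-session-table-buckets 100000
     *   v4-session-table-memory 512m         (sizes must stay below 4g)
     *   local-endpoints-table-buckets 250000
     *   evt_qs_memfd_seg
     * }
     */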
"evt_qs_memfd_seg")) + smm->evt_qs_use_memfd_seg = 1; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } return 0; } -VLIB_INIT_FUNCTION (session_manager_main_init) +VLIB_CONFIG_FUNCTION (session_config_fn, "session"); + /* * fd.io coding-style-patch-verification: ON *