2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief Session and session manager
20 #include <vnet/session/session.h>
21 #include <vlibmemory/api.h>
22 #include <vnet/dpo/load_balance.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vnet/session/application.h>
25 #include <vnet/tcp/tcp.h>
26 #include <vnet/session/session_debug.h>
29 * Per-type vector of transport protocol virtual function tables
/* Indexed by session/transport type; populated via session_register_transport. */
31 static transport_proto_vft_t *tp_vfts;
/* Singleton session-manager state shared by all functions in this file. */
33 session_manager_main_t session_manager_main;
36 * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type)
37 * Value: (owner thread index << 32 | session_index);
/**
 * Add a transport connection to the main session lookup table.
 *
 * Keys on the connection's 5-tuple; @a value packs the owner thread index
 * and session index (thread_index << 32 | session_index).
 * NOTE(review): the switch header, break statements and closing braces are
 * elided in this view; dispatch below is by tc's session type.
 */
40 stream_session_table_add_for_tc (transport_connection_t * tc, u64 value)
42 session_manager_main_t *smm = &session_manager_main;
48 case SESSION_TYPE_IP4_UDP:
49 case SESSION_TYPE_IP4_TCP:
50 make_v4_ss_kv_from_tc (&kv4, tc);
52 clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4, 1 /* is_add */ );
54 case SESSION_TYPE_IP6_UDP:
55 case SESSION_TYPE_IP6_TCP:
56 make_v6_ss_kv_from_tc (&kv6, tc);
58 clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6, 1 /* is_add */ );
61 clib_warning ("Session type not supported");
/**
 * Add session @a s to the main lookup table under @a value.
 *
 * Resolves the session's transport connection through the per-type vft
 * and delegates to stream_session_table_add_for_tc.
 */
67 stream_session_table_add (session_manager_main_t * smm, stream_session_t * s,
70 transport_connection_t *tc;
72 tc = tp_vfts[s->session_type].get_connection (s->connection_index,
74 stream_session_table_add_for_tc (tc, value);
/**
 * Add half-open connection @a tc to the half-open lookup tables.
 *
 * Same keying as the main table but against the half-open hashes, used to
 * match connect replies (e.g. TCP SYN-ACK) to the originating request.
 * NOTE(review): is_add argument lines and break statements are elided here.
 */
78 stream_session_half_open_table_add (session_type_t sst,
79 transport_connection_t * tc, u64 value)
81 session_manager_main_t *smm = &session_manager_main;
87 case SESSION_TYPE_IP4_UDP:
88 case SESSION_TYPE_IP4_TCP:
89 make_v4_ss_kv_from_tc (&kv4, tc);
91 clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
94 case SESSION_TYPE_IP6_UDP:
95 case SESSION_TYPE_IP6_TCP:
96 make_v6_ss_kv_from_tc (&kv6, tc);
98 clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
102 clib_warning ("Session type not supported");
/**
 * Delete transport connection @a tc from the main session lookup table.
 *
 * Returns the bihash add_del result (0 on success) for supported session
 * types; unsupported types only warn.
 */
108 stream_session_table_del_for_tc (transport_connection_t * tc)
110 session_manager_main_t *smm = &session_manager_main;
115 case SESSION_TYPE_IP4_UDP:
116 case SESSION_TYPE_IP4_TCP:
117 make_v4_ss_kv_from_tc (&kv4, tc);
118 return clib_bihash_add_del_16_8 (&smm->v4_session_hash, &kv4,
121 case SESSION_TYPE_IP6_UDP:
122 case SESSION_TYPE_IP6_TCP:
123 make_v6_ss_kv_from_tc (&kv6, tc);
124 return clib_bihash_add_del_48_8 (&smm->v6_session_hash, &kv6,
128 clib_warning ("Session type not supported");
/**
 * Delete session @a s from the main lookup table.
 *
 * Looks up the session's transport connection and removes its 5-tuple
 * entry; returns the hash-delete result.
 */
136 stream_session_table_del (session_manager_main_t * smm, stream_session_t * s)
138 transport_connection_t *ts;
140 ts = tp_vfts[s->session_type].get_connection (s->connection_index,
142 return stream_session_table_del_for_tc (ts);
/**
 * Remove half-open connection @a tc from the half-open lookup tables.
 *
 * Mirror of stream_session_half_open_table_add with is_add = 0
 * (is_add argument lines are elided in this view).
 */
146 stream_session_half_open_table_del (session_manager_main_t * smm, u8 sst,
147 transport_connection_t * tc)
154 case SESSION_TYPE_IP4_UDP:
155 case SESSION_TYPE_IP4_TCP:
156 make_v4_ss_kv_from_tc (&kv4, tc);
157 clib_bihash_add_del_16_8 (&smm->v4_half_open_hash, &kv4,
160 case SESSION_TYPE_IP6_UDP:
161 case SESSION_TYPE_IP6_TCP:
162 make_v6_ss_kv_from_tc (&kv6, tc);
163 clib_bihash_add_del_48_8 (&smm->v6_half_open_hash, &kv6,
167 clib_warning ("Session type not supported");
/**
 * Look up an IPv4 listener session by local address, port and protocol.
 *
 * First tries an exact (lcl, port) match, then retries with the local ip
 * wildcarded (zeroed) for listeners bound to all interfaces. Returns the
 * listen session or (presumably) 0 when neither search hits — the failure
 * paths are elided in this view.
 */
173 stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto)
175 session_manager_main_t *smm = &session_manager_main;
179 make_v4_listener_kv (&kv4, lcl, lcl_port, proto);
180 rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
182 return pool_elt_at_index (smm->listen_sessions[proto], (u32) kv4.value);
184 /* Zero out the lcl ip */
186 rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
188 return pool_elt_at_index (smm->listen_sessions[proto], kv4.value);
193 /** Looks up a session based on the 5-tuple passed as argument.
195 * First it tries to find an established session, if this fails, it tries
196 * finding a listener session if this fails, it tries a lookup with a
197 * wildcarded local source (listener bound to all interfaces)
/* Returns the session owned by my_thread_index for established matches;
 * otherwise falls back to listener lookup. */
200 stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt,
201 u16 lcl_port, u16 rmt_port, u8 proto,
204 session_manager_main_t *smm = &session_manager_main;
208 /* Lookup session amongst established ones */
209 make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
210 rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
212 return stream_session_get_tsi (kv4.value, my_thread_index);
214 /* If nothing is found, check if any listener is available */
215 return stream_session_lookup_listener4 (lcl, lcl_port, proto);
/**
 * Look up an IPv6 listener session by local address, port and protocol.
 *
 * IPv6 twin of stream_session_lookup_listener4: exact match first, then a
 * retry with the 128-bit local address zeroed (wildcard listener).
 */
219 stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto)
221 session_manager_main_t *smm = &session_manager_main;
225 make_v6_listener_kv (&kv6, lcl, lcl_port, proto);
226 rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
228 return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);
230 /* Zero out the lcl ip */
231 kv6.key[0] = kv6.key[1] = 0;
232 rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
234 return pool_elt_at_index (smm->listen_sessions[proto], kv6.value);
239 /* Looks up a session based on the 5-tuple passed as argument.
240 * First it tries to find an established session, if this fails, it tries
241 * finding a listener session if this fails, it tries a lookup with a
242 * wildcarded local source (listener bound to all interfaces) */
/* IPv6 twin of stream_session_lookup4. */
244 stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt,
245 u16 lcl_port, u16 rmt_port, u8 proto,
248 session_manager_main_t *smm = vnet_get_session_manager_main ();
252 make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
253 rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
255 return stream_session_get_tsi (kv6.value, my_thread_index);
257 /* If nothing is found, check if any listener is available */
258 return stream_session_lookup_listener6 (lcl, lcl_port, proto);
/**
 * Address-family agnostic listener lookup.
 *
 * Dispatches to the v4 or v6 variant based on session type; the switch
 * header and default case are elided in this view.
 */
262 stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto)
266 case SESSION_TYPE_IP4_UDP:
267 case SESSION_TYPE_IP4_TCP:
268 return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto);
270 case SESSION_TYPE_IP6_UDP:
271 case SESSION_TYPE_IP6_TCP:
272 return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto);
/**
 * Look up a half-open connection by 5-tuple in the half-open hashes.
 *
 * Returns the stored u64 handle (app_index << 32 | transport index) on hit;
 * the miss/return paths (HALF_OPEN_LOOKUP_INVALID_VALUE — see caller in
 * stream_session_connect_notify) are elided in this view.
 */
279 stream_session_half_open_lookup (session_manager_main_t * smm,
280 ip46_address_t * lcl, ip46_address_t * rmt,
281 u16 lcl_port, u16 rmt_port, u8 proto)
289 case SESSION_TYPE_IP4_UDP:
290 case SESSION_TYPE_IP4_TCP:
291 make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto);
292 rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);
299 case SESSION_TYPE_IP6_UDP:
300 case SESSION_TYPE_IP6_TCP:
301 make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto);
302 rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
/**
 * Resolve an IPv4 5-tuple directly to a transport connection.
 *
 * Search order: established sessions, then listeners, then half-open
 * connections. For half-opens, only the low 32 bits of the stored value
 * (the transport index) are used.
 */
313 transport_connection_t *
314 stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt,
315 u16 lcl_port, u16 rmt_port, u8 proto,
318 session_manager_main_t *smm = &session_manager_main;
323 /* Lookup session amongst established ones */
324 make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
325 rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, &kv4);
328 s = stream_session_get_tsi (kv4.value, my_thread_index);
330 return tp_vfts[s->session_type].get_connection (s->connection_index,
334 /* If nothing is found, check if any listener is available */
335 s = stream_session_lookup_listener4 (lcl, lcl_port, proto);
337 return tp_vfts[s->session_type].get_listener (s->connection_index);
339 /* Finally, try half-open connections */
340 rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4);
342 return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF);
/**
 * Resolve an IPv6 5-tuple directly to a transport connection.
 *
 * IPv6 twin of stream_session_lookup_transport4: established sessions,
 * then listeners, then half-open connections.
 */
347 transport_connection_t *
348 stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt,
349 u16 lcl_port, u16 rmt_port, u8 proto,
352 session_manager_main_t *smm = &session_manager_main;
357 make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
358 rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6);
361 s = stream_session_get_tsi (kv6.value, my_thread_index);
363 return tp_vfts[s->session_type].get_connection (s->connection_index,
367 /* If nothing is found, check if any listener is available */
368 s = stream_session_lookup_listener6 (lcl, lcl_port, proto);
370 return tp_vfts[s->session_type].get_listener (s->connection_index);
372 /* Finally, try half-open connections */
373 rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6);
375 return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF);
381 * Allocate vpp event queue (once) per worker thread
/* Idempotent: a non-zero slot in vpp_event_queues means the queue already
 * exists and allocation is skipped. The queue lives in the /vpe-api shared
 * memory segment so external apps can reach it. */
384 session_vpp_event_queue_allocate (session_manager_main_t * smm,
387 api_main_t *am = &api_main;
390 if (smm->vpp_event_queues[thread_index] == 0)
392 /* Allocate event fifo in the /vpe-api shared-memory segment */
393 oldheap = svm_push_data_heap (am->vlib_rp);
395 smm->vpp_event_queues[thread_index] =
396 unix_shared_memory_queue_init (2048 /* nels $$$$ config */ ,
397 sizeof (session_fifo_event_t),
398 0 /* consumer pid */ ,
400 /* (do not) send signal when queue non-empty */
403 svm_pop_heap (oldheap);
/**
 * Allocate and initialize a session for transport connection @a tc.
 *
 * Allocates rx/tx fifos via the segment manager, grabs a session from the
 * owning thread's pool, wires session <-> fifos <-> transport back-pointers,
 * and installs the (thread_index << 32 | session_index) value in the main
 * lookup table. On success *ret_s points at the new session (the final
 * assignment/return lines are elided in this view).
 */
408 stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc,
409 stream_session_t ** ret_s)
411 session_manager_main_t *smm = &session_manager_main;
412 svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0;
413 u32 fifo_segment_index;
417 u32 thread_index = tc->thread_index;
/* Fifo allocation can fail (segment full) -- bail out before touching pools */
420 if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo,
422 &fifo_segment_index)))
425 /* Create the session */
426 pool_get (smm->sessions[thread_index], s);
427 memset (s, 0, sizeof (*s));
429 /* Initialize backpointers */
430 pool_index = s - smm->sessions[thread_index];
431 server_rx_fifo->server_session_index = pool_index;
432 server_rx_fifo->server_thread_index = thread_index;
434 server_tx_fifo->server_session_index = pool_index;
435 server_tx_fifo->server_thread_index = thread_index;
437 s->server_rx_fifo = server_rx_fifo;
438 s->server_tx_fifo = server_tx_fifo;
440 /* Initialize state machine, such as it is... */
441 s->session_type = tc->proto;
442 s->session_state = SESSION_STATE_CONNECTING;
443 s->svm_segment_index = fifo_segment_index;
444 s->thread_index = thread_index;
445 s->session_index = pool_index;
447 /* Attach transport to session */
448 s->connection_index = tc->c_index;
450 /* Attach session to transport */
451 tc->s_index = s->session_index;
453 /* Add to the main lookup table */
454 value = (((u64) thread_index) << 32) | (u64) s->session_index;
455 stream_session_table_add_for_tc (tc, value);
463 * Enqueue data for delivery to session peer. Does not notify peer of enqueue
464 * event but on request can queue notification events for later delivery by
465 * calling stream_server_flush_enqueue_events().
467 * @param tc Transport connection which is to be enqueued data
468 * @param data Data to be enqueued
469 * @param len Length of data to be enqueued
470 * @param queue_event Flag to indicate if peer is to be notified or if event
471 * is to be queued. The former is useful when more data is
472 * enqueued and only one event is to be generated.
473 * @return Number of bytes enqueued or a negative value if enqueueing failed.
476 stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len,
482 s = stream_session_get (tc->s_index, tc->thread_index);
484 /* Make sure there's enough space left. We might've filled the pipes */
485 if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo)))
/* NOTE(review): the error return for the full-fifo case is elided here */
488 enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data);
492 /* Queue RX event on this fifo. Eventually these will need to be flushed
493 * by calling stream_server_flush_enqueue_events () */
494 session_manager_main_t *smm = vnet_get_session_manager_main ();
495 u32 thread_index = s->thread_index;
496 u32 my_enqueue_epoch = smm->current_enqueue_epoch[thread_index];
/* Epoch check ensures at most one queued notification per session per
 * flush round, regardless of how many enqueues happen */
498 if (s->enqueue_epoch != my_enqueue_epoch)
500 s->enqueue_epoch = my_enqueue_epoch;
501 vec_add1 (smm->session_indices_to_enqueue_by_thread[thread_index],
502 s - smm->sessions[thread_index]);
509 /** Check if we have space in rx fifo to push more bytes */
/* Returns non-zero ("no space") when the session is not READY or when
 * data_len exceeds free fifo space; exact return statements are elided.
 * NOTE(review): looks up by tc->c_index, unlike most siblings which use
 * tc->s_index -- confirm against the full source. */
511 stream_session_no_space (transport_connection_t * tc, u32 thread_index,
514 stream_session_t *s = stream_session_get (tc->c_index, thread_index);
516 if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
519 if (data_len > svm_fifo_max_enqueue (s->server_rx_fifo))
/**
 * Non-destructive read of up to @a max_bytes from the tx fifo at @a offset
 * into @a buffer; returns the number of bytes copied (svm_fifo_peek).
 */
526 stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
527 u32 offset, u32 max_bytes)
529 stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
530 return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer);
/**
 * Discard up to @a max_bytes from the session's tx fifo (e.g. after the
 * transport has acknowledged delivery); returns svm_fifo_dequeue_drop's result.
 */
534 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
536 stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
537 return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes);
541 * Notify session peer that new data has been enqueued.
543 * @param s Stream session for which the event is to be generated.
544 * @param block Flag to indicate if call should block if event queue is full.
546 * @return 0 on succes or negative number if failed to send notification.
549 stream_session_enqueue_notify (stream_session_t * s, u8 block)
552 session_fifo_event_t evt;
553 unix_shared_memory_queue_t *q;
/* Monotonic id stamped on each event; file-scope static, not thread-safe */
554 static u32 serial_number;
556 if (PREDICT_FALSE (s->session_state == SESSION_STATE_CLOSED))
559 /* Get session's server */
560 app = application_get (s->app_index);
562 /* Built-in server? Hand event to the callback... */
563 if (app->cb_fns.builtin_server_rx_callback)
564 return app->cb_fns.builtin_server_rx_callback (s);
566 /* If no event, send one */
567 if (svm_fifo_set_event (s->server_rx_fifo))
569 /* Fabricate event */
570 evt.fifo = s->server_rx_fifo;
571 evt.event_type = FIFO_EVENT_SERVER_RX;
572 evt.event_id = serial_number++;
574 /* Add event to server's event queue */
575 q = app->event_queue;
577 /* Based on request block (or not) for lack of space */
578 if (block || PREDICT_TRUE (q->cursize < q->maxsize))
579 unix_shared_memory_queue_add (app->event_queue, (u8 *) & evt,
580 0 /* do wait for mutex */ );
/* Non-blocking and queue full: warn and (presumably) report failure */
583 clib_warning ("fifo full");
589 SESSION_EVT_DBG(SESSION_EVT_ENQ, s, ({
590 ed->data[0] = evt.event_id;
591 ed->data[1] = svm_fifo_max_dequeue (s->server_rx_fifo);
599 * Flushes queue of sessions that are to be notified of new data
602 * @param thread_index Thread index for which the flush is to be performed.
603 * @return 0 on success or a positive number indicating the number of
604 * failures due to API queue being full.
607 session_manager_flush_enqueue_events (u32 thread_index)
609 session_manager_main_t *smm = &session_manager_main;
610 u32 *session_indices_to_enqueue;
613 session_indices_to_enqueue =
614 smm->session_indices_to_enqueue_by_thread[thread_index];
616 for (i = 0; i < vec_len (session_indices_to_enqueue); i++)
618 stream_session_t *s0;
621 s0 = stream_session_get (session_indices_to_enqueue[i], thread_index);
622 if (stream_session_enqueue_notify (s0, 0 /* don't block */ ))
/* NOTE(review): failure counting for the return value is elided here */
/* Reset-length (not free) so the vector's storage is reused next round */
628 vec_reset_length (session_indices_to_enqueue);
630 smm->session_indices_to_enqueue_by_thread[thread_index] =
631 session_indices_to_enqueue;
633 /* Increment enqueue epoch for next round */
634 smm->current_enqueue_epoch[thread_index]++;
/**
 * Transport notification that an actively-opened connection completed.
 *
 * Recovers the half-open handle (app_index << 32 | opaque) stored by
 * stream_session_open, creates the full session, invokes the app's
 * session_connected callback, and removes the half-open table entry.
 * NOTE(review): the is_fail branch and error paths are elided in this view.
 */
640 stream_session_connect_notify (transport_connection_t * tc, u8 sst,
643 session_manager_main_t *smm = &session_manager_main;
645 stream_session_t *new_s = 0;
649 handle = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip,
650 tc->lcl_port, tc->rmt_port,
652 if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
654 clib_warning ("This can't be good!");
658 /* Get the app's index from the handle we stored when opening connection */
659 app = application_get (handle >> 32);
660 api_context = tc->s_index;
664 segment_manager_t *sm;
665 sm = application_get_connect_segment_manager (app);
667 /* Create new session (svm segments are allocated if needed) */
668 if (stream_session_create_i (sm, tc, &new_s))
671 new_s->app_index = app->index;
675 app->cb_fns.session_connected_callback (app->index, api_context, new_s,
678 /* Cleanup session lookup */
679 stream_session_half_open_table_del (smm, sst, tc);
/**
 * Notify the owning application that a session has been accepted.
 */
683 stream_session_accept_notify (transport_connection_t * tc)
685 application_t *server;
688 s = stream_session_get (tc->s_index, tc->thread_index);
689 server = application_get (s->app_index);
690 server->cb_fns.session_accept_callback (s);
694 * Notification from transport that connection is being closed.
696 * A disconnect is sent to application but state is not removed. Once
697 * disconnect is acknowledged by application, session disconnect is called.
698 * Ultimately this leads to close being called on transport (passive close).
701 stream_session_disconnect_notify (transport_connection_t * tc)
703 application_t *server;
706 s = stream_session_get (tc->s_index, tc->thread_index);
707 server = application_get (s->app_index);
708 server->cb_fns.session_disconnect_callback (s);
712 * Cleans up session and associated app if needed.
/* Removes the lookup-table entry, frees the rx/tx fifos through the segment
 * manager, and returns the session to its thread's pool. */
715 stream_session_delete (stream_session_t * s)
717 session_manager_main_t *smm = vnet_get_session_manager_main ();
719 /* Delete from the main lookup table. */
720 stream_session_table_del (smm, s);
722 /* Cleanup fifo segments */
723 segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo,
726 pool_put (smm->sessions[s->thread_index], s);
730 * Notification from transport that connection is being deleted
732 * This should be called only on previously fully established sessions. For
733 * instance failed connects should call stream_session_connect_notify and
734 * indicate that the connect has failed.
737 stream_session_delete_notify (transport_connection_t * tc)
741 /* App might've been removed already */
742 s = stream_session_get_if_valid (tc->s_index, tc->thread_index);
747 stream_session_delete (s);
751 * Notify application that connection has been reset.
754 stream_session_reset_notify (transport_connection_t * tc)
758 s = stream_session_get (tc->s_index, tc->thread_index);
/* NOTE(review): state update on s (e.g. CLOSED) is elided in this view */
760 app = application_get (s->app_index);
761 app->cb_fns.session_reset_callback (s);
765 * Accept a stream session. Optionally ping the server by callback.
/* Finds the listener and its owning application, creates the session using
 * the listener's segment manager, then (conditionally -- the notify flag
 * check is elided here) invokes the app's accept callback. */
768 stream_session_accept (transport_connection_t * tc, u32 listener_index,
771 application_t *server;
772 stream_session_t *s, *listener;
773 segment_manager_t *sm;
777 /* Find the server */
778 listener = listen_session_get (sst, listener_index);
779 server = application_get (listener->app_index);
781 sm = application_get_listen_segment_manager (server, listener);
782 if ((rv = stream_session_create_i (sm, tc, &s)))
785 s->app_index = server->index;
786 s->listener_index = listener_index;
788 /* Shoulder-tap the server */
791 server->cb_fns.session_accept_callback (s);
798 * Ask transport to open connection to remote transport endpoint.
800 * Stores handle for matching request with reply since the call can be
801 * asynchronous. For instance, for TCP the 3-way handshake must complete
802 * before reply comes. Session is only created once connection is established.
804 * @param app_index Index of the application requesting the connect
805 * @param st Session type requested.
806 * @param tep Remote transport endpoint
807 * @param res Resulting transport connection .
810 stream_session_open (u32 app_index, session_type_t st,
811 transport_endpoint_t * tep,
812 transport_connection_t ** res)
814 transport_connection_t *tc;
/* transport open returns the half-open connection index, or negative on
 * failure */
818 rv = tp_vfts[st].open (&tep->ip, tep->port);
821 clib_warning ("Transport failed to open connection.");
822 return VNET_API_ERROR_SESSION_CONNECT_FAIL;
825 tc = tp_vfts[st].get_half_open ((u32) rv);
827 /* Save app and tc index. The latter is needed to help establish the
828 * connection while the former is needed when the connect notify comes
829 * and we have to notify the external app */
830 handle = (((u64) app_index) << 32) | (u64) tc->c_index;
832 /* Add to the half-open lookup table */
833 stream_session_half_open_table_add (st, tc, handle);
841 * Ask transport to listen on local transport endpoint.
843 * @param s Session for which listen will be called. Note that unlike
844 * established sessions, listen sessions are not associated to a
846 * @param tep Local endpoint to be listened on.
849 stream_session_listen (stream_session_t * s, transport_endpoint_t * tep)
851 transport_connection_t *tc;
854 /* Transport bind/listen */
855 tci = tp_vfts[s->session_type].bind (s->session_index, &tep->ip, tep->port);
/* ~0 from bind means the transport could not listen */
857 if (tci == (u32) ~ 0)
860 /* Attach transport to session */
861 s->connection_index = tci;
862 tc = tp_vfts[s->session_type].get_listener (tci);
864 /* Weird but handle it ... */
868 /* Add to the main lookup table */
869 stream_session_table_add_for_tc (tc, s->session_index);
875 * Ask transport to stop listening on local transport endpoint.
877 * @param s Session to stop listening on. It must be in state LISTENING.
880 stream_session_stop_listen (stream_session_t * s)
882 transport_connection_t *tc;
884 if (s->session_state != SESSION_STATE_LISTENING)
886 clib_warning ("not a listening session");
/* NOTE(review): error return for the non-listening case is elided here */
890 tc = tp_vfts[s->session_type].get_listener (s->connection_index);
893 clib_warning ("no transport");
894 return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
/* Remove lookup entry first, then unbind the transport listener */
897 stream_session_table_del_for_tc (tc);
898 tp_vfts[s->session_type].unbind (s->connection_index);
903 * Disconnect session and propagate to transport. This should eventually
904 * result in a delete notification that allows us to cleanup session state.
905 * Called for both active/passive disconnects.
908 stream_session_disconnect (stream_session_t * s)
910 // session_fifo_event_t evt;
/* Mark closed immediately so no further enqueues are accepted, then ask
 * the transport to close; cleanup happens on the later delete notify */
912 s->session_state = SESSION_STATE_CLOSED;
913 /* RPC to vpp evt queue in the right thread */
915 tp_vfts[s->session_type].close (s->connection_index, s->thread_index);
/* Dead code below kept from an earlier event-based design -- candidate for
 * removal once the RPC path above is finalized */
918 // /* Fabricate event */
919 // evt.fifo = s->server_rx_fifo;
920 // evt.event_type = FIFO_EVENT_SERVER_RX;
921 // evt.event_id = serial_number++;
923 // /* Based on request block (or not) for lack of space */
924 // if (PREDICT_TRUE(q->cursize < q->maxsize))
925 // unix_shared_memory_queue_add (app->event_queue, (u8 *) &evt,
926 // 0 /* do wait for mutex */);
929 // clib_warning("fifo full");
936 * Cleanup transport and session state.
938 * Notify transport of the cleanup, wait for a delete notify to actually
939 * remove the session state.
942 stream_session_cleanup (stream_session_t * s)
944 session_manager_main_t *smm = &session_manager_main;
947 s->session_state = SESSION_STATE_CLOSED;
949 /* Delete from the main lookup table to avoid more enqueues */
950 rv = stream_session_table_del (smm, s);
/* Hash delete failure is logged but otherwise non-fatal */
952 clib_warning ("hash delete error, rv %d", rv);
954 tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index);
/**
 * Register a transport protocol's vft under session type @a type.
 *
 * Also selects the tx function for the type: transports providing
 * tx_fifo_offset get peek-and-send; others get dequeue-and-send.
 */
958 session_register_transport (u8 type, const transport_proto_vft_t * vft)
960 session_manager_main_t *smm = vnet_get_session_manager_main ();
962 vec_validate (tp_vfts, type);
963 tp_vfts[type] = *vft;
965 /* If an offset function is provided, then peek instead of dequeue */
966 smm->session_tx_fns[type] =
967 (vft->tx_fifo_offset) ? session_tx_fifo_peek_and_snd :
968 session_tx_fifo_dequeue_and_snd;
/**
 * Return the registered transport vft for @a type, guarding against
 * out-of-range types (the out-of-range return value is elided in this view).
 */
971 transport_proto_vft_t *
972 session_get_transport_vft (u8 type)
974 if (type >= vec_len (tp_vfts))
976 return &tp_vfts[type];
/**
 * Bring up the session layer: fifo segments, per-thread state, lookup
 * tables, and the TCP transport.
 *
 * Sizing constants below (queue depth, 200000 preallocated sessions,
 * bucket counts, 64 MB table memory) are hard-coded pending config
 * parameters, as the $$$$ markers note.
 */
979 static clib_error_t *
980 session_manager_main_enable (vlib_main_t * vm)
982 session_manager_main_t *smm = &session_manager_main;
983 vlib_thread_main_t *vtm = vlib_get_thread_main ();
987 num_threads = 1 /* main thread */ + vtm->n_threads;
990 return clib_error_return (0, "n_thread_stacks not set");
992 /* $$$ config parameters */
993 svm_fifo_segment_init (0x200000000ULL /* first segment base VA */ ,
994 20 /* timeout in seconds */ );
996 /* configure per-thread ** vectors */
997 vec_validate (smm->sessions, num_threads - 1);
998 vec_validate (smm->session_indices_to_enqueue_by_thread, num_threads - 1);
999 vec_validate (smm->tx_buffers, num_threads - 1);
1000 vec_validate (smm->fifo_events, num_threads - 1);
1001 vec_validate (smm->evts_partially_read, num_threads - 1);
1002 vec_validate (smm->current_enqueue_epoch, num_threads - 1);
1003 vec_validate (smm->vpp_event_queues, num_threads - 1);
1006 vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
1009 /* Allocate vpp event queues */
1010 for (i = 0; i < vec_len (smm->vpp_event_queues); i++)
1011 session_vpp_event_queue_allocate (smm, i);
1013 /* $$$$ preallocate hack config parameter */
/* Warm the main-thread session pool: get then put back every element so
 * the pool memory is allocated up front */
1014 for (i = 0; i < 200000; i++)
1016 stream_session_t *ss;
1017 pool_get (smm->sessions[0], ss);
1018 memset (ss, 0, sizeof (*ss));
1021 for (i = 0; i < 200000; i++)
1022 pool_put_index (smm->sessions[0], i);
1024 clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table",
1025 200000 /* $$$$ config parameter nbuckets */ ,
1026 (64 << 20) /*$$$ config parameter table size */ );
1027 clib_bihash_init_48_8 (&smm->v6_session_hash, "v6 session table",
1028 200000 /* $$$$ config parameter nbuckets */ ,
1029 (64 << 20) /*$$$ config parameter table size */ );
1031 clib_bihash_init_16_8 (&smm->v4_half_open_hash, "v4 half-open table",
1032 200000 /* $$$$ config parameter nbuckets */ ,
1033 (64 << 20) /*$$$ config parameter table size */ );
1034 clib_bihash_init_48_8 (&smm->v6_half_open_hash, "v6 half-open table",
1035 200000 /* $$$$ config parameter nbuckets */ ,
1036 (64 << 20) /*$$$ config parameter table size */ );
1038 smm->is_enabled = 1;
1040 /* Enable TCP transport */
1041 vnet_tcp_enable_disable (vm, 1);
/**
 * Public enable/disable entry point for the session layer.
 *
 * Enable is idempotent (early-out when already enabled); it flips the
 * session queue node to POLLING and runs session_manager_main_enable.
 * Disable clears is_enabled and parks the queue node in DISABLED state.
 */
1047 vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
1051 if (session_manager_main.is_enabled)
1054 vlib_node_set_state (vm, session_queue_node.index,
1055 VLIB_NODE_STATE_POLLING);
1057 return session_manager_main_enable (vm);
1061 session_manager_main.is_enabled = 0;
1062 vlib_node_set_state (vm, session_queue_node.index,
1063 VLIB_NODE_STATE_DISABLED);
/**
 * vlib init function: record main pointers and start disabled; the heavy
 * setup is deferred to session_manager_main_enable.
 */
1070 session_manager_main_init (vlib_main_t * vm)
1072 session_manager_main_t *smm = &session_manager_main;
1074 smm->vlib_main = vm;
1075 smm->vnet_main = vnet_get_main ();
1076 smm->is_enabled = 0;
1081 VLIB_INIT_FUNCTION (session_manager_main_init)
1083 * fd.io coding-style-patch-verification: ON
1086 * eval: (c-set-style "gnu")