* Copyright (c) 2017-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* @brief Session and session manager
#include <vnet/session/session.h>
#include <vnet/session/session_debug.h>
#include <vnet/session/application.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/ip4_fib.h>
session_main_t session_main;
session_send_evt_to_thread (void *data, void *args, u32 thread_index,
session_evt_type_t evt_type)
u32 tries = 0, max_tries;
mq = session_main_get_vpp_event_queue (thread_index);
while (svm_msg_q_try_lock (mq))
max_tries = vlib_get_current_process (vlib_get_main ()) ? 1e6 : 3;
if (tries++ == max_tries)
SESSION_DBG ("failed to enqueue evt");
if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
svm_msg_q_unlock (mq);
msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
if (PREDICT_FALSE (svm_msg_q_msg_is_invalid (&msg)))
svm_msg_q_unlock (mq);
evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
evt->event_type = evt_type;
case SESSION_CTRL_EVT_RPC:
evt->rpc_args.fp = data;
evt->rpc_args.arg = args;
case SESSION_IO_EVT_TX:
case SESSION_IO_EVT_TX_FLUSH:
case SESSION_IO_EVT_BUILTIN_RX:
evt->session_index = *(u32 *) data;
case SESSION_IO_EVT_BUILTIN_TX:
case SESSION_CTRL_EVT_CLOSE:
evt->session_handle = session_handle ((session_t *) data);
clib_warning ("evt unhandled!");
svm_msg_q_unlock (mq);
svm_msg_q_add_and_unlock (mq, &msg);
session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
return session_send_evt_to_thread (&f->master_session_index, 0,
f->master_thread_index, evt_type);
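/* Illustrative sketch, not part of the original file: any thread can queue an
 * RX notification for a session by handing its rx fifo to the helper above;
 * the fifo carries the owning session index and thread index, so no lookup is
 * needed. The wrapper name example_notify_builtin_rx is hypothetical.
 *
 *   static void
 *   example_notify_builtin_rx (session_t * s)
 *   {
 *     session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
 *   }
 */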
session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
session_evt_type_t evt_type)
return session_send_evt_to_thread (data, 0, thread_index, evt_type);
session_send_ctrl_evt_to_thread (session_t * s, session_evt_type_t evt_type)
/* only event supported for now is disconnect */
ASSERT (evt_type == SESSION_CTRL_EVT_CLOSE);
return session_send_evt_to_thread (s, 0, s->thread_index,
SESSION_CTRL_EVT_CLOSE);
session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args)
if (thread_index != vlib_get_thread_index ())
session_send_evt_to_thread (fp, rpc_args, thread_index,
SESSION_CTRL_EVT_RPC);
void (*fnp) (void *) = fp;
session_program_transport_close (session_t * s)
u32 thread_index = vlib_get_thread_index ();
session_worker_t *wrk;
session_event_t *evt;
/* If we are in the handler thread, or being called with the worker barrier
* held, just append a new event to the pending disconnects vector. */
if (vlib_thread_is_main_w_barrier () || thread_index == s->thread_index)
wrk = session_main_get_worker (s->thread_index);
vec_add2 (wrk->pending_disconnects, evt, 1);
clib_memset (evt, 0, sizeof (*evt));
evt->session_handle = session_handle (s);
evt->event_type = SESSION_CTRL_EVT_CLOSE;
session_send_ctrl_evt_to_thread (s, SESSION_CTRL_EVT_CLOSE);
session_alloc (u32 thread_index)
session_worker_t *wrk = &session_main.wrk[thread_index];
pool_get_aligned_will_expand (wrk->sessions, will_expand,
CLIB_CACHE_LINE_BYTES);
/* If we have peekers, let them finish */
if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
clib_rwlock_writer_lock (&wrk->peekers_rw_locks);
pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
clib_rwlock_writer_unlock (&wrk->peekers_rw_locks);
pool_get_aligned (wrk->sessions, s, CLIB_CACHE_LINE_BYTES);
clib_memset (s, 0, sizeof (*s));
s->session_index = s - wrk->sessions;
s->thread_index = thread_index;
session_free (session_t * s)
pool_put (session_main.wrk[s->thread_index].sessions, s);
clib_memset (s, 0xFA, sizeof (*s));
session_free_w_fifos (session_t * s)
segment_manager_dealloc_fifos (s->rx_fifo, s->tx_fifo);
* Cleans up session and lookup table.
* Transport connection must still be valid.
session_delete (session_t * s)
/* Delete from the main lookup table. */
if ((rv = session_lookup_del_session (s)))
clib_warning ("hash delete error, rv %d", rv);
session_free_w_fifos (s);
session_alloc_for_connection (transport_connection_t * tc)
u32 thread_index = tc->thread_index;
ASSERT (thread_index == vlib_get_thread_index ()
|| transport_protocol_is_cl (tc->proto));
s = session_alloc (thread_index);
s->session_type = session_type_from_proto_and_ip (tc->proto, tc->is_ip4);
s->enqueue_epoch = (u64) ~ 0;
s->session_state = SESSION_STATE_CLOSED;
/* Attach transport to session and vice versa */
s->connection_index = tc->c_index;
tc->s_index = s->session_index;
* Discards bytes from buffer chain
* It discards n_bytes_to_drop starting at the first buffer after chain_b
session_enqueue_discard_chain_bytes (vlib_main_t * vm, vlib_buffer_t * b,
vlib_buffer_t ** chain_b,
vlib_buffer_t *next = *chain_b;
u32 to_drop = n_bytes_to_drop;
ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
while (to_drop && (next->flags & VLIB_BUFFER_NEXT_PRESENT))
next = vlib_get_buffer (vm, next->next_buffer);
if (next->current_length > to_drop)
vlib_buffer_advance (next, to_drop);
to_drop -= next->current_length;
next->current_length = 0;
b->total_length_not_including_first_buffer -= n_bytes_to_drop;
* Enqueue buffer chain tail
session_enqueue_chain_tail (session_t * s, vlib_buffer_t * b,
u32 offset, u8 is_in_order)
vlib_buffer_t *chain_b;
u32 chain_bi, len, diff;
vlib_main_t *vm = vlib_get_main ();
if (is_in_order && offset)
diff = offset - b->current_length;
if (diff > b->total_length_not_including_first_buffer)
session_enqueue_discard_chain_bytes (vm, b, &chain_b, diff);
chain_bi = vlib_get_buffer_index (vm, chain_b);
chain_bi = b->next_buffer;
chain_b = vlib_get_buffer (vm, chain_bi);
data = vlib_buffer_get_current (chain_b);
len = chain_b->current_length;
rv = svm_fifo_enqueue_nowait (s->rx_fifo, len, data);
return (rv > 0) ? (written + rv) : written;
/* written more than what was left in chain */
if (written > b->total_length_not_including_first_buffer)
/* drop the bytes that have already been delivered */
session_enqueue_discard_chain_bytes (vm, b, &chain_b, rv - len);
rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset, len, data);
clib_warning ("failed to enqueue multi-buffer seg");
while ((chain_bi = (chain_b->flags & VLIB_BUFFER_NEXT_PRESENT)
? chain_b->next_buffer : 0));
* Enqueue data for delivery to session peer. Does not notify peer of enqueue
* event but on request can queue notification events for later delivery by
* calling session_main_flush_enqueue_events().
* @param tc Transport connection for which data is to be enqueued
* @param b Buffer to be enqueued
* @param offset Offset at which to start enqueueing if out-of-order
* @param queue_event Flag to indicate if peer is to be notified or if event
* is to be queued. The former is useful when more data is
* enqueued and only one event is to be generated.
* @param is_in_order Flag to indicate if data is in order
* @return Number of bytes enqueued or a negative value if enqueueing failed.
session_enqueue_stream_connection (transport_connection_t * tc,
vlib_buffer_t * b, u32 offset,
u8 queue_event, u8 is_in_order)
int enqueued = 0, rv, in_order_off;
s = session_get (tc->s_index, tc->thread_index);
enqueued = svm_fifo_enqueue_nowait (s->rx_fifo,
vlib_buffer_get_current (b));
if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT)
in_order_off = enqueued > b->current_length ? enqueued : 0;
rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
rv = svm_fifo_enqueue_with_offset (s->rx_fifo, offset,
vlib_buffer_get_current (b));
if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && !rv))
session_enqueue_chain_tail (s, b, offset + b->current_length, 0);
/* if something was enqueued, report even this as success for ooo
* segment handling */
/* Queue RX event on this fifo. Eventually these will need to be flushed
* by calling session_main_flush_enqueue_events () */
session_worker_t *wrk;
wrk = session_main_get_worker (s->thread_index);
if (s->enqueue_epoch != wrk->current_enqueue_epoch[tc->proto])
s->enqueue_epoch = wrk->current_enqueue_epoch[tc->proto];
vec_add1 (wrk->session_to_enqueue[tc->proto], s->session_index);
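/* Illustrative sketch, not part of the original file: a transport input node
 * would typically enqueue with queue_event set and flush once per frame, so a
 * burst of segments produces a single app notification. tc and b are
 * hypothetical locals; the two calls are the ones defined in this file.
 *
 *   // per segment, inside the input node loop (offset 0, queue event, in order)
 *   session_enqueue_stream_connection (tc, b, 0, 1, 1);
 *
 *   // once per frame, in the node epilogue
 *   session_main_flush_enqueue_events (tc->proto, vlib_get_thread_index ());
 */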
session_enqueue_dgram_connection (session_t * s,
session_dgram_hdr_t * hdr,
vlib_buffer_t * b, u8 proto, u8 queue_event)
int enqueued = 0, rv, in_order_off;
ASSERT (svm_fifo_max_enqueue (s->rx_fifo)
>= b->current_length + sizeof (*hdr));
svm_fifo_enqueue_nowait (s->rx_fifo, sizeof (session_dgram_hdr_t),
enqueued = svm_fifo_enqueue_nowait (s->rx_fifo, b->current_length,
vlib_buffer_get_current (b));
if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0))
in_order_off = enqueued > b->current_length ? enqueued : 0;
rv = session_enqueue_chain_tail (s, b, in_order_off, 1);
/* Queue RX event on this fifo. Eventually these will need to be flushed
* by calling session_main_flush_enqueue_events () */
session_worker_t *wrk;
wrk = session_main_get_worker (s->thread_index);
if (s->enqueue_epoch != wrk->current_enqueue_epoch[proto])
s->enqueue_epoch = wrk->current_enqueue_epoch[proto];
vec_add1 (wrk->session_to_enqueue[proto], s->session_index);
session_tx_fifo_peek_bytes (transport_connection_t * tc, u8 * buffer,
u32 offset, u32 max_bytes)
session_t *s = session_get (tc->s_index, tc->thread_index);
return svm_fifo_peek (s->tx_fifo, offset, max_bytes, buffer);
session_tx_fifo_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
session_t *s = session_get (tc->s_index, tc->thread_index);
return svm_fifo_dequeue_drop (s->tx_fifo, max_bytes);
session_notify_subscribers (u32 app_index, session_t * s,
svm_fifo_t * f, session_evt_type_t evt_type)
app_worker_t *app_wrk;
app = application_get (app_index);
for (i = 0; i < f->n_subscribers; i++)
app_wrk = application_get_worker (app, f->subscribers[i]);
if (app_worker_lock_and_send_event (app_wrk, s, evt_type))
* Notify session peer that new data has been enqueued.
* @param s Stream session for which the event is to be generated.
* @param lock Flag to indicate if call should lock message queue.
* @return 0 on success or negative number if failed to send notification.
session_enqueue_notify_inline (session_t * s)
app_worker_t *app_wrk;
app_wrk = app_worker_get_if_valid (s->app_wrk_index);
if (PREDICT_FALSE (!app_wrk))
SESSION_DBG ("invalid s->app_index = %d", s->app_wrk_index);
SESSION_EVT_DBG (SESSION_EVT_ENQ, s, ({
ed->data[0] = SESSION_IO_EVT_RX;
ed->data[1] = svm_fifo_max_dequeue (s->rx_fifo);
if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
if (PREDICT_FALSE (svm_fifo_n_subscribers (s->rx_fifo)))
return session_notify_subscribers (app_wrk->app_index, s,
s->rx_fifo, SESSION_IO_EVT_RX);
session_enqueue_notify (session_t * s)
return session_enqueue_notify_inline (s);
session_dequeue_notify (session_t * s)
app_worker_t *app_wrk;
app_wrk = app_worker_get_if_valid (s->app_wrk_index);
if (PREDICT_FALSE (!app_wrk))
if (PREDICT_FALSE (app_worker_lock_and_send_event (app_wrk, s,
if (PREDICT_FALSE (s->tx_fifo->n_subscribers))
return session_notify_subscribers (app_wrk->app_index, s,
s->tx_fifo, SESSION_IO_EVT_TX);
svm_fifo_clear_tx_ntf (s->tx_fifo);
* Flushes queue of sessions that are to be notified of new data
* @param thread_index Thread index for which the flush is to be performed.
* @return 0 on success or a positive number indicating the number of
* failures due to API queue being full.
session_main_flush_enqueue_events (u8 transport_proto, u32 thread_index)
session_worker_t *wrk = session_main_get_worker (thread_index);
indices = wrk->session_to_enqueue[transport_proto];
for (i = 0; i < vec_len (indices); i++)
s = session_get_if_valid (indices[i], thread_index);
if (PREDICT_FALSE (!s))
if (svm_fifo_has_event (s->rx_fifo) || svm_fifo_is_empty (s->rx_fifo))
if (PREDICT_FALSE (session_enqueue_notify_inline (s)))
vec_reset_length (indices);
wrk->session_to_enqueue[transport_proto] = indices;
wrk->current_enqueue_epoch[transport_proto]++;
session_main_flush_all_enqueue_events (u8 transport_proto)
vlib_thread_main_t *vtm = vlib_get_thread_main ();
for (i = 0; i < 1 + vtm->n_threads; i++)
errors += session_main_flush_enqueue_events (transport_proto, i);
session_stream_connect_notify (transport_connection_t * tc, u8 is_fail)
u32 opaque = 0, new_ti, new_si;
app_worker_t *app_wrk;
* Find connection handle and cleanup half-open table
ho_handle = session_lookup_half_open_handle (tc);
if (ho_handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
SESSION_DBG ("half-open was removed!");
session_lookup_del_half_open (tc);
/* Get the app's index from the handle we stored when opening connection
* and the opaque (api_context for external apps) from transport session
app_wrk = app_worker_get_if_valid (ho_handle >> 32);
opaque = tc->s_index;
return app_worker_connect_notify (app_wrk, s, opaque);
s = session_alloc_for_connection (tc);
s->session_state = SESSION_STATE_CONNECTING;
s->app_wrk_index = app_wrk->wrk_index;
new_si = s->session_index;
new_ti = s->thread_index;
if (app_worker_init_connected (app_wrk, s))
app_worker_connect_notify (app_wrk, 0, opaque);
if (app_worker_connect_notify (app_wrk, s, opaque))
s = session_get (new_si, new_ti);
session_free_w_fifos (s);
s = session_get (new_si, new_ti);
s->session_state = SESSION_STATE_READY;
session_lookup_add_connection (tc, session_handle (s));
typedef struct _session_switch_pool_args
u32 new_thread_index;
u32 new_session_index;
} session_switch_pool_args_t;
session_switch_pool (void *cb_args)
session_switch_pool_args_t *args = (session_switch_pool_args_t *) cb_args;
ASSERT (args->thread_index == vlib_get_thread_index ());
s = session_get (args->session_index, args->thread_index);
s->tx_fifo->master_session_index = args->new_session_index;
s->tx_fifo->master_thread_index = args->new_thread_index;
transport_cleanup (session_get_transport_proto (s), s->connection_index,
clib_mem_free (cb_args);
* Move dgram session to the right thread
session_dgram_connect_notify (transport_connection_t * tc,
u32 old_thread_index, session_t ** new_session)
session_switch_pool_args_t *rpc_args;
* Clone half-open session to the right thread.
new_s = session_clone_safe (tc->s_index, old_thread_index);
new_s->connection_index = tc->c_index;
new_s->rx_fifo->master_session_index = new_s->session_index;
new_s->rx_fifo->master_thread_index = new_s->thread_index;
new_s->session_state = SESSION_STATE_READY;
session_lookup_add_connection (tc, session_handle (new_s));
* Ask thread owning the old session to clean it up and make us the tx
rpc_args = clib_mem_alloc (sizeof (*rpc_args));
rpc_args->new_session_index = new_s->session_index;
rpc_args->new_thread_index = new_s->thread_index;
rpc_args->session_index = tc->s_index;
rpc_args->thread_index = old_thread_index;
session_send_rpc_evt_to_thread (rpc_args->thread_index, session_switch_pool,
tc->s_index = new_s->session_index;
new_s->connection_index = tc->c_index;
*new_session = new_s;
* Notification from transport that connection is being closed.
* A disconnect is sent to application but state is not removed. Once
* disconnect is acknowledged by application, session disconnect is called.
* Ultimately this leads to close being called on transport (passive close).
session_transport_closing_notify (transport_connection_t * tc)
app_worker_t *app_wrk;
s = session_get (tc->s_index, tc->thread_index);
if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
app_wrk = app_worker_get_if_valid (s->app_wrk_index);
app = application_get (app_wrk->app_index);
app->cb_fns.session_disconnect_callback (s);
* Notification from transport that connection is being deleted
* This removes the session if it is still valid. It should be called only on
* previously fully established sessions. For instance failed connects should
* call session_stream_connect_notify and indicate that the connect has failed.
session_transport_delete_notify (transport_connection_t * tc)
/* App might've been removed already */
if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
/* Make sure we don't try to send anything more */
svm_fifo_dequeue_drop_all (s->tx_fifo);
switch (s->session_state)
case SESSION_STATE_CREATED:
/* Session was created but accept notification was not yet sent to the
* app. Cleanup everything. */
session_lookup_del_session (s);
session_free_w_fifos (s);
case SESSION_STATE_ACCEPTING:
case SESSION_STATE_TRANSPORT_CLOSING:
/* If transport finishes or times out before we get a reply
* from the app, mark transport as closed and wait for reply
* before removing the session. Cleanup session table in advance
* because transport will soon be closed and closed sessions
* are assumed to have been removed from the lookup table */
session_lookup_del_session (s);
s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
case SESSION_STATE_CLOSING:
case SESSION_STATE_CLOSED_WAITING:
/* Cleanup lookup table as transport needs to still be valid.
* Program transport close to ensure that all session events
* have been cleaned up. Once transport close is called, the
* session is just removed because both transport and app have
* confirmed the close */
session_lookup_del_session (s);
s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
session_program_transport_close (s);
case SESSION_STATE_TRANSPORT_CLOSED:
case SESSION_STATE_CLOSED:
clib_warning ("session state %u", s->session_state);
* Notification from transport that session can be closed
* Should be called by transport only if it was closed with non-empty
* tx fifo and once it decides to begin the closing procedure prior to
* issuing a delete notify. This gives the session layer a chance
* to clean up any outstanding events.
session_transport_closed_notify (transport_connection_t * tc)
if (!(s = session_get_if_valid (tc->s_index, tc->thread_index)))
/* If app close has not been received or has not yet resulted in
* a transport close, only mark the session transport as closed */
if (s->session_state <= SESSION_STATE_CLOSING)
session_lookup_del_session (s);
s->session_state = SESSION_STATE_TRANSPORT_CLOSED;
s->session_state = SESSION_STATE_CLOSED;
* Notify application that connection has been reset.
session_transport_reset_notify (transport_connection_t * tc)
app_worker_t *app_wrk;
s = session_get (tc->s_index, tc->thread_index);
svm_fifo_dequeue_drop_all (s->tx_fifo);
if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
s->session_state = SESSION_STATE_TRANSPORT_CLOSING;
app_wrk = app_worker_get (s->app_wrk_index);
app = application_get (app_wrk->app_index);
app->cb_fns.session_reset_callback (s);
session_stream_accept_notify (transport_connection_t * tc)
app_worker_t *app_wrk;
s = session_get (tc->s_index, tc->thread_index);
app_wrk = app_worker_get_if_valid (s->app_wrk_index);
s->session_state = SESSION_STATE_ACCEPTING;
return app_worker_accept_notify (app_wrk, s);
* Accept a stream session. Optionally ping the server by callback.
session_stream_accept (transport_connection_t * tc, u32 listener_index,
s = session_alloc_for_connection (tc);
s->listener_index = listener_index;
s->session_state = SESSION_STATE_CREATED;
if ((rv = app_worker_init_accepted (s)))
session_lookup_add_connection (tc, session_handle (s));
/* Shoulder-tap the server */
app_worker_t *app_wrk = app_worker_get (s->app_wrk_index);
return app_worker_accept_notify (app_wrk, s);
session_open_cl (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
transport_connection_t *tc;
transport_endpoint_cfg_t *tep;
app_worker_t *app_wrk;
tep = session_endpoint_to_transport_cfg (rmt);
rv = transport_connect (rmt->transport_proto, tep);
SESSION_DBG ("Transport failed to open connection.");
return VNET_API_ERROR_SESSION_CONNECT;
tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
/* For dgram type of service, allocate session and fifos now */
app_wrk = app_worker_get (app_wrk_index);
s = session_alloc_for_connection (tc);
s->app_wrk_index = app_wrk->wrk_index;
s->session_state = SESSION_STATE_OPENED;
if (app_worker_init_connected (app_wrk, s))
return app_worker_connect_notify (app_wrk, s, opaque);
session_open_vc (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
transport_connection_t *tc;
transport_endpoint_cfg_t *tep;
tep = session_endpoint_to_transport_cfg (rmt);
rv = transport_connect (rmt->transport_proto, tep);
SESSION_DBG ("Transport failed to open connection.");
return VNET_API_ERROR_SESSION_CONNECT;
tc = transport_get_half_open (rmt->transport_proto, (u32) rv);
/* If transport offers a stream service, only allocate session once the
* connection has been established.
* Add connection to half-open table and save app and tc index. The
* latter is needed to help establish the connection while the former
* is needed when the connect notify comes and we have to notify the app.
handle = (((u64) app_wrk_index) << 32) | (u64) tc->c_index;
session_lookup_add_half_open (tc, handle);
/* Store api_context (opaque) for when the reply comes. Not the nicest
* thing but better than allocating a separate half-open pool.
tc->s_index = opaque;
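/* Illustrative sketch, not part of the original file: the half-open handle
 * packs the app worker index in the upper 32 bits and the transport
 * connection index in the lower 32, which is how session_stream_connect_notify
 * recovers the worker via app_worker_get_if_valid (ho_handle >> 32). For
 * example, app_wrk_index 3 and c_index 7 give:
 *
 *   u64 handle = (((u64) 3) << 32) | (u64) 7;   // 0x0000000300000007
 *   u32 wrk_index = handle >> 32;               // 3
 *   u32 c_index = handle & 0xffffffff;          // 7
 */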
session_open_app (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
session_endpoint_cfg_t *sep = (session_endpoint_cfg_t *) rmt;
transport_endpoint_cfg_t *tep_cfg = session_endpoint_to_transport_cfg (sep);
sep->app_wrk_index = app_wrk_index;
sep->opaque = opaque;
return transport_connect (rmt->transport_proto, tep_cfg);
typedef int (*session_open_service_fn) (u32, session_endpoint_t *, u32);
static session_open_service_fn session_open_srv_fns[TRANSPORT_N_SERVICES] = {
* Ask transport to open connection to remote transport endpoint.
* Stores handle for matching request with reply since the call can be
* asynchronous. For instance, for TCP the 3-way handshake must complete
* before reply comes. Session is only created once connection is established.
* @param app_index Index of the application requesting the connect
* @param st Session type requested.
* @param tep Remote transport endpoint
* @param opaque Opaque data (typically, api_context) the application expects
* on open completion.
session_open (u32 app_wrk_index, session_endpoint_t * rmt, u32 opaque)
transport_service_type_t tst;
tst = transport_protocol_service_type (rmt->transport_proto);
return session_open_srv_fns[tst] (app_wrk_index, rmt, opaque);
* Ask transport to listen on session endpoint.
* @param s Session for which listen will be called. Note that unlike
* established sessions, listen sessions are not associated to a thread.
* @param sep Local endpoint to be listened on.
session_listen (session_t * ls, session_endpoint_cfg_t * sep)
transport_endpoint_t *tep;
u32 tc_index, s_index;
/* Transport bind/listen */
tep = session_endpoint_to_transport (sep);
s_index = ls->session_index;
tc_index = transport_start_listen (session_get_transport_proto (ls),
if (tc_index == (u32) ~ 0)
/* Attach transport to session. Lookup tables are populated by the app
* worker because local tables (for ct sessions) are not backed by a fib */
ls = listen_session_get (s_index);
ls->connection_index = tc_index;
* Ask transport to stop listening on local transport endpoint.
* @param s Session to stop listening on. It must be in state LISTENING.
session_stop_listen (session_t * s)
transport_proto_t tp = session_get_transport_proto (s);
transport_connection_t *tc;
if (s->session_state != SESSION_STATE_LISTENING)
tc = transport_get_listener (tp, s->connection_index);
return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
session_lookup_del_connection (tc);
transport_stop_listen (tp, s->connection_index);
* Initialize session closing procedure.
* Request is always sent to session node to ensure that all outstanding
* requests are served before transport is notified.
session_close (session_t * s)
if (s->session_state >= SESSION_STATE_CLOSING)
/* Session will only be removed once both app and transport
* acknowledge the close */
if (s->session_state == SESSION_STATE_TRANSPORT_CLOSED)
session_program_transport_close (s);
/* Session already closed. Clear the tx fifo */
if (s->session_state == SESSION_STATE_CLOSED)
svm_fifo_dequeue_drop_all (s->tx_fifo);
s->session_state = SESSION_STATE_CLOSING;
session_program_transport_close (s);
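/* Illustrative sketch, not part of the original file: an app-initiated
 * (active) close only marks the session CLOSING and programs a close event on
 * the owning thread; teardown is then driven by session_transport_close and,
 * once the transport is done, by its delete notify.
 *
 *   session_close (s);                      // app side, marks CLOSING
 *   ...                                     // session node dispatches the
 *                                           // queued SESSION_CTRL_EVT_CLOSE
 *   session_transport_close (s);            // session layer -> transport
 *   ...                                     // transport finishes or times out
 *   session_transport_delete_notify (tc);   // transport -> session cleanup
 */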
* Notify transport the session can be disconnected. This should eventually
* result in a delete notification that allows us to cleanup session state.
* Called for both active/passive disconnects.
* Must be called from the session's thread.
session_transport_close (session_t * s)
/* If transport is already closed, just free the session */
if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
session_free_w_fifos (s);
/* If tx queue wasn't drained, change state to closed waiting for transport.
* This way, the transport, if it so wishes, can continue to try sending the
* outstanding data (in closed state it cannot). It MUST however at one
* point, either after sending everything or after a timeout, call delete
* notify. This will finally lead to the complete cleanup of the session.
if (svm_fifo_max_dequeue (s->tx_fifo))
s->session_state = SESSION_STATE_CLOSED_WAITING;
s->session_state = SESSION_STATE_CLOSED;
transport_close (session_get_transport_proto (s), s->connection_index,
* Cleanup transport and session state.
* Notify transport of the cleanup and free the session. This should
* be called only if transport reported some error and is already closed.
session_transport_cleanup (session_t * s)
s->session_state = SESSION_STATE_CLOSED;
/* Delete from main lookup table before we axe the transport */
session_lookup_del_session (s);
transport_cleanup (session_get_transport_proto (s), s->connection_index,
/* Since we called cleanup, no delete notification will come. So, make
* sure the session is properly freed. */
session_free_w_fifos (s);
* Allocate event queues in the shared-memory segment
* That can either be a newly created memfd segment, that will need to be
* mapped by all stack users, or the binary api's svm region. The latter is
* assumed to be already mapped. NOTE that this assumption DOES NOT hold if
* api clients bootstrap shm api over sockets (i.e. use memfd segments) and
* vpp uses api svm region for event queues.
session_vpp_event_queues_allocate (session_main_t * smm)
u32 evt_q_length = 2048, evt_size = sizeof (session_event_t);
ssvm_private_t *eqs = &smm->evt_qs_segment;
api_main_t *am = &api_main;
uword eqs_size = 64 << 20;
pid_t vpp_pid = getpid ();
if (smm->configured_event_queue_length)
evt_q_length = smm->configured_event_queue_length;
if (smm->evt_qs_use_memfd_seg)
if (smm->evt_qs_segment_size)
eqs_size = smm->evt_qs_segment_size;
eqs->ssvm_size = eqs_size;
eqs->i_am_master = 1;
eqs->my_pid = vpp_pid;
eqs->name = format (0, "%s%c", "evt-qs-segment", 0);
eqs->requested_va = smm->session_baseva;
if (ssvm_master_init (eqs, SSVM_SEGMENT_MEMFD))
clib_warning ("failed to initialize queue segment");
if (smm->evt_qs_use_memfd_seg)
oldheap = ssvm_push_heap (eqs->sh);
oldheap = svm_push_data_heap (am->vlib_rp);
for (i = 0; i < vec_len (smm->wrk); i++)
svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
svm_msg_q_ring_cfg_t rc[SESSION_MQ_N_RINGS] = {
{evt_q_length, evt_size, 0}
{evt_q_length >> 1, 256, 0}
cfg->consumer_pid = 0;
cfg->q_nitems = evt_q_length;
cfg->ring_cfgs = rc;
smm->wrk[i].vpp_event_queue = svm_msg_q_alloc (cfg);
if (smm->evt_qs_use_memfd_seg)
if (svm_msg_q_alloc_consumer_eventfd (smm->wrk[i].vpp_event_queue))
clib_warning ("eventfd returned");
if (smm->evt_qs_use_memfd_seg)
ssvm_pop_heap (oldheap);
svm_pop_heap (oldheap);
session_main_get_evt_q_segment (void)
session_main_t *smm = &session_main;
if (smm->evt_qs_use_memfd_seg)
return &smm->evt_qs_segment;
session_segment_handle (session_t * s)
if (s->session_state == SESSION_STATE_LISTENING)
return SESSION_INVALID_HANDLE;
return segment_manager_make_segment_handle (f->segment_manager,
static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = {
session_tx_fifo_peek_and_snd,
session_tx_fifo_dequeue_and_snd,
session_tx_fifo_dequeue_internal,
session_tx_fifo_dequeue_and_snd
* Initialize session layer for given transport proto and ip version
* Allocates per session type (transport proto + ip version) data structures
* and adds arc from session queue node to session type output node.
session_register_transport (transport_proto_t transport_proto,
const transport_proto_vft_t * vft, u8 is_ip4,
session_main_t *smm = &session_main;
session_type_t session_type;
u32 next_index = ~0;
session_type = session_type_from_proto_and_ip (transport_proto, is_ip4);
vec_validate (smm->session_type_to_next, session_type);
vec_validate (smm->session_tx_fns, session_type);
if (output_node != ~0)
foreach_vlib_main (({
next_index = vlib_node_add_next (this_vlib_main,
session_queue_node.index,
smm->session_type_to_next[session_type] = next_index;
smm->session_tx_fns[session_type] = session_tx_fns[vft->tx_type];
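/* Illustrative sketch, not part of the original file: a transport would
 * typically call this from its enable path, once per ip version, so the
 * session queue node gets an arc to its output node. example_vft and
 * example_output_node are hypothetical; TRANSPORT_PROTO_TCP is assumed to be
 * a valid transport_proto_t value.
 *
 *   session_register_transport (TRANSPORT_PROTO_TCP, &example_vft,
 *                               1, example_output_node.index);  // is_ip4 = 1
 */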
transport_connection_t *
session_get_transport (session_t * s)
if (s->session_state != SESSION_STATE_LISTENING)
return transport_get_connection (session_get_transport_proto (s),
s->connection_index, s->thread_index);
return transport_get_listener (session_get_transport_proto (s),
s->connection_index);
transport_connection_t *
listen_session_get_transport (session_t * s)
return transport_get_listener (session_get_transport_proto (s),
s->connection_index);
session_flush_frames_main_thread (vlib_main_t * vm)
ASSERT (vlib_get_thread_index () == 0);
vlib_process_signal_event_mt (vm, session_queue_process_node.index,
SESSION_Q_PROCESS_FLUSH_FRAMES, 0);
static clib_error_t *
session_manager_main_enable (vlib_main_t * vm)
segment_manager_main_init_args_t _sm_args = { 0 }, *sm_args = &_sm_args;
session_main_t *smm = &session_main;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
u32 num_threads, preallocated_sessions_per_worker;
session_worker_t *wrk;
num_threads = 1 /* main thread */ + vtm->n_threads;
if (num_threads < 1)
return clib_error_return (0, "n_thread_stacks not set");
/* Allocate cache line aligned worker contexts */
vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES);
for (i = 0; i < TRANSPORT_N_PROTO; i++)
for (j = 0; j < num_threads; j++)
smm->wrk[j].current_enqueue_epoch[i] = 1;
for (i = 0; i < num_threads; i++)
vec_validate (wrk->free_event_vector, 128);
_vec_len (wrk->free_event_vector) = 0;
vec_validate (wrk->pending_event_vector, 128);
_vec_len (wrk->pending_event_vector) = 0;
vec_validate (wrk->pending_disconnects, 128);
_vec_len (wrk->pending_disconnects) = 0;
vec_validate (wrk->postponed_event_vector, 128);
_vec_len (wrk->postponed_event_vector) = 0;
wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
wrk->dispatch_period = 500e-6;
if (num_threads > 1)
clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);
vec_validate (smm->last_event_poll_by_thread, num_threads - 1);
/* Allocate vpp event queues segment and queue */
session_vpp_event_queues_allocate (smm);
/* Initialize fifo segment main baseva and timeout */
sm_args->baseva = smm->session_baseva + smm->evt_qs_segment_size;
sm_args->size = smm->session_va_space_size;
segment_manager_main_init (sm_args);
/* Preallocate sessions */
if (smm->preallocated_sessions)
if (num_threads == 1)
pool_init_fixed (smm->wrk[0].sessions, smm->preallocated_sessions);
preallocated_sessions_per_worker =
(1.1 * (f64) smm->preallocated_sessions /
(f64) (num_threads - 1));
for (j = 1; j < num_threads; j++)
pool_init_fixed (smm->wrk[j].sessions,
preallocated_sessions_per_worker);
session_lookup_init ();
app_namespaces_init ();
smm->is_enabled = 1;
/* Enable transports */
transport_enable_disable (vm, 1);
transport_init_tx_pacers_period ();
session_node_enable_disable (u8 is_en)
u8 state = is_en ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
u8 have_workers = vtm->n_threads != 0;
foreach_vlib_main (({
if (have_workers && ii == 0)
vlib_node_set_state (this_vlib_main, session_queue_process_node.index,
vlib_node_t *n = vlib_get_node (this_vlib_main,
session_queue_process_node.index);
vlib_start_process (this_vlib_main, n->runtime_index);
vlib_process_signal_event_mt (this_vlib_main,
session_queue_process_node.index,
SESSION_Q_PROCESS_STOP, 0);
vlib_node_set_state (this_vlib_main, session_queue_node.index,
vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
clib_error_t *error = 0;
if (session_main.is_enabled)
session_node_enable_disable (is_en);
error = session_manager_main_enable (vm);
session_main.is_enabled = 0;
session_node_enable_disable (is_en);
session_manager_main_init (vlib_main_t * vm)
session_main_t *smm = &session_main;
smm->session_baseva = HIGH_SEGMENT_BASEVA;
#if (HIGH_SEGMENT_BASEVA > (4ULL << 30))
smm->session_va_space_size = 128ULL << 30;
smm->evt_qs_segment_size = 64 << 20;
smm->session_va_space_size = 128 << 20;
smm->evt_qs_segment_size = 1 << 20;
smm->is_enabled = 0;
VLIB_INIT_FUNCTION (session_manager_main_init);
static clib_error_t *
session_config_fn (vlib_main_t * vm, unformat_input_t * input)
session_main_t *smm = &session_main;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
if (unformat (input, "event-queue-length %d", &nitems))
smm->configured_event_queue_length = nitems;
clib_warning ("event queue length %d too small, ignored", nitems);
else if (unformat (input, "preallocated-sessions %d",
&smm->preallocated_sessions))
else if (unformat (input, "v4-session-table-buckets %d",
&smm->configured_v4_session_table_buckets))
else if (unformat (input, "v4-halfopen-table-buckets %d",
&smm->configured_v4_halfopen_table_buckets))
else if (unformat (input, "v6-session-table-buckets %d",
&smm->configured_v6_session_table_buckets))
else if (unformat (input, "v6-halfopen-table-buckets %d",
&smm->configured_v6_halfopen_table_buckets))
else if (unformat (input, "v4-session-table-memory %U",
unformat_memory_size, &tmp))
if (tmp >= 0x100000000)
return clib_error_return (0, "memory size %llx (%lld) too large",
smm->configured_v4_session_table_memory = tmp;
else if (unformat (input, "v4-halfopen-table-memory %U",
unformat_memory_size, &tmp))
if (tmp >= 0x100000000)
return clib_error_return (0, "memory size %llx (%lld) too large",
smm->configured_v4_halfopen_table_memory = tmp;
else if (unformat (input, "v6-session-table-memory %U",
unformat_memory_size, &tmp))
if (tmp >= 0x100000000)
return clib_error_return (0, "memory size %llx (%lld) too large",
smm->configured_v6_session_table_memory = tmp;
else if (unformat (input, "v6-halfopen-table-memory %U",
unformat_memory_size, &tmp))
if (tmp >= 0x100000000)
return clib_error_return (0, "memory size %llx (%lld) too large",
smm->configured_v6_halfopen_table_memory = tmp;
else if (unformat (input, "local-endpoints-table-memory %U",
unformat_memory_size, &tmp))
if (tmp >= 0x100000000)
return clib_error_return (0, "memory size %llx (%lld) too large",
smm->local_endpoints_table_memory = tmp;
else if (unformat (input, "local-endpoints-table-buckets %d",
&smm->local_endpoints_table_buckets))
else if (unformat (input, "evt_qs_memfd_seg"))
smm->evt_qs_use_memfd_seg = 1;
else if (unformat (input, "evt_qs_seg_size %U", unformat_memory_size,
&smm->evt_qs_segment_size))
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
VLIB_CONFIG_FUNCTION (session_config_fn, "session");
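/* Example startup.conf stanza (illustrative values) matching the keywords
 * parsed above:
 *
 *   session {
 *     event-queue-length 4096
 *     preallocated-sessions 100000
 *     v4-session-table-buckets 100000
 *     v4-session-table-memory 64m
 *     evt_qs_memfd_seg
 *   }
 */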
* fd.io coding-style-patch-verification: ON
* eval: (c-set-style "gnu")