/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/application_local.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>
#include <sys/timerfd.h>
#define app_check_thread_and_barrier(_fn, _arg)                              \
  if (!vlib_thread_is_main_w_barrier ())                                     \
    {                                                                        \
      vlib_rpc_call_main_thread (_fn, (u8 *) _arg, sizeof (*_arg));          \
      return;                                                                \
    }
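/* Message handlers below invoke this guard on entry: when called on a
 * worker thread, the message is bounced to the main thread as an RPC and
 * the handler returns immediately. Illustrative call pattern (a sketch
 * only, with hypothetical names, mirroring the handlers in this file):
 *
 *   static void
 *   session_mq_example_handler (void *data)
 *   {
 *     example_msg_t *mp = (example_msg_t *) data;
 *     app_check_thread_and_barrier (session_mq_example_handler, mp);
 *     // from here on we run on main, with the worker barrier held
 *   }
 */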
static void
session_wrk_timerfd_update (session_worker_t *wrk, u64 time_ns)
{
  struct itimerspec its;

  its.it_value.tv_sec = 0;
  its.it_value.tv_nsec = time_ns;
  its.it_interval.tv_sec = 0;
  its.it_interval.tv_nsec = its.it_value.tv_nsec;

  if (timerfd_settime (wrk->timerfd, 0, &its, NULL) == -1)
    clib_warning ("timerfd_settime");
}
static u64
session_wrk_tfd_timeout (session_wrk_state_t state, u32 thread_index)
{
  if (state == SESSION_WRK_INTERRUPT)
    return thread_index ? 1e6 : vlib_num_workers () ? 5e8 : 1e6;
  else if (state == SESSION_WRK_IDLE)
    return thread_index ? 1e8 : vlib_num_workers () ? 5e8 : 1e8;
  else
    return 1e9;
}
static void
session_wrk_set_state (session_worker_t *wrk, session_wrk_state_t state)
{
  u64 time_ns;

  wrk->state = state;
  if (wrk->timerfd == -1)
    return;
  time_ns = session_wrk_tfd_timeout (state, wrk->vm->thread_index);
  session_wrk_timerfd_update (wrk, time_ns);
}
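/* Note: the timeouts computed above are in nanoseconds, i.e., roughly 1 ms
 * for interrupted workers, 100 ms for idle workers and 500 ms for main when
 * workers are configured. Arming the per-worker timerfd here means an
 * otherwise quiet thread is still woken periodically; expiry is handled by
 * session_wrk_tfd_read_ready further down, which requests a session-queue
 * interrupt. */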
static transport_endpt_ext_cfg_t *
session_mq_get_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  return (transport_endpt_ext_cfg_t *) c->data;
}
static void
session_mq_free_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  fifo_segment_collect_chunk (fs, 0 /* only one slice */, c);
}
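/* Extended endpoint configuration (typically extra transport/crypto
 * parameters) is written by the app into a chunk of its rx message-queue
 * segment and referenced by offset in the listen/connect messages. The two
 * helpers above map that offset back to a pointer and release the chunk
 * once vnet_listen/vnet_connect have consumed it. */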
static void
session_mq_listen_handler (void *data)
{
  session_listen_msg_t *mp = (session_listen_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  ip_copy (&a->sep.ip, &mp->ip, mp->is_ip4);
  a->sep.port = mp->port;
  a->sep.fib_index = mp->vrf;
  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep.transport_proto = mp->proto;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;
  a->sep_ext.transport_flags = mp->flags;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_listen (a)))
    clib_warning ("listen returned: %U", format_session_error, rv);

  app_wrk = application_get_worker (app, mp->wrk_index);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);
}
static void
session_mq_listen_uri_handler (void *data)
{
  session_listen_uri_msg_t *mp = (session_listen_uri_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->app_index = app->app_index;
  rv = vnet_bind_uri (a);

  app_wrk = application_get_worker (app, 0);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}
static void
session_mq_connect_one (session_connect_msg_t *mp)
{
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  a->sep.port = mp->port;
  a->sep.transport_proto = mp->proto;
  a->sep.peer.fib_index = mp->vrf;
  a->sep.dscp = mp->dscp;
  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
  if (mp->is_ip4)
    {
      ip46_address_mask_ip4 (&a->sep.ip);
      ip46_address_mask_ip4 (&a->sep.peer.ip);
    }
  a->sep.peer.port = mp->lcl_port;
  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep_ext.parent_handle = mp->parent_handle;
  a->sep_ext.transport_flags = mp->flags;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_connect (a)))
    {
      clib_warning ("connect returned: %U", format_session_error, rv);
      app_wrk = application_get_worker (app, mp->wrk_index);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);
}
static void
session_mq_handle_connects_rpc (void *arg)
{
  u32 max_connects = 32, n_connects = 0;
  vlib_main_t *vm = vlib_get_main ();
  session_evt_elt_t *he, *elt, *next;
  session_worker_t *fwrk, *wrk;

  ASSERT (vlib_get_thread_index () == 0);

  /* Pending connects on linked list pertaining to first worker */
  fwrk = session_main_get_worker (1);
  if (!fwrk->n_pending_connects)
    goto update_state;

  vlib_worker_thread_barrier_sync (vm);

  he = clib_llist_elt (fwrk->event_elts, fwrk->pending_connects);
  elt = clib_llist_next (fwrk->event_elts, evt_list, he);

  /* Avoid holding the barrier for too long */
  while (n_connects < max_connects && elt != he)
    {
      next = clib_llist_next (fwrk->event_elts, evt_list, elt);
      clib_llist_remove (fwrk->event_elts, evt_list, elt);
      session_mq_connect_one (session_evt_ctrl_data (fwrk, elt));
      session_evt_ctrl_data_free (fwrk, elt);
      clib_llist_put (fwrk->event_elts, elt);
      elt = next;
      n_connects += 1;
    }

  /* Decrement with worker barrier */
  fwrk->n_pending_connects -= n_connects;

  vlib_worker_thread_barrier_release (vm);

update_state:

  /* Switch worker to poll mode if it was in interrupt mode and had work or
   * back to interrupt if threshold of loops without a connect is passed.
   * While in poll mode, reprogram connects rpc */
  wrk = session_main_get_worker (0);
  if (wrk->state != SESSION_WRK_POLLING)
    {
      if (n_connects)
	{
	  session_wrk_set_state (wrk, SESSION_WRK_POLLING);
	  vlib_node_set_state (vm, session_queue_node.index,
			       VLIB_NODE_STATE_POLLING);
	  wrk->no_connect_loops = 0;
	}
    }
  else
    {
      if (!n_connects)
	{
	  if (++wrk->no_connect_loops > 1e5)
	    {
	      session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
	      vlib_node_set_state (vm, session_queue_node.index,
				   VLIB_NODE_STATE_INTERRUPT);
	    }
	}
      else
	wrk->no_connect_loops = 0;
    }

  if (wrk->state == SESSION_WRK_POLLING)
    {
      elt = session_evt_alloc_ctrl (wrk);
      elt->evt.event_type = SESSION_CTRL_EVT_RPC;
      elt->evt.rpc_args.fp = session_mq_handle_connects_rpc;
    }
}
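/* Connects are funneled through the first worker and drained here on main,
 * at most max_connects (32) per RPC while the worker barrier is held, to
 * bound barrier hold time. While main remains in poll mode the RPC is
 * re-programmed above, so any remaining pending connects are drained on
 * subsequent main-loop iterations. */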
static void
session_mq_connect_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  u32 thread_index = wrk - session_main.wrk;
  session_evt_elt_t *he;

  /* No workers, so just deal with the connect now */
  if (PREDICT_FALSE (!thread_index))
    {
      session_mq_connect_one (session_evt_ctrl_data (wrk, elt));
      return;
    }

  if (PREDICT_FALSE (thread_index != 1))
    {
      clib_warning ("Connect on wrong thread. Dropping");
      return;
    }

  /* Add to pending list to be handled by main thread */
  he = clib_llist_elt (wrk->event_elts, wrk->pending_connects);
  clib_llist_add_tail (wrk->event_elts, evt_list, elt, he);

  /* Decremented with worker barrier */
  wrk->n_pending_connects += 1;
  if (wrk->n_pending_connects == 1)
    {
      vlib_node_set_interrupt_pending (vlib_get_main_by_index (0),
				       session_queue_node.index);
      session_send_rpc_evt_to_thread (0, session_mq_handle_connects_rpc, 0);
    }
}
static void
session_mq_connect_uri_handler (void *data)
{
  session_connect_uri_msg_t *mp = (session_connect_uri_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  if ((rv = vnet_connect_uri (a)))
    {
      clib_warning ("connect_uri returned: %d", rv);
      app_wrk = application_get_worker (app, 0 /* default wrk only */ );
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }
}
static void
session_mq_shutdown_handler (void *data)
{
  session_shutdown_msg_t *mp = (session_shutdown_msg_t *) data;
  vnet_shutdown_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_shutdown_session (a);
}
static void
session_mq_disconnect_handler (void *data)
{
  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_disconnect_session (a);
}
static void
app_mq_detach_handler (void *data)
{
  session_app_detach_msg_t *mp = (session_app_detach_msg_t *) data;
  vnet_app_detach_args_t _a, *a = &_a;
  application_t *app;

  app_check_thread_and_barrier (app_mq_detach_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->api_client_index = mp->client_index;
  vnet_application_detach (a);
}
static void
session_mq_unlisten_rpc (session_unlisten_msg_t *mp)
{
  vlib_main_t *vm = vlib_get_main ();
  vnet_unlisten_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  session_handle_t sh;
  application_t *app;
  u32 context;
  int rv;

  sh = mp->handle;
  context = mp->context;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->app_index = app->app_index;
  a->handle = sh;
  a->wrk_map_index = mp->wrk_index;

  vlib_worker_thread_barrier_sync (vm);

  if ((rv = vnet_unlisten (a)))
    clib_warning ("unlisten returned: %d", rv);

  vlib_worker_thread_barrier_release (vm);

  app_wrk = application_get_worker (app, a->wrk_map_index);
  if (!app_wrk)
    return;

  mq_send_unlisten_reply (app_wrk, sh, context, rv);
  clib_mem_free (mp);
}
static void
session_mq_unlisten_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
  u32 thread_index = wrk - session_main.wrk;
  session_unlisten_msg_t *mp, *arg;

  mp = session_evt_ctrl_data (wrk, elt);
  arg = clib_mem_alloc (sizeof (session_unlisten_msg_t));
  clib_memcpy_fast (arg, mp, sizeof (*arg));

  if (PREDICT_FALSE (!thread_index))
    {
      session_mq_unlisten_rpc (arg);
      return;
    }

  session_send_rpc_evt_to_thread_force (0, session_mq_unlisten_rpc, arg);
}
static void
session_mq_accepted_reply_handler (void *data)
{
  session_accepted_reply_msg_t *mp = (session_accepted_reply_msg_t *) data;
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_state_t old_state;
  app_worker_t *app_wrk;
  session_t *s;

  /* Mail this back from the main thread. We're not polling in main
   * thread so we're using other workers for notifications. */
  if (session_thread_from_handle (mp->handle) == 0 && vlib_num_workers () &&
      vlib_get_thread_index () != 0)
    {
      vlib_rpc_call_main_thread (session_mq_accepted_reply_handler,
				 (u8 *) mp, sizeof (*mp));
      return;
    }

  s = session_get_from_handle_if_valid (mp->handle);
  if (!s)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != mp->context)
    {
      clib_warning ("app doesn't own session");
      return;
    }

  /* Server isn't interested, disconnect the session */
  if (mp->retval)
    {
      a->app_index = mp->context;
      a->handle = mp->handle;
      vnet_disconnect_session (a);
      return;
    }

  /* Special handling for cut-through sessions */
  if (!session_has_transport (s))
    {
      s->session_state = SESSION_STATE_READY;
      ct_session_connect_notify (s, SESSION_E_NONE);
      return;
    }

  old_state = s->session_state;
  s->session_state = SESSION_STATE_READY;

  if (!svm_fifo_is_empty_prod (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  /* Closed while waiting for app to reply. Resend disconnect */
  if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
    {
      app_worker_close_notify (app_wrk, s);
      s->session_state = old_state;
      return;
    }
}
static void
session_mq_reset_reply_handler (void *data)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_reset_reply_msg_t *mp;
  app_worker_t *app_wrk;
  session_t *s;
  application_t *app;
  u32 index, thread_index;

  mp = (session_reset_reply_msg_t *) data;
  app = application_lookup (mp->context);
  if (!app)
    return;

  session_parse_handle (mp->handle, &index, &thread_index);
  s = session_get_if_valid (index, thread_index);

  /* No session or not the right session */
  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (!app_wrk || app_wrk->app_index != app->app_index)
    {
      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
		    mp->handle);
      return;
    }

  /* Client objected to resetting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* This comes as a response to a reset, transport only waiting for
   * confirmation to remove connection state, no need to disconnect */
  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}
static void
session_mq_disconnected_handler (void *data)
{
  session_disconnected_reply_msg_t *rmp;
  vnet_disconnect_args_t _a, *a = &_a;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_disconnected_msg_t *mp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv = 0;

  mp = (session_disconnected_msg_t *) data;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("could not disconnect handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  app = application_lookup (mp->client_index);
  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
    {
      clib_warning ("could not disconnect session: %llu app: %u",
		    mp->handle, mp->client_index);
      return;
    }

  a->handle = mp->handle;
  a->app_index = app_wrk->wrk_index;
  rv = vnet_disconnect_session (a);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
  rmp = (session_disconnected_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->context = mp->context;
  rmp->retval = rv;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}
static void
session_mq_disconnected_reply_handler (void *data)
{
  session_disconnected_reply_msg_t *mp;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  mp = (session_disconnected_reply_msg_t *) data;

  /* Client objected to disconnecting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* Disconnect has been confirmed. Confirm close to transport */
  app = application_lookup (mp->context);
  if (app)
    {
      a->handle = mp->handle;
      a->app_index = app->app_index;
      vnet_disconnect_session (a);
    }
}
static void
session_mq_worker_update_handler (void *data)
{
  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
  session_worker_update_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  u32 owner_app_wrk_map;
  session_event_t *evt;
  session_t *s;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }
  owner_app_wrk_map = app_wrk->wrk_map_index;
  app_wrk = application_get_worker (app, mp->wrk_index);

  /* This needs to come from the new owner */
  if (mp->req_wrk_index == owner_app_wrk_map)
    {
      session_req_worker_update_msg_t *wump;

      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
					   SESSION_MQ_CTRL_EVT_RING,
					   SVM_Q_WAIT, msg);
      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
      clib_memset (evt, 0, sizeof (*evt));
      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
      wump = (session_req_worker_update_msg_t *) evt->data;
      wump->session_handle = mp->handle;
      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
      return;
    }

  app_worker_own_session (app_wrk, s);

  /*
   * Send reply message. Also update fifos.
   */
  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
  rmp = (session_worker_update_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  if (s->rx_fifo)
    rmp->rx_fifo = fifo_segment_fifo_offset (s->rx_fifo);
  if (s->tx_fifo)
    rmp->tx_fifo = fifo_segment_fifo_offset (s->tx_fifo);
  rmp->segment_handle = session_segment_handle (s);
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);

  /*
   * Retransmit messages that may have been lost
   */
  if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);

  if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
    app_worker_close_notify (app_wrk, s);
}
static void
session_mq_app_wrk_rpc_handler (void *data)
{
  session_app_wrk_rpc_msg_t *mp = (session_app_wrk_rpc_msg_t *) data;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_app_wrk_rpc_msg_t *rmp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  app_wrk = application_get_worker (app, mp->wrk_index);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT,
				       msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_APP_WRK_RPC;
  rmp = (session_app_wrk_rpc_msg_t *) evt->data;
  clib_memcpy (rmp->data, mp->data, sizeof (mp->data));
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}
static void
session_mq_transport_attr_handler (void *data)
{
  session_transport_attr_msg_t *mp = (session_transport_attr_msg_t *) data;
  session_transport_attr_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  session_event_t *evt;
  application_t *app;
  session_t *s;
  int rv;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }

  rv = session_transport_attribute (s, mp->is_get, &mp->attr);

  svm_msg_q_lock_and_alloc_msg_w_ring (
    app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_TRANSPORT_ATTR_REPLY;
  rmp = (session_transport_attr_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->retval = rv;
  rmp->is_get = mp->is_get;
  if (!rv && mp->is_get)
    rmp->attr = mp->attr;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}
vlib_node_registration_t session_queue_node;

typedef struct
{
  u32 session_index;
  u32 server_thread_index;
} session_queue_trace_t;

/* packet trace format function */
static u8 *
format_session_queue_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);

  s = format (s, "session index %d thread index %d",
	      t->session_index, t->server_thread_index);
  return s;
}
#define foreach_session_queue_error                                          \
  _ (TX, tx, INFO, "Packets transmitted")                                    \
  _ (TIMER, timer, INFO, "Timer events")                                     \
  _ (NO_BUFFER, no_buffer, ERROR, "Out of buffers")

typedef enum
{
#define _(f, n, s, d) SESSION_QUEUE_ERROR_##f,
  foreach_session_queue_error
#undef _
    SESSION_QUEUE_N_ERROR,
} session_queue_error_t;

static vlib_error_desc_t session_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_session_queue_error
#undef _
};
enum
{
  SESSION_TX_NO_BUFFERS = -2,
  SESSION_TX_NO_DATA,
  SESSION_TX_OK
};
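/* Return codes for the tx functions below: SESSION_TX_OK when a burst was
 * produced (or custom tx handled the event), SESSION_TX_NO_DATA when the
 * session had nothing to send or was not ready, and SESSION_TX_NO_BUFFERS
 * when buffer allocation failed and the event was requeued. */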
static void
session_tx_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
			u32 next_index, vlib_buffer_t **bufs, u16 n_segs,
			session_t *s, u32 n_trace)
{
  vlib_buffer_t **b = bufs;

  while (n_trace && n_segs)
    {
      if (PREDICT_TRUE (vlib_trace_buffer (vm, node, next_index, b[0],
					   1 /* follow_chain */)))
	{
	  session_queue_trace_t *t =
	    vlib_add_trace (vm, node, b[0], sizeof (*t));
	  t->session_index = s->session_index;
	  t->server_thread_index = s->thread_index;
	  n_trace--;
	}
      b++;
      n_segs--;
    }
  vlib_set_trace_count (vm, node, n_trace);
}
always_inline void
session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
			    vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  vlib_buffer_t *chain_b, *prev_b;
  u32 chain_bi0, to_deq, left_from_seg;
  u16 len_to_deq, n_bytes_read;
  u8 *data, j;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  chain_b = b;
  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
			    ctx->left_to_snd);
  to_deq = left_from_seg;
  for (j = 1; j < ctx->n_bufs_per_seg; j++)
    {
      prev_b = chain_b;
      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);

      *n_bufs -= 1;
      chain_bi0 = ctx->tx_buffers[*n_bufs];
      chain_b = vlib_get_buffer (vm, chain_bi0);
      chain_b->current_data = 0;
      data = vlib_buffer_get_current (chain_b);
      if (peek_data)
	{
	  n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo,
					ctx->sp.tx_offset, len_to_deq, data);
	  ctx->sp.tx_offset += n_bytes_read;
	}
      else
	{
	  if (ctx->transport_vft->transport_options.tx_type ==
	      TRANSPORT_TX_DGRAM)
	    {
	      svm_fifo_t *f = ctx->s->tx_fifo;
	      session_dgram_hdr_t *hdr = &ctx->hdr;
	      u16 deq_now;
	      u32 offset;

	      deq_now = clib_min (hdr->data_length - hdr->data_offset,
				  len_to_deq);
	      offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	      n_bytes_read = svm_fifo_peek (f, offset, deq_now, data);
	      ASSERT (n_bytes_read > 0);

	      hdr->data_offset += n_bytes_read;
	      if (hdr->data_offset == hdr->data_length)
		{
		  offset = hdr->data_length + SESSION_CONN_HDR_LEN;
		  svm_fifo_dequeue_drop (f, offset);
		  if (ctx->left_to_snd > n_bytes_read)
		    svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
				   (u8 *) & ctx->hdr);
		}
	      else if (ctx->left_to_snd == n_bytes_read)
		svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
					 sizeof (session_dgram_pre_hdr_t));
	    }
	  else
	    n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					     len_to_deq, data);
	}
      ASSERT (n_bytes_read == len_to_deq);
      chain_b->current_length = n_bytes_read;
      b->total_length_not_including_first_buffer += chain_b->current_length;

      /* update previous buffer */
      prev_b->next_buffer = chain_bi0;
      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      /* update current buffer */
      chain_b->next_buffer = 0;

      to_deq -= n_bytes_read;
      if (to_deq == 0)
	break;
    }
  ASSERT (to_deq == 0
	  && b->total_length_not_including_first_buffer == left_from_seg);
  ctx->left_to_snd -= left_from_seg;
}
always_inline void
session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
			vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  u32 len_to_deq;
  u8 *data0;
  int n_bytes_read;

  /*
   * Start with the first buffer in chain
   */
  b->error = 0;
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_data = 0;

  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);

  if (peek_data)
    {
      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset,
				    len_to_deq, data0);
      ASSERT (n_bytes_read > 0);
      /* Keep track of progress locally, transport is also supposed to
       * increment it independently when pushing the header */
      ctx->sp.tx_offset += n_bytes_read;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  session_dgram_hdr_t *hdr = &ctx->hdr;
	  svm_fifo_t *f = ctx->s->tx_fifo;
	  u16 deq_now;
	  u32 offset;

	  ASSERT (hdr->data_length > hdr->data_offset);
	  deq_now = clib_min (hdr->data_length - hdr->data_offset,
			      len_to_deq);
	  offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	  n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
	  ASSERT (n_bytes_read > 0);

	  if (transport_connection_is_cless (ctx->tc))
	    {
	      ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
	      ctx->tc->rmt_port = hdr->rmt_port;
	    }
	  hdr->data_offset += n_bytes_read;
	  if (hdr->data_offset == hdr->data_length)
	    {
	      offset = hdr->data_length + SESSION_CONN_HDR_LEN;
	      svm_fifo_dequeue_drop (f, offset);
	      if (ctx->left_to_snd > n_bytes_read)
		svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			       (u8 *) & ctx->hdr);
	    }
	  else if (ctx->left_to_snd == n_bytes_read)
	    svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
				     sizeof (session_dgram_pre_hdr_t));
	}
      else
	{
	  n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					   len_to_deq, data0);
	  ASSERT (n_bytes_read > 0);
	}
    }

  b->current_length = n_bytes_read;
  ctx->left_to_snd -= n_bytes_read;

  /*
   * Fill in the remaining buffers in the chain, if any
   */
  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
    session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data);
}
always_inline u8
session_tx_not_ready (session_t * s, u8 peek_data)
{
  if (peek_data)
    {
      if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY))
	return 0;
      /* Can retransmit for closed sessions but can't send new data if
       * session is not ready or closed */
      else if (s->session_state < SESSION_STATE_READY)
	return 1;
      /* Allow accepting session to send custom packets.
       * For instance, tcp want to send acks in established, but
       * the app has not called accept() yet */
      if (s->session_state == SESSION_STATE_ACCEPTING &&
	  (s->flags & SESSION_F_CUSTOM_TX))
	return 0;
      return 2;
    }
  else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
    {
      /* Allow closed transports to still send custom packets.
       * For instance, tcp may want to send acks in time-wait. */
      if (s->session_state != SESSION_STATE_TRANSPORT_DELETED
	  && (s->flags & SESSION_F_CUSTOM_TX))
	return 0;
      return 2;
    }
  return 0;
}
always_inline transport_connection_t *
session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
{
  if (peek_data)
    {
      return ctx->transport_vft->get_connection (ctx->s->connection_index,
						 ctx->s->thread_index);
    }
  else
    {
      if (ctx->s->session_state == SESSION_STATE_LISTENING)
	return ctx->transport_vft->get_listener (ctx->s->connection_index);
      else
	{
	  return ctx->transport_vft->get_connection (ctx->s->connection_index,
						     ctx->s->thread_index);
	}
    }
}
always_inline void
session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
			       u32 max_segs, u8 peek_data)
{
  u32 n_bytes_per_buf, n_bytes_per_seg;

  n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
  ctx->max_dequeue = svm_fifo_max_dequeue_cons (ctx->s->tx_fifo);

  if (peek_data)
    {
      /* Offset in rx fifo from where to peek data */
      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
	{
	  ctx->max_len_to_snd = 0;
	  return;
	}
      ctx->max_dequeue -= ctx->sp.tx_offset;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  u32 len, chain_limit;

	  if (ctx->max_dequeue <= sizeof (ctx->hdr))
	    {
	      ctx->max_len_to_snd = 0;
	      return;
	    }

	  svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			 (u8 *) & ctx->hdr);
	  ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
	  len = ctx->hdr.data_length - ctx->hdr.data_offset;

	  /* Process multiple dgrams if smaller than min (buf_space, mss).
	   * This avoids handling multiple dgrams if they require buffer
	   * chains */
	  chain_limit = clib_min (n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN,
				  ctx->sp.snd_mss);
	  if (ctx->hdr.data_length <= chain_limit)
	    {
	      u32 first_dgram_len, dgram_len, offset, max_offset;
	      session_dgram_hdr_t hdr;

	      ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, len);
	      offset = ctx->hdr.data_length + sizeof (session_dgram_hdr_t);
	      first_dgram_len = len;
	      max_offset = clib_min (ctx->max_dequeue, 16 << 10);

	      while (offset < max_offset)
		{
		  svm_fifo_peek (ctx->s->tx_fifo, offset, sizeof (ctx->hdr),
				 (u8 *) & hdr);
		  ASSERT (hdr.data_length > hdr.data_offset);
		  dgram_len = hdr.data_length - hdr.data_offset;
		  if (len + dgram_len > ctx->max_dequeue
		      || first_dgram_len != dgram_len)
		    break;
		  len += dgram_len;
		  offset += sizeof (hdr) + hdr.data_length;
		}
	    }

	  ctx->max_dequeue = len;
	}
    }
  ASSERT (ctx->max_dequeue > 0);

  /* Ensure we're not writing more than transport window allows */
  if (ctx->max_dequeue < ctx->sp.snd_space)
    {
      /* Constrained by tx queue. Try to send only fully formed segments */
      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
	(ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
	ctx->max_dequeue;
    }
  else
    {
      /* Expectation is that snd_space0 is already a multiple of snd_mss */
      ctx->max_len_to_snd = ctx->sp.snd_space;
    }

  /* Check if we're tx constrained by the node */
  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
  if (ctx->n_segs_per_evt > max_segs)
    {
      ctx->n_segs_per_evt = max_segs;
      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
    }

  ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
  if (ctx->n_segs_per_evt > 1)
    {
      u32 n_bytes_last_seg, n_bufs_last_seg;

      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
      n_bytes_last_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd
	- ((ctx->n_segs_per_evt - 1) * ctx->sp.snd_mss);
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      n_bufs_last_seg = ceil ((f64) n_bytes_last_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ((ctx->n_segs_per_evt - 1) * ctx->n_bufs_per_seg)
	+ n_bufs_last_seg;
    }
  else
    {
      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd;
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ctx->n_bufs_per_seg;
    }

  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
				     n_bytes_per_buf -
				     TRANSPORT_MAX_HDRS_LEN);
}
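/* Illustrative sizing example (values assumed, not taken from this file):
 * with snd_mss = 1460, a default buffer data size of 2048 and
 * TRANSPORT_MAX_HDRS_LEN reserved up front, one segment fits in a single
 * buffer, so n_bufs_per_seg = 1 and n_bufs_needed = n_segs_per_evt. With a
 * jumbo-sized snd_mss, n_bytes_per_seg exceeds the buffer size and the
 * extra buffers are filled by session_tx_fifo_chain_tail above. */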
always_inline void
session_tx_maybe_reschedule (session_worker_t * wrk,
			     session_tx_context_t * ctx,
			     session_evt_elt_t * elt)
{
  session_t *s = ctx->s;

  svm_fifo_unset_event (s->tx_fifo);
  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
    {
      if (svm_fifo_set_event (s->tx_fifo))
	session_evt_add_head_old (wrk, elt);
    }
  else
    transport_connection_deschedule (ctx->tc);
}
always_inline int
session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
				vlib_node_runtime_t * node,
				session_evt_elt_t * elt,
				int *n_tx_packets, u8 peek_data)
{
  u32 n_trace, n_left, pbi, next_index, max_burst;
  session_tx_context_t *ctx = &wrk->ctx;
  session_main_t *smm = &session_main;
  session_event_t *e = &elt->evt;
  vlib_main_t *vm = wrk->vm;
  transport_proto_t tp;
  vlib_buffer_t *pb;
  u16 n_bufs, rv;

  if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data))))
    {
      if (rv < 2)
	session_evt_add_old (wrk, elt);
      return SESSION_TX_NO_DATA;
    }

  next_index = smm->session_type_to_next[ctx->s->session_type];
  max_burst = SESSION_NODE_FRAME_SIZE - *n_tx_packets;

  tp = session_get_transport_proto (ctx->s);
  ctx->transport_vft = transport_protocol_get_vft (tp);
  ctx->tc = session_tx_get_transport (ctx, peek_data);

  if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH))
    {
      if (ctx->transport_vft->flush_data)
	ctx->transport_vft->flush_data (ctx->tc);
      e->event_type = SESSION_IO_EVT_TX;
    }

  if (ctx->s->flags & SESSION_F_CUSTOM_TX)
    {
      u32 n_custom_tx;
      ctx->s->flags &= ~SESSION_F_CUSTOM_TX;
      ctx->sp.max_burst_size = max_burst;
      n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, &ctx->sp);
      *n_tx_packets += n_custom_tx;
      if (PREDICT_FALSE
	  (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
	return SESSION_TX_OK;
      max_burst -= n_custom_tx;
      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
	{
	  session_evt_add_old (wrk, elt);
	  return SESSION_TX_OK;
	}
    }

  /* Connection previously descheduled because it had no data to send.
   * Clear descheduled flag and reset pacer if in use */
  if (transport_connection_is_descheduled (ctx->tc))
    transport_connection_clear_descheduled (ctx->tc);

  transport_connection_snd_params (ctx->tc, &ctx->sp);

  if (!ctx->sp.snd_space)
    {
      /* If the deschedule flag was set, remove session from scheduler.
       * Transport is responsible for rescheduling this session. */
      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
	transport_connection_deschedule (ctx->tc);
      /* Request to postpone the session, e.g., zero-wnd and transport
       * is not currently probing */
      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
	session_evt_add_old (wrk, elt);
      /* This flow queue is "empty" so it should be re-evaluated before
       * the ones that have data to send. */
      else
	session_evt_add_head_old (wrk, elt);

      return SESSION_TX_NO_DATA;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    {
      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
      if (snd_space < TRANSPORT_PACER_MIN_BURST)
	{
	  session_evt_add_head_old (wrk, elt);
	  return SESSION_TX_NO_DATA;
	}
      snd_space = clib_min (ctx->sp.snd_space, snd_space);
      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
	snd_space - snd_space % ctx->sp.snd_mss : snd_space;
    }

  /* Check how much we can pull. */
  session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);

  if (PREDICT_FALSE (!ctx->max_len_to_snd))
    {
      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
      session_tx_maybe_reschedule (wrk, ctx, elt);
      return SESSION_TX_NO_DATA;
    }

  vec_validate_aligned (ctx->tx_buffers, ctx->n_bufs_needed - 1,
			CLIB_CACHE_LINE_BYTES);
  n_bufs = vlib_buffer_alloc (vm, ctx->tx_buffers, ctx->n_bufs_needed);
  if (PREDICT_FALSE (n_bufs < ctx->n_bufs_needed))
    {
      if (n_bufs)
	vlib_buffer_free (vm, ctx->tx_buffers, n_bufs);
      session_evt_add_head_old (wrk, elt);
      vlib_node_increment_counter (wrk->vm, node->node_index,
				   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
      return SESSION_TX_NO_BUFFERS;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    transport_connection_tx_pacer_update_bytes (ctx->tc, ctx->max_len_to_snd);

  ctx->left_to_snd = ctx->max_len_to_snd;
  n_left = ctx->n_segs_per_evt;

  vec_validate (ctx->transport_pending_bufs, n_left);

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, bi1;

      pbi = ctx->tx_buffers[n_bufs - 3];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);
      pbi = ctx->tx_buffers[n_bufs - 4];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);

      bi0 = ctx->tx_buffers[--n_bufs];
      bi1 = ctx->tx_buffers[--n_bufs];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);
      session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data);

      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left] = b0;
      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left + 1] = b1;
      n_left -= 2;

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_buffers, bi1);
      vec_add1 (wrk->pending_tx_nexts, next_index);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }
  while (n_left)
    {
      vlib_buffer_t *b0;
      u32 bi0;

      if (n_left > 1)
	{
	  pbi = ctx->tx_buffers[n_bufs - 2];
	  pb = vlib_get_buffer (vm, pbi);
	  vlib_prefetch_buffer_header (pb, STORE);
	}

      bi0 = ctx->tx_buffers[--n_bufs];
      b0 = vlib_get_buffer (vm, bi0);
      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);

      ctx->transport_pending_bufs[ctx->n_segs_per_evt - n_left] = b0;
      n_left -= 1;

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }

  /* Ask transport to push headers */
  ctx->transport_vft->push_header (ctx->tc, ctx->transport_pending_bufs,
				   ctx->n_segs_per_evt);

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0))
    session_tx_trace_frame (vm, node, next_index, ctx->transport_pending_bufs,
			    ctx->n_segs_per_evt, ctx->s, n_trace);

  if (PREDICT_FALSE (n_bufs))
    vlib_buffer_free (vm, ctx->tx_buffers, n_bufs);

  *n_tx_packets += ctx->n_segs_per_evt;

  SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
	       ctx->s->tx_fifo->has_event, wrk->last_vlib_time);

  ASSERT (ctx->left_to_snd == 0);

  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
   * check if application enqueued more data and reschedule accordingly */
  if (ctx->max_len_to_snd < ctx->max_dequeue)
    session_evt_add_old (wrk, elt);
  else
    session_tx_maybe_reschedule (wrk, ctx, elt);

  if (!peek_data)
    {
      u32 n_dequeued = ctx->max_len_to_snd;
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	n_dequeued += ctx->n_segs_per_evt * SESSION_CONN_HDR_LEN;
      if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, n_dequeued))
	session_dequeue_notify (ctx->s);
    }
  return SESSION_TX_OK;
}
int
session_tx_fifo_peek_and_snd (session_worker_t * wrk,
			      vlib_node_runtime_t * node,
			      session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1);
}

int
session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
				 vlib_node_runtime_t * node,
				 session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0);
}
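/* The two wrappers above only differ in peek_data: stream transports peek
 * from the fifo and leave data in place for retransmission (peek_data = 1),
 * while datagram transports dequeue and consume it (peek_data = 0). */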
static int
session_tx_fifo_dequeue_internal (session_worker_t * wrk,
				  vlib_node_runtime_t * node,
				  session_evt_elt_t * elt, int *n_tx_packets)
{
  transport_send_params_t *sp = &wrk->ctx.sp;
  session_t *s = wrk->ctx.s;
  u32 n_packets;

  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
    return 0;

  /* Clear custom-tx flag used to request reschedule for tx */
  s->flags &= ~SESSION_F_CUSTOM_TX;

  sp->flags = 0;
  sp->bytes_dequeued = 0;
  sp->max_burst_size = clib_min (SESSION_NODE_FRAME_SIZE - *n_tx_packets,
				 TRANSPORT_PACER_MAX_BURST_PKTS);

  n_packets = transport_custom_tx (session_get_transport_proto (s), s, sp);
  *n_tx_packets += n_packets;

  if (s->flags & SESSION_F_CUSTOM_TX)
    {
      session_evt_add_old (wrk, elt);
    }
  else if (!(sp->flags & TRANSPORT_SND_F_DESCHED))
    {
      svm_fifo_unset_event (s->tx_fifo);
      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
	if (svm_fifo_set_event (s->tx_fifo))
	  session_evt_add_head_old (wrk, elt);
    }

  if (sp->bytes_dequeued &&
      svm_fifo_needs_deq_ntf (s->tx_fifo, sp->bytes_dequeued))
    session_dequeue_notify (s);

  return n_packets;
}
always_inline session_t *
session_event_get_session (session_worker_t * wrk, session_event_t * e)
{
  if (PREDICT_FALSE (pool_is_free_index (wrk->sessions, e->session_index)))
    return 0;
  ASSERT (session_is_valid (e->session_index, wrk->vm->thread_index));
  return pool_elt_at_index (wrk->sessions, e->session_index);
}
always_inline void
session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt)
{
  clib_llist_index_t ei;
  void (*fp) (void *);
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_CTRL_EVT_RPC:
      fp = e->rpc_args.fp;
      (*fp) (e->rpc_args.arg);
      break;
    case SESSION_CTRL_EVT_HALF_CLOSE:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_half_close (s);
      break;
    case SESSION_CTRL_EVT_CLOSE:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_close (s);
      break;
    case SESSION_CTRL_EVT_RESET:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_reset (s);
      break;
    case SESSION_CTRL_EVT_LISTEN:
      session_mq_listen_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_LISTEN_URI:
      session_mq_listen_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_UNLISTEN:
      session_mq_unlisten_handler (wrk, elt);
      break;
    case SESSION_CTRL_EVT_CONNECT:
      session_mq_connect_handler (wrk, elt);
      break;
    case SESSION_CTRL_EVT_CONNECT_URI:
      session_mq_connect_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_SHUTDOWN:
      session_mq_shutdown_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECT:
      session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED:
      session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_ACCEPTED_REPLY:
      session_mq_accepted_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED_REPLY:
      session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk,
								    elt));
      break;
    case SESSION_CTRL_EVT_RESET_REPLY:
      session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_WORKER_UPDATE:
      session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_DETACH:
      app_mq_detach_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_WRK_RPC:
      session_mq_app_wrk_rpc_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_TRANSPORT_ATTR:
      session_mq_transport_attr_handler (session_evt_ctrl_data (wrk, elt));
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  /* Regrab elements in case pool moved */
  elt = clib_llist_elt (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    {
      e = &elt->evt;
      if (e->event_type >= SESSION_CTRL_EVT_BOUND)
	session_evt_ctrl_data_free (wrk, elt);
      clib_llist_put (wrk->event_elts, elt);
    }
  SESSION_EVT (SESSION_EVT_COUNTS, CNT_CTRL_EVTS, 1, wrk);
}
always_inline void
session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
			   session_evt_elt_t * elt, int *n_tx_packets)
{
  session_main_t *smm = &session_main;
  app_worker_t *app_wrk;
  clib_llist_index_t ei;
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_TX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s))
	break;
      CLIB_PREFETCH (s->tx_fifo, sizeof (*(s->tx_fifo)), LOAD);
      wrk->ctx.s = s;
      /* Spray packets in per session type frames, since they go to
       * different nodes */
      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
      break;
    case SESSION_IO_EVT_RX:
      s = session_event_get_session (wrk, e);
      if (!s)
	break;
      transport_app_rx_evt (session_get_transport_proto (s),
			    s->connection_index, s->thread_index);
      break;
    case SESSION_IO_EVT_BUILTIN_RX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING))
	break;
      svm_fifo_unset_event (s->rx_fifo);
      app_wrk = app_worker_get (s->app_wrk_index);
      app_worker_builtin_rx (app_wrk, s);
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
      s = session_get_from_handle_if_valid (e->session_handle);
      wrk->ctx.s = s;
      if (PREDICT_TRUE (s != 0))
	session_tx_fifo_dequeue_internal (wrk, node, elt, n_tx_packets);
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  SESSION_EVT (SESSION_IO_EVT_COUNTS, e->event_type, 1, wrk);

  /* Regrab elements in case pool moved */
  elt = clib_llist_elt (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    clib_llist_put (wrk->event_elts, elt);
}
static const u32 session_evt_msg_sizes[] = {
#define _(symc, sym)                                                         \
  [SESSION_CTRL_EVT_ ## symc] = sizeof (session_ ## sym ##_msg_t),
  foreach_session_ctrl_evt
#undef _
};
always_inline void
session_update_time_subscribers (session_main_t *smm, clib_time_type_t now,
				 u32 thread_index)
{
  session_update_time_fn *fn;

  vec_foreach (fn, smm->update_time_fns)
    (*fn) (now, thread_index);
}
static void
session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt)
{
  session_evt_elt_t *elt;

  if (evt->event_type >= SESSION_CTRL_EVT_RPC)
    {
      elt = session_evt_alloc_ctrl (wrk);
      if (evt->event_type >= SESSION_CTRL_EVT_BOUND)
	{
	  elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk);
	  elt->evt.event_type = evt->event_type;
	  clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data,
			    session_evt_msg_sizes[evt->event_type]);
	}
      else
	{
	  /* Internal control events fit into io events footprint */
	  clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
	}
    }
  else
    {
      elt = session_evt_alloc_new (wrk);
      clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
    }
}
static void
session_flush_pending_tx_buffers (session_worker_t * wrk,
				  vlib_node_runtime_t * node)
{
  vlib_buffer_enqueue_to_next_vec (wrk->vm, node, &wrk->pending_tx_buffers,
				   &wrk->pending_tx_nexts,
				   vec_len (wrk->pending_tx_nexts));
  vec_reset_length (wrk->pending_tx_buffers);
  vec_reset_length (wrk->pending_tx_nexts);
}
int
session_wrk_handle_mq (session_worker_t *wrk, svm_msg_q_t *mq)
{
  svm_msg_q_msg_t _msg, *msg = &_msg;
  u32 i, n_to_dequeue = 0;
  session_event_t *evt;

  n_to_dequeue = svm_msg_q_size (mq);
  for (i = 0; i < n_to_dequeue; i++)
    {
      svm_msg_q_sub_raw (mq, msg);
      evt = svm_msg_q_msg_data (mq, msg);
      session_evt_add_to_list (wrk, evt);
      svm_msg_q_free_msg (mq, msg);
    }

  return n_to_dequeue;
}
static void
session_wrk_update_state (session_worker_t *wrk)
{
  vlib_main_t *vm = wrk->vm;

  if (wrk->state == SESSION_WRK_POLLING)
    {
      if (clib_llist_elts (wrk->event_elts) == 4 &&
	  vlib_last_vectors_per_main_loop (vm) < 1)
	{
	  session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
	  vlib_node_set_state (vm, session_queue_node.index,
			       VLIB_NODE_STATE_INTERRUPT);
	}
    }
  else if (wrk->state == SESSION_WRK_INTERRUPT)
    {
      if (clib_llist_elts (wrk->event_elts) > 4 ||
	  vlib_last_vectors_per_main_loop (vm) > 1)
	{
	  session_wrk_set_state (wrk, SESSION_WRK_POLLING);
	  vlib_node_set_state (vm, session_queue_node.index,
			       VLIB_NODE_STATE_POLLING);
	}
      else if (PREDICT_FALSE (!pool_elts (wrk->sessions)))
	{
	  session_wrk_set_state (wrk, SESSION_WRK_IDLE);
	}
    }
  else
    {
      if (clib_llist_elts (wrk->event_elts))
	{
	  session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
	}
    }
}
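/* Sketch of the adaptive worker state machine above, assuming
 * SESSION_WRK_F_ADAPTIVE is set (the count of 4 presumably corresponds to
 * the worker's permanently allocated event-list head elements):
 *
 *   POLLING   -> INTERRUPT  event list down to its heads and < 1 vector/loop
 *   INTERRUPT -> POLLING    events accumulate or > 1 vector/loop
 *   INTERRUPT -> IDLE       no sessions remain on the worker
 *   IDLE      -> INTERRUPT  as soon as events are queued again
 */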
static uword
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame)
{
  u32 thread_index = vm->thread_index, __clib_unused n_evts;
  session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
  session_main_t *smm = vnet_get_session_main ();
  session_worker_t *wrk = &smm->wrk[thread_index];
  clib_llist_index_t ei, next_ei, old_ti;
  int n_tx_packets;

  SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);

  session_wrk_update_time (wrk, vlib_time_now (vm));

  /*
   *  Update transport time
   */
  session_update_time_subscribers (smm, wrk->last_vlib_time, thread_index);
  n_tx_packets = vec_len (wrk->pending_tx_buffers);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, UPDATE_TIME, wrk);

  /*
   *  Dequeue new internal mq events
   */
  n_evts = session_wrk_handle_mq (wrk, wrk->vpp_event_queue);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, MQ_DEQ, wrk, n_evts);

  /*
   * Handle control events
   */
  ei = wrk->ctrl_head;
  ctrl_he = clib_llist_elt (wrk->event_elts, ei);
  next_ei = clib_llist_next_index (ctrl_he, evt_list);
  old_ti = clib_llist_prev_index (ctrl_he, evt_list);
  while (ei != old_ti)
    {
      ei = next_ei;
      elt = clib_llist_elt (wrk->event_elts, next_ei);
      next_ei = clib_llist_next_index (elt, evt_list);
      clib_llist_remove (wrk->event_elts, evt_list, elt);
      session_event_dispatch_ctrl (wrk, elt);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, CTRL_EVTS, wrk);

  /*
   * Handle the new io events.
   */
  new_he = clib_llist_elt (wrk->event_elts, wrk->new_head);
  old_he = clib_llist_elt (wrk->event_elts, wrk->old_head);
  old_ti = clib_llist_prev_index (old_he, evt_list);

  ei = clib_llist_next_index (new_he, evt_list);
  while (ei != wrk->new_head && n_tx_packets < SESSION_NODE_FRAME_SIZE)
    {
      elt = clib_llist_elt (wrk->event_elts, ei);
      ei = clib_llist_next_index (elt, evt_list);
      clib_llist_remove (wrk->event_elts, evt_list, elt);
      session_event_dispatch_io (wrk, node, elt, &n_tx_packets);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, NEW_IO_EVTS, wrk);

  /*
   * Handle the old io events, if we had any prior to processing the new ones
   */
  if (old_ti != wrk->old_head)
    {
      old_he = clib_llist_elt (wrk->event_elts, wrk->old_head);
      ei = clib_llist_next_index (old_he, evt_list);

      while (n_tx_packets < SESSION_NODE_FRAME_SIZE)
	{
	  elt = clib_llist_elt (wrk->event_elts, ei);
	  next_ei = clib_llist_next_index (elt, evt_list);
	  clib_llist_remove (wrk->event_elts, evt_list, elt);

	  session_event_dispatch_io (wrk, node, elt, &n_tx_packets);

	  if (ei == old_ti)
	    break;

	  ei = next_ei;
	}
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, OLD_IO_EVTS, wrk);

  if (vec_len (wrk->pending_tx_buffers))
    session_flush_pending_tx_buffers (wrk, node);

  vlib_node_increment_counter (vm, session_queue_node.index,
			       SESSION_QUEUE_ERROR_TX, n_tx_packets);

  SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets);

  if (wrk->flags & SESSION_WRK_F_ADAPTIVE)
    session_wrk_update_state (wrk);

  return n_tx_packets;
}
VLIB_REGISTER_NODE (session_queue_node) = {
  .function = session_queue_node_fn,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = SESSION_QUEUE_N_ERROR,
  .error_counters = session_error_counters,
  .state = VLIB_NODE_STATE_DISABLED,
};
static clib_error_t *
session_wrk_tfd_read_ready (clib_file_t *cf)
{
  session_worker_t *wrk = session_main_get_worker (cf->private_data);
  u64 buf;
  int rv;

  vlib_node_set_interrupt_pending (wrk->vm, session_queue_node.index);
  rv = read (wrk->timerfd, &buf, sizeof (buf));
  if (rv < 0 && errno != EAGAIN)
    clib_unix_warning ("failed");
  return 0;
}
static clib_error_t *
session_wrk_tfd_write_ready (clib_file_t *cf)
{
  return 0;
}
void
session_wrk_enable_adaptive_mode (session_worker_t *wrk)
{
  u32 thread_index = wrk->vm->thread_index;
  clib_file_t template = { 0 };

  if ((wrk->timerfd = timerfd_create (CLOCK_MONOTONIC, TFD_NONBLOCK)) < 0)
    clib_warning ("timerfd_create");

  template.read_function = session_wrk_tfd_read_ready;
  template.write_function = session_wrk_tfd_write_ready;
  template.file_descriptor = wrk->timerfd;
  template.private_data = thread_index;
  template.polling_thread_index = thread_index;
  template.description = format (0, "session-wrk-tfd-%u", thread_index);

  wrk->timerfd_file = clib_file_add (&file_main, &template);
  wrk->flags |= SESSION_WRK_F_ADAPTIVE;
}
static clib_error_t *
session_queue_exit (vlib_main_t * vm)
{
  if (vlib_get_n_threads () < 2)
    return 0;

  /*
   * Shut off (especially) worker-thread session nodes.
   * Otherwise, vpp can crash as the main thread unmaps the
   * API segment.
   */
  vlib_worker_thread_barrier_sync (vm);
  session_node_enable_disable (0 /* is_enable */ );
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);
static uword
session_queue_run_on_main (vlib_main_t * vm)
{
  vlib_node_runtime_t *node;

  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, 0);
}
static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		       vlib_frame_t * f)
{
  uword *event_data = 0;
  f64 timeout = 1.0;
  uword event_type;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
	{
	case SESSION_Q_PROCESS_RUN_ON_MAIN:
	  /* Run session queue node on main thread */
	  session_queue_run_on_main (vm);
	  break;
	case SESSION_Q_PROCESS_STOP:
	  vlib_node_set_state (vm, session_queue_process_node.index,
			       VLIB_NODE_STATE_DISABLED);
	  timeout = 100000.0;
	  break;
	case ~0:
	  /* Timed out. Run on main to ensure all events are handled */
	  session_queue_run_on_main (vm);
	  break;
	}
      vec_reset_length (event_data);
    }

  return 0;
}
VLIB_REGISTER_NODE (session_queue_process_node) =
{
  .function = session_queue_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
};
static_always_inline uword
session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  session_main_t *sm = &session_main;
  if (!sm->wrk[0].vpp_event_queue)
    return 0;
  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, frame);
}

VLIB_REGISTER_NODE (session_queue_pre_input_node) =
{
  .function = session_queue_pre_input_inline,
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .name = "session-queue-main",
  .state = VLIB_NODE_STATE_DISABLED,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */