/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/application_local.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>
#include <sys/timerfd.h>

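/* Session control messages must be handled on the main thread with the
 * worker barrier held. When a handler is invoked on a worker instead, this
 * macro forwards the message to main via RPC and returns from the caller. */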
#define app_check_thread_and_barrier(_fn, _arg)			\
  if (!vlib_thread_is_main_w_barrier ())				\
    {									\
      vlib_rpc_call_main_thread (_fn, (u8 *) _arg, sizeof(*_arg));	\
      return;								\
    }

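/* Applications pass extended transport endpoint configuration (e.g. crypto
 * parameters) as a chunk allocated in their rx mq fifo segment. The helpers
 * below map that chunk and release it once the request has been handled. */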
static transport_endpt_ext_cfg_t *
session_mq_get_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  return (transport_endpt_ext_cfg_t *) c->data;
}

static void
session_mq_free_ext_config (application_t *app, uword offset)
{
  svm_fifo_chunk_t *c;
  fifo_segment_t *fs;
  u32 ctrl_thread = vlib_num_workers () ? 1 : 0;

  fs = application_get_rx_mqs_segment (app);
  c = fs_chunk_ptr (fs->h, offset);
  fifo_segment_collect_chunk (fs, ctrl_thread, c);
}

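/* app mq handler: start listening on the requested endpoint on behalf of
 * the app worker and mail back the resulting listener handle. */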
static void
session_mq_listen_handler (void *data)
{
  session_listen_msg_t *mp = (session_listen_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  ip_copy (&a->sep.ip, &mp->ip, mp->is_ip4);
  a->sep.port = mp->port;
  a->sep.fib_index = mp->vrf;
  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep.transport_proto = mp->proto;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;
  a->sep_ext.transport_flags = mp->flags;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_listen (a)))
    clib_warning ("listen returned: %U", format_session_error, rv);

  app_wrk = application_get_worker (app, mp->wrk_index);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);
}

static void
session_mq_listen_uri_handler (void *data)
{
  session_listen_uri_msg_t *mp = (session_listen_uri_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->app_index = app->app_index;
  rv = vnet_bind_uri (a);

  app_wrk = application_get_worker (app, 0);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

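/* app mq handler: initiate a connect for the requesting app worker. On
 * immediate failure the connected callback is sent right away; otherwise
 * the transport reports the outcome asynchronously. */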
static void
session_mq_connect_handler (void *data)
{
  session_connect_msg_t *mp = (session_connect_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  a->sep.port = mp->port;
  a->sep.transport_proto = mp->proto;
  a->sep.peer.fib_index = mp->vrf;
  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
  if (mp->is_ip4)
    {
      ip46_address_mask_ip4 (&a->sep.ip);
      ip46_address_mask_ip4 (&a->sep.peer.ip);
    }
  a->sep.peer.port = mp->lcl_port;
  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep_ext.parent_handle = mp->parent_handle;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->sep_ext.transport_flags = mp->flags;
  if (mp->hostname_len)
    {
      vec_validate (a->sep_ext.hostname, mp->hostname_len - 1);
      clib_memcpy_fast (a->sep_ext.hostname, mp->hostname, mp->hostname_len);
    }
  a->api_context = mp->context;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;

  if (mp->ext_config)
    a->sep_ext.ext_cfg = session_mq_get_ext_config (app, mp->ext_config);

  if ((rv = vnet_connect (a)))
    {
      clib_warning ("connect returned: %U", format_session_error, rv);
      app_wrk = application_get_worker (app, mp->wrk_index);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }

  if (mp->ext_config)
    session_mq_free_ext_config (app, mp->ext_config);

  vec_free (a->sep_ext.hostname);
}

static void
session_mq_connect_uri_handler (void *data)
{
  session_connect_uri_msg_t *mp = (session_connect_uri_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  if ((rv = vnet_connect_uri (a)))
    {
      clib_warning ("connect_uri returned: %d", rv);
      app_wrk = application_get_worker (app, 0 /* default wrk only */ );
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }
}

static void
session_mq_disconnect_handler (void *data)
{
  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_disconnect_session (a);
}

static void
app_mq_detach_handler (void *data)
{
  session_app_detach_msg_t *mp = (session_app_detach_msg_t *) data;
  vnet_app_detach_args_t _a, *a = &_a;
  application_t *app;

  app_check_thread_and_barrier (app_mq_detach_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->api_client_index = mp->client_index;
  vnet_application_detach (a);
}

static void
session_mq_unlisten_handler (void *data)
{
  session_unlisten_msg_t *mp = (session_unlisten_msg_t *) data;
  vnet_unlisten_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_unlisten_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->app_index = app->app_index;
  a->handle = mp->handle;
  a->wrk_map_index = mp->wrk_index;
  if ((rv = vnet_unlisten (a)))
    clib_warning ("unlisten returned: %d", rv);

  app_wrk = application_get_worker (app, a->wrk_map_index);
  if (!app_wrk)
    return;

  mq_send_unlisten_reply (app_wrk, mp->handle, mp->context, rv);
}

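/* Handles the app's reply to an accept notification. A negative reply kills
 * the session; a positive one moves it to ready state and flushes any rx
 * data or pending close notifications that accumulated in the meantime. */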
static void
session_mq_accepted_reply_handler (void *data)
{
  session_accepted_reply_msg_t *mp = (session_accepted_reply_msg_t *) data;
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_state_t old_state;
  app_worker_t *app_wrk;
  session_t *s;

  /* Server isn't interested, kill the session */
  if (mp->retval)
    {
      a->app_index = mp->context;
      a->handle = mp->handle;
      vnet_disconnect_session (a);
      return;
    }

  /* Mail this back from the main thread. We're not polling in main
   * thread so we're using other workers for notifications. */
  if (vlib_num_workers () && vlib_get_thread_index () != 0
      && session_thread_from_handle (mp->handle) == 0)
    {
      vlib_rpc_call_main_thread (session_mq_accepted_reply_handler,
				 (u8 *) mp, sizeof (*mp));
      return;
    }

  s = session_get_from_handle_if_valid (mp->handle);
  if (!s)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != mp->context)
    {
      clib_warning ("app doesn't own session");
      return;
    }

  if (!session_has_transport (s))
    {
      s->session_state = SESSION_STATE_READY;
      if (ct_session_connect_notify (s))
	return;
    }
  else
    {
      old_state = s->session_state;
      s->session_state = SESSION_STATE_READY;

      if (!svm_fifo_is_empty_prod (s->rx_fifo))
	app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

      /* Closed while waiting for app to reply. Resend disconnect */
      if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
	{
	  app_worker_close_notify (app_wrk, s);
	  s->session_state = old_state;
	  return;
	}
    }
}

static void
session_mq_reset_reply_handler (void *data)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_reset_reply_msg_t *mp;
  app_worker_t *app_wrk;
  session_t *s;
  application_t *app;
  u32 index, thread_index;

  mp = (session_reset_reply_msg_t *) data;
  app = application_lookup (mp->context);
  if (!app)
    return;

  session_parse_handle (mp->handle, &index, &thread_index);
  s = session_get_if_valid (index, thread_index);

  /* No session or not the right session */
  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (!app_wrk || app_wrk->app_index != app->app_index)
    {
      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
		    mp->handle);
      return;
    }

  /* Client objected to resetting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* This comes as a response to a reset, transport only waiting for
   * confirmation to remove connection state, no need to disconnect */
  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}

static void
session_mq_disconnected_handler (void *data)
{
  session_disconnected_reply_msg_t *rmp;
  vnet_disconnect_args_t _a, *a = &_a;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_disconnected_msg_t *mp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv = 0;

  mp = (session_disconnected_msg_t *) data;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("could not disconnect handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  app = application_lookup (mp->client_index);
  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
    {
      clib_warning ("could not disconnect session: %llu app: %u",
		    mp->handle, mp->client_index);
      return;
    }

  a->handle = mp->handle;
  a->app_index = app_wrk->wrk_index;
  rv = vnet_disconnect_session (a);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
  rmp = (session_disconnected_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->context = mp->context;
  rmp->retval = rv;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

static void
session_mq_disconnected_reply_handler (void *data)
{
  session_disconnected_reply_msg_t *mp;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  mp = (session_disconnected_reply_msg_t *) data;

  /* Client objected to disconnecting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* Disconnect has been confirmed. Confirm close to transport */
  app = application_lookup (mp->context);
  if (!app)
    return;

  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}

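/* Moves a session to a different app worker. If the request comes from the
 * current owner, the prospective new worker is first asked to confirm the
 * update; once the new owner requests it, ownership is transferred and a
 * reply carrying the session's fifo offsets is sent back. */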
static void
session_mq_worker_update_handler (void *data)
{
  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
  session_worker_update_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  u32 owner_app_wrk_map;
  session_event_t *evt;
  session_t *s;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }
  owner_app_wrk_map = app_wrk->wrk_map_index;
  app_wrk = application_get_worker (app, mp->wrk_index);

  /* This needs to come from the new owner */
  if (mp->req_wrk_index == owner_app_wrk_map)
    {
      session_req_worker_update_msg_t *wump;

      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
					   SESSION_MQ_CTRL_EVT_RING,
					   SVM_Q_WAIT, msg);
      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
      clib_memset (evt, 0, sizeof (*evt));
      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
      wump = (session_req_worker_update_msg_t *) evt->data;
      wump->session_handle = mp->handle;
      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
      return;
    }

  app_worker_own_session (app_wrk, s);

  /*
   * Send reply
   */
  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
  rmp = (session_worker_update_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  if (s->rx_fifo)
    rmp->rx_fifo = fifo_segment_fifo_offset (s->rx_fifo);
  if (s->tx_fifo)
    rmp->tx_fifo = fifo_segment_fifo_offset (s->tx_fifo);
  rmp->segment_handle = session_segment_handle (s);
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);

  /*
   * Retransmit messages that may have been lost
   */
  if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);

  if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
    app_worker_close_notify (app_wrk, s);
}

static void
session_mq_app_wrk_rpc_handler (void *data)
{
  session_app_wrk_rpc_msg_t *mp = (session_app_wrk_rpc_msg_t *) data;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_app_wrk_rpc_msg_t *rmp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  app_wrk = application_get_worker (app, mp->wrk_index);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT,
				       msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_APP_WRK_RPC;
  rmp = (session_app_wrk_rpc_msg_t *) evt->data;
  clib_memcpy (rmp->data, mp->data, sizeof (mp->data));
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

static void
session_mq_transport_attr_handler (void *data)
{
  session_transport_attr_msg_t *mp = (session_transport_attr_msg_t *) data;
  session_transport_attr_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }

  rv = session_transport_attribute (s, mp->is_get, &mp->attr);

  svm_msg_q_lock_and_alloc_msg_w_ring (
    app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_TRANSPORT_ATTR_REPLY;
  rmp = (session_transport_attr_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->retval = rv;
  rmp->is_get = mp->is_get;
  if (!rv && mp->is_get)
    rmp->attr = mp->attr;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

vlib_node_registration_t session_queue_node;

typedef struct
{
  u32 session_index;
  u32 server_thread_index;
} session_queue_trace_t;

/* packet trace format function */
static u8 *
format_session_queue_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);

  s = format (s, "session index %d thread index %d",
	      t->session_index, t->server_thread_index);
  return s;
}

#define foreach_session_queue_error		\
_(TX, "Packets transmitted")			\
_(TIMER, "Timer events")			\
_(NO_BUFFER, "Out of buffers")

typedef enum
{
#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
  foreach_session_queue_error
#undef _
  SESSION_QUEUE_N_ERROR,
} session_queue_error_t;

static char *session_queue_error_strings[] = {
#define _(sym,string) string,
  foreach_session_queue_error
#undef _
};

enum
{
  SESSION_TX_NO_BUFFERS = -2,
  SESSION_TX_NO_DATA,
  SESSION_TX_OK
};

static void
session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			u32 next_index, u32 * to_next, u16 n_segs,
			session_t * s, u32 n_trace)
{
  while (n_trace && n_segs)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, to_next[0]);
      if (PREDICT_TRUE
	  (vlib_trace_buffer
	   (vm, node, next_index, b, 1 /* follow_chain */ )))
	{
	  session_queue_trace_t *t =
	    vlib_add_trace (vm, node, b, sizeof (*t));
	  t->session_index = s->session_index;
	  t->server_thread_index = s->thread_index;
	  n_trace--;
	}
      to_next++;
      n_segs--;
    }
  vlib_set_trace_count (vm, node, n_trace);
}

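/* Builds the rest of a buffer chain for one segment: dequeues (or peeks)
 * fifo data into additional buffers and links them to the first buffer. */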
always_inline void
session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
			    vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  vlib_buffer_t *chain_b, *prev_b;
  u32 chain_bi0, to_deq, left_from_seg;
  session_worker_t *wrk;
  u16 len_to_deq, n_bytes_read;
  u8 *data, j;

  wrk = session_main_get_worker (ctx->s->thread_index);
  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  chain_b = b;
  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
			    ctx->left_to_snd);
  to_deq = left_from_seg;
  for (j = 1; j < ctx->n_bufs_per_seg; j++)
    {
      prev_b = chain_b;
      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);

      *n_bufs -= 1;
      chain_bi0 = wrk->tx_buffers[*n_bufs];
      chain_b = vlib_get_buffer (vm, chain_bi0);
      chain_b->current_data = 0;
      data = vlib_buffer_get_current (chain_b);
      if (peek_data)
	{
	  n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo,
					ctx->sp.tx_offset, len_to_deq, data);
	  ctx->sp.tx_offset += n_bytes_read;
	}
      else
	{
	  if (ctx->transport_vft->transport_options.tx_type ==
	      TRANSPORT_TX_DGRAM)
	    {
	      svm_fifo_t *f = ctx->s->tx_fifo;
	      session_dgram_hdr_t *hdr = &ctx->hdr;
	      u16 deq_now;
	      u32 offset;

	      deq_now = clib_min (hdr->data_length - hdr->data_offset,
				  len_to_deq);
	      offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	      n_bytes_read = svm_fifo_peek (f, offset, deq_now, data);
	      ASSERT (n_bytes_read > 0);

	      hdr->data_offset += n_bytes_read;
	      if (hdr->data_offset == hdr->data_length)
		{
		  offset = hdr->data_length + SESSION_CONN_HDR_LEN;
		  svm_fifo_dequeue_drop (f, offset);
		  if (ctx->left_to_snd > n_bytes_read)
		    svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
				   (u8 *) & ctx->hdr);
		}
	      else if (ctx->left_to_snd == n_bytes_read)
		svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
					 sizeof (session_dgram_pre_hdr_t));
	    }
	  else
	    n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					     len_to_deq, data);
	}
      ASSERT (n_bytes_read == len_to_deq);
      chain_b->current_length = n_bytes_read;
      b->total_length_not_including_first_buffer += chain_b->current_length;

      /* update previous buffer */
      prev_b->next_buffer = chain_bi0;
      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      /* update current buffer */
      chain_b->next_buffer = 0;

      to_deq -= n_bytes_read;
      if (to_deq == 0)
	break;
    }
  ASSERT (to_deq == 0
	  && b->total_length_not_including_first_buffer == left_from_seg);
  ctx->left_to_snd -= left_from_seg;
}

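/* Fills the first buffer of a segment with fifo data, leaving headroom for
 * transport headers, and chains extra buffers if the segment doesn't fit. */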
always_inline void
session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
			vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  u32 len_to_deq;
  u8 *data0;
  int n_bytes_read;

  /*
   * Start with the first buffer in chain
   */
  b->error = 0;
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_data = 0;

  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);

  if (peek_data)
    {
      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset,
				    len_to_deq, data0);
      ASSERT (n_bytes_read > 0);
      /* Keep track of progress locally, transport is also supposed to
       * increment it independently when pushing the header */
      ctx->sp.tx_offset += n_bytes_read;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  session_dgram_hdr_t *hdr = &ctx->hdr;
	  svm_fifo_t *f = ctx->s->tx_fifo;
	  u16 deq_now;
	  u32 offset;

	  ASSERT (hdr->data_length > hdr->data_offset);
	  deq_now = clib_min (hdr->data_length - hdr->data_offset,
			      len_to_deq);
	  offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	  n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
	  ASSERT (n_bytes_read > 0);

	  if (ctx->s->session_state == SESSION_STATE_LISTENING)
	    {
	      ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
	      ctx->tc->rmt_port = hdr->rmt_port;
	    }
	  hdr->data_offset += n_bytes_read;
	  if (hdr->data_offset == hdr->data_length)
	    {
	      offset = hdr->data_length + SESSION_CONN_HDR_LEN;
	      svm_fifo_dequeue_drop (f, offset);
	      if (ctx->left_to_snd > n_bytes_read)
		svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			       (u8 *) & ctx->hdr);
	    }
	  else if (ctx->left_to_snd == n_bytes_read)
	    svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
				     sizeof (session_dgram_pre_hdr_t));
	}
      else
	{
	  n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					   len_to_deq, data0);
	  ASSERT (n_bytes_read > 0);
	}
    }
  b->current_length = n_bytes_read;
  ctx->left_to_snd -= n_bytes_read;

  /*
   * Fill in the remaining buffers in the chain, if any
   */
  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
    session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data);
}

always_inline u8
session_tx_not_ready (session_t * s, u8 peek_data)
{
  if (peek_data)
    {
      if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY))
	return 0;
      /* Can retransmit for closed sessions but can't send new data if
       * session is not ready or closed */
      else if (s->session_state < SESSION_STATE_READY)
	return 1;
      else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
	{
	  /* Allow closed transports to still send custom packets.
	   * For instance, tcp may want to send acks in time-wait. */
	  if (s->session_state != SESSION_STATE_TRANSPORT_DELETED
	      && (s->flags & SESSION_F_CUSTOM_TX))
	    return 0;
	  return 2;
	}
    }
  return 0;
}

always_inline transport_connection_t *
session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
{
  if (peek_data)
    {
      return ctx->transport_vft->get_connection (ctx->s->connection_index,
						 ctx->s->thread_index);
    }
  else
    {
      if (ctx->s->session_state == SESSION_STATE_LISTENING)
	return ctx->transport_vft->get_listener (ctx->s->connection_index);
      else
	{
	  return ctx->transport_vft->get_connection (ctx->s->connection_index,
						     ctx->s->thread_index);
	}
    }
}

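/* Computes how much data can be sent for this dispatch: bounds the dequeue
 * by what is in the fifo and the transport's send space, then derives the
 * number of segments and buffers needed. */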
always_inline void
session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
			       u32 max_segs, u8 peek_data)
{
  u32 n_bytes_per_buf, n_bytes_per_seg;

  n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
  ctx->max_dequeue = svm_fifo_max_dequeue_cons (ctx->s->tx_fifo);

  if (peek_data)
    {
      /* Offset in rx fifo from where to peek data */
      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
	{
	  ctx->max_len_to_snd = 0;
	  return;
	}
      ctx->max_dequeue -= ctx->sp.tx_offset;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  u32 len, chain_limit;

	  if (ctx->max_dequeue <= sizeof (ctx->hdr))
	    {
	      ctx->max_len_to_snd = 0;
	      return;
	    }

	  svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			 (u8 *) & ctx->hdr);
	  ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
	  len = ctx->hdr.data_length - ctx->hdr.data_offset;

	  /* Process multiple dgrams if smaller than min (buf_space, mss).
	   * This avoids handling multiple dgrams if they require buffer
	   * chains */
	  chain_limit = clib_min (n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN,
				  ctx->sp.snd_mss);
	  if (ctx->hdr.data_length <= chain_limit)
	    {
	      u32 first_dgram_len, dgram_len, offset, max_offset;
	      session_dgram_hdr_t hdr;

	      ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, len);
	      offset = ctx->hdr.data_length + sizeof (session_dgram_hdr_t);
	      first_dgram_len = len;
	      max_offset = clib_min (ctx->max_dequeue, 16 << 10);

	      while (offset < max_offset)
		{
		  svm_fifo_peek (ctx->s->tx_fifo, offset, sizeof (ctx->hdr),
				 (u8 *) & hdr);
		  ASSERT (hdr.data_length > hdr.data_offset);
		  dgram_len = hdr.data_length - hdr.data_offset;
		  if (len + dgram_len > ctx->max_dequeue
		      || first_dgram_len != dgram_len)
		    break;
		  len += dgram_len;
		  offset += sizeof (hdr) + hdr.data_length;
		}
	    }

	  ctx->max_dequeue = len;
	}
    }
  ASSERT (ctx->max_dequeue > 0);

  /* Ensure we're not writing more than transport window allows */
  if (ctx->max_dequeue < ctx->sp.snd_space)
    {
      /* Constrained by tx queue. Try to send only fully formed segments */
      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
	(ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
	ctx->max_dequeue;
    }
  else
    {
      /* Expectation is that snd_space0 is already a multiple of snd_mss */
      ctx->max_len_to_snd = ctx->sp.snd_space;
    }

  /* Check if we're tx constrained by the node */
  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
  if (ctx->n_segs_per_evt > max_segs)
    {
      ctx->n_segs_per_evt = max_segs;
      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
    }

  ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
  if (ctx->n_segs_per_evt > 1)
    {
      u32 n_bytes_last_seg, n_bufs_last_seg;

      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
      n_bytes_last_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd
	- ((ctx->n_segs_per_evt - 1) * ctx->sp.snd_mss);
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      n_bufs_last_seg = ceil ((f64) n_bytes_last_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ((ctx->n_segs_per_evt - 1) * ctx->n_bufs_per_seg)
	+ n_bufs_last_seg;
    }
  else
    {
      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd;
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ctx->n_bufs_per_seg;
    }

  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
				     n_bytes_per_buf -
				     TRANSPORT_MAX_HDRS_LEN);
}

always_inline void
session_tx_maybe_reschedule (session_worker_t * wrk,
			     session_tx_context_t * ctx,
			     session_evt_elt_t * elt)
{
  session_t *s = ctx->s;

  svm_fifo_unset_event (s->tx_fifo);
  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
    if (svm_fifo_set_event (s->tx_fifo))
      session_evt_add_head_old (wrk, elt);
}

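/* Main tx routine: reads data from the session's tx fifo, fills and chains
 * buffers, asks the transport to push its headers and queues the packets
 * for the session type's output node. peek_data distinguishes transports
 * that retransmit (e.g. tcp, data is peeked) from those that dequeue. */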
always_inline int
session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
				vlib_node_runtime_t * node,
				session_evt_elt_t * elt,
				int *n_tx_packets, u8 peek_data)
{
  u32 n_trace, n_left, pbi, next_index, max_burst;
  session_tx_context_t *ctx = &wrk->ctx;
  session_main_t *smm = &session_main;
  session_event_t *e = &elt->evt;
  vlib_main_t *vm = wrk->vm;
  transport_proto_t tp;
  vlib_buffer_t *pb;
  u16 n_bufs, rv;

  if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data))))
    {
      if (rv < 2)
	session_evt_add_old (wrk, elt);
      return SESSION_TX_NO_DATA;
    }

  next_index = smm->session_type_to_next[ctx->s->session_type];
  max_burst = SESSION_NODE_FRAME_SIZE - *n_tx_packets;

  tp = session_get_transport_proto (ctx->s);
  ctx->transport_vft = transport_protocol_get_vft (tp);
  ctx->tc = session_tx_get_transport (ctx, peek_data);

  if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH))
    {
      if (ctx->transport_vft->flush_data)
	ctx->transport_vft->flush_data (ctx->tc);
      e->event_type = SESSION_IO_EVT_TX;
    }

  if (ctx->s->flags & SESSION_F_CUSTOM_TX)
    {
      u32 n_custom_tx;
      ctx->s->flags &= ~SESSION_F_CUSTOM_TX;
      ctx->sp.max_burst_size = max_burst;
      n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, &ctx->sp);
      *n_tx_packets += n_custom_tx;
      if (PREDICT_FALSE
	  (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
	return SESSION_TX_OK;
      max_burst -= n_custom_tx;
      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
	{
	  session_evt_add_old (wrk, elt);
	  return SESSION_TX_OK;
	}
    }

  transport_connection_snd_params (ctx->tc, &ctx->sp);

  if (!ctx->sp.snd_space)
    {
      /* If the deschedule flag was set, remove session from scheduler.
       * Transport is responsible for rescheduling this session. */
      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
	transport_connection_deschedule (ctx->tc);
      /* Request to postpone the session, e.g., zero-wnd and transport
       * is not currently probing */
      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
	session_evt_add_old (wrk, elt);
      /* This flow queue is "empty" so it should be re-evaluated before
       * the ones that have data to send. */
      else
	session_evt_add_head_old (wrk, elt);

      return SESSION_TX_NO_DATA;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    {
      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
      if (snd_space < TRANSPORT_PACER_MIN_BURST)
	{
	  session_evt_add_head_old (wrk, elt);
	  return SESSION_TX_NO_DATA;
	}
      snd_space = clib_min (ctx->sp.snd_space, snd_space);
      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
	snd_space - snd_space % ctx->sp.snd_mss : snd_space;
    }

  /* Check how much we can pull. */
  session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);

  if (PREDICT_FALSE (!ctx->max_len_to_snd))
    {
      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
      session_tx_maybe_reschedule (wrk, ctx, elt);
      return SESSION_TX_NO_DATA;
    }

  vec_validate_aligned (wrk->tx_buffers, ctx->n_bufs_needed - 1,
			CLIB_CACHE_LINE_BYTES);
  n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, ctx->n_bufs_needed);
  if (PREDICT_FALSE (n_bufs < ctx->n_bufs_needed))
    {
      if (n_bufs)
	vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
      session_evt_add_head_old (wrk, elt);
      vlib_node_increment_counter (wrk->vm, node->node_index,
				   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
      return SESSION_TX_NO_BUFFERS;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    transport_connection_tx_pacer_update_bytes (ctx->tc, ctx->max_len_to_snd);

  ctx->left_to_snd = ctx->max_len_to_snd;
  n_left = ctx->n_segs_per_evt;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, bi1;

      pbi = wrk->tx_buffers[n_bufs - 3];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);
      pbi = wrk->tx_buffers[n_bufs - 4];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);

      bi0 = wrk->tx_buffers[--n_bufs];
      bi1 = wrk->tx_buffers[--n_bufs];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);
      session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data);

      ctx->transport_vft->push_header (ctx->tc, b0);
      ctx->transport_vft->push_header (ctx->tc, b1);

      n_left -= 2;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_buffers, bi1);
      vec_add1 (wrk->pending_tx_nexts, next_index);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }
  while (n_left)
    {
      vlib_buffer_t *b0;
      u32 bi0;

      if (n_left > 1)
	{
	  pbi = wrk->tx_buffers[n_bufs - 2];
	  pb = vlib_get_buffer (vm, pbi);
	  vlib_prefetch_buffer_header (pb, STORE);
	}

      bi0 = wrk->tx_buffers[--n_bufs];
      b0 = vlib_get_buffer (vm, bi0);
      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);

      /* Ask transport to push header after current_length and
       * total_length_not_including_first_buffer are updated */
      ctx->transport_vft->push_header (ctx->tc, b0);

      n_left -= 1;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0))
    session_tx_trace_frame (vm, node, next_index, wrk->pending_tx_buffers,
			    ctx->n_segs_per_evt, ctx->s, n_trace);

  if (PREDICT_FALSE (n_bufs))
    vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);

  *n_tx_packets += ctx->n_segs_per_evt;

  SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
	       ctx->s->tx_fifo->has_event, wrk->last_vlib_time);

  ASSERT (ctx->left_to_snd == 0);

  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
   * check if application enqueued more data and reschedule accordingly */
  if (ctx->max_len_to_snd < ctx->max_dequeue)
    session_evt_add_old (wrk, elt);
  else
    session_tx_maybe_reschedule (wrk, ctx, elt);

  if (!peek_data)
    {
      u32 n_dequeued = ctx->max_len_to_snd;
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	n_dequeued += ctx->n_segs_per_evt * SESSION_CONN_HDR_LEN;
      if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, n_dequeued))
	session_dequeue_notify (ctx->s);
    }
  return SESSION_TX_OK;
}

int
session_tx_fifo_peek_and_snd (session_worker_t * wrk,
			      vlib_node_runtime_t * node,
			      session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1);
}

int
session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
				 vlib_node_runtime_t * node,
				 session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0);
}

int
session_tx_fifo_dequeue_internal (session_worker_t * wrk,
				  vlib_node_runtime_t * node,
				  session_evt_elt_t * elt, int *n_tx_packets)
{
  transport_send_params_t *sp = &wrk->ctx.sp;
  session_t *s = wrk->ctx.s;
  u32 n_packets;

  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
    return 0;

  /* Clear custom-tx flag used to request reschedule for tx */
  s->flags &= ~SESSION_F_CUSTOM_TX;

  sp->max_burst_size = clib_min (SESSION_NODE_FRAME_SIZE - *n_tx_packets,
				 TRANSPORT_PACER_MAX_BURST_PKTS);

  n_packets = transport_custom_tx (session_get_transport_proto (s), s, sp);
  *n_tx_packets += n_packets;

  if (s->flags & SESSION_F_CUSTOM_TX)
    {
      session_evt_add_old (wrk, elt);
    }
  else if (!(sp->flags & TRANSPORT_SND_F_DESCHED))
    {
      svm_fifo_unset_event (s->tx_fifo);
      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
	if (svm_fifo_set_event (s->tx_fifo))
	  session_evt_add_head_old (wrk, elt);
    }

  if (sp->max_burst_size &&
      svm_fifo_needs_deq_ntf (s->tx_fifo, sp->max_burst_size))
    session_dequeue_notify (s);

  return n_packets;
}

always_inline session_t *
session_event_get_session (session_worker_t * wrk, session_event_t * e)
{
  if (PREDICT_FALSE (pool_is_free_index (wrk->sessions, e->session_index)))
    return 0;
  ASSERT (session_is_valid (e->session_index, wrk->vm->thread_index));
  return pool_elt_at_index (wrk->sessions, e->session_index);
}

always_inline void
session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt)
{
  clib_llist_index_t ei;
  void (*fp) (void *);
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_CTRL_EVT_RPC:
      fp = e->rpc_args.fp;
      (*fp) (e->rpc_args.arg);
      break;
    case SESSION_CTRL_EVT_CLOSE:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_close (s);
      break;
    case SESSION_CTRL_EVT_RESET:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_reset (s);
      break;
    case SESSION_CTRL_EVT_LISTEN:
      session_mq_listen_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_LISTEN_URI:
      session_mq_listen_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_UNLISTEN:
      session_mq_unlisten_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT:
      session_mq_connect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT_URI:
      session_mq_connect_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECT:
      session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED:
      session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_ACCEPTED_REPLY:
      session_mq_accepted_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED_REPLY:
      session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk,
								    elt));
      break;
    case SESSION_CTRL_EVT_RESET_REPLY:
      session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_WORKER_UPDATE:
      session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_DETACH:
      app_mq_detach_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_WRK_RPC:
      session_mq_app_wrk_rpc_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_TRANSPORT_ATTR:
      session_mq_transport_attr_handler (session_evt_ctrl_data (wrk, elt));
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    {
      e = &elt->evt;
      if (e->event_type >= SESSION_CTRL_EVT_BOUND)
	session_evt_ctrl_data_free (wrk, elt);
      session_evt_elt_free (wrk, elt);
    }
  SESSION_EVT (SESSION_EVT_COUNTS, CNT_CTRL_EVTS, 1, wrk);
}

always_inline void
session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
			   session_evt_elt_t * elt, int *n_tx_packets)
{
  session_main_t *smm = &session_main;
  app_worker_t *app_wrk;
  clib_llist_index_t ei;
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_TX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s))
	break;
      CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      wrk->ctx.s = s;
      /* Spray packets in per session type frames, since they go to
       * different nodes */
      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
      break;
    case SESSION_IO_EVT_RX:
      s = session_event_get_session (wrk, e);
      if (!s)
	break;
      transport_app_rx_evt (session_get_transport_proto (s),
			    s->connection_index, s->thread_index);
      break;
    case SESSION_IO_EVT_BUILTIN_RX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING))
	break;
      svm_fifo_unset_event (s->rx_fifo);
      app_wrk = app_worker_get (s->app_wrk_index);
      app_worker_builtin_rx (app_wrk, s);
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
      s = session_get_from_handle_if_valid (e->session_handle);
      wrk->ctx.s = s;
      if (PREDICT_TRUE (s != 0))
	session_tx_fifo_dequeue_internal (wrk, node, elt, n_tx_packets);
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  SESSION_EVT (SESSION_IO_EVT_COUNTS, e->event_type, 1, wrk);

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    session_evt_elt_free (wrk, elt);
}

static const u32 session_evt_msg_sizes[] = {
#define _(symc, sym)						\
  [SESSION_CTRL_EVT_ ## symc] = sizeof (session_ ## sym ##_msg_t),
  foreach_session_ctrl_evt
#undef _
};

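/* Converts a dequeued mq message into an event list element. Control events
 * that carry a full message payload get their data copied into ctrl data
 * buffers; everything else fits directly in the event itself. */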
always_inline void
session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt)
{
  session_evt_elt_t *elt;

  if (evt->event_type >= SESSION_CTRL_EVT_RPC)
    {
      elt = session_evt_alloc_ctrl (wrk);
      if (evt->event_type >= SESSION_CTRL_EVT_BOUND)
	{
	  elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk);
	  elt->evt.event_type = evt->event_type;
	  clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data,
			    session_evt_msg_sizes[evt->event_type]);
	}
      else
	{
	  /* Internal control events fit into io events footprint */
	  clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
	}
    }
  else
    {
      elt = session_evt_alloc_new (wrk);
      clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
    }
}

static void
session_flush_pending_tx_buffers (session_worker_t * wrk,
				  vlib_node_runtime_t * node)
{
  vlib_buffer_enqueue_to_next (wrk->vm, node, wrk->pending_tx_buffers,
			       wrk->pending_tx_nexts,
			       vec_len (wrk->pending_tx_nexts));
  vec_reset_length (wrk->pending_tx_buffers);
  vec_reset_length (wrk->pending_tx_nexts);
}

int
session_wrk_handle_mq (session_worker_t *wrk, svm_msg_q_t *mq)
{
  svm_msg_q_msg_t _msg, *msg = &_msg;
  u32 i, n_to_dequeue = 0;
  session_event_t *evt;

  n_to_dequeue = svm_msg_q_size (mq);
  for (i = 0; i < n_to_dequeue; i++)
    {
      svm_msg_q_sub_raw (mq, msg);
      evt = svm_msg_q_msg_data (mq, msg);
      session_evt_add_to_list (wrk, evt);
      svm_msg_q_free_msg (mq, msg);
    }

  return n_to_dequeue;
}

static void
session_wrk_timerfd_update (session_worker_t *wrk, u64 time_ns)
{
  struct itimerspec its;

  its.it_value.tv_sec = 0;
  its.it_value.tv_nsec = time_ns;
  its.it_interval.tv_sec = 0;
  its.it_interval.tv_nsec = its.it_value.tv_nsec;

  if (timerfd_settime (wrk->timerfd, 0, &its, NULL) == -1)
    clib_warning ("timerfd_settime");
}

static u64
session_wrk_tfd_timeout (session_wrk_state_t state, u32 thread_index)
{
  if (state == SESSION_WRK_INTERRUPT)
    return thread_index ? 1e6 : vlib_num_workers () ? 5e8 : 1e6;
  else if (state == SESSION_WRK_IDLE)
    return thread_index ? 1e8 : vlib_num_workers () ? 5e8 : 1e8;
  else
    return 0;
}

static void
session_wrk_set_state (session_worker_t *wrk, session_wrk_state_t state)
{
  u64 time_ns;

  wrk->state = state;
  time_ns = session_wrk_tfd_timeout (state, wrk->vm->thread_index);
  session_wrk_timerfd_update (wrk, time_ns);
}

static void
session_wrk_update_state (session_worker_t *wrk)
{
  vlib_main_t *vm = wrk->vm;

  if (wrk->state == SESSION_WRK_POLLING)
    {
      if (pool_elts (wrk->event_elts) == 3 &&
	  vlib_last_vectors_per_main_loop (vm) < 1)
	{
	  session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
	  vlib_node_set_state (vm, session_queue_node.index,
			       VLIB_NODE_STATE_INTERRUPT);
	}
    }
  else if (wrk->state == SESSION_WRK_INTERRUPT)
    {
      if (pool_elts (wrk->event_elts) > 3 ||
	  vlib_last_vectors_per_main_loop (vm) > 1)
	{
	  session_wrk_set_state (wrk, SESSION_WRK_POLLING);
	  vlib_node_set_state (vm, session_queue_node.index,
			       VLIB_NODE_STATE_POLLING);
	}
      else if (PREDICT_FALSE (!pool_elts (wrk->sessions)))
	{
	  session_wrk_set_state (wrk, SESSION_WRK_IDLE);
	}
    }
  else
    {
      if (pool_elts (wrk->event_elts))
	{
	  session_wrk_set_state (wrk, SESSION_WRK_INTERRUPT);
	}
    }
}

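/* Per-thread session queue node: drains the worker's message queue, then
 * dispatches control events followed by new and old io events, flushing
 * any resulting packets to the next nodes. */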
static uword
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame)
{
  u32 thread_index = vm->thread_index, __clib_unused n_evts;
  session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
  session_main_t *smm = vnet_get_session_main ();
  session_worker_t *wrk = &smm->wrk[thread_index];
  clib_llist_index_t ei, next_ei, old_ti;
  int n_tx_packets;

  SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);

  session_wrk_update_time (wrk, vlib_time_now (vm));

  /*
   *  Update transport time
   */
  transport_update_time (wrk->last_vlib_time, thread_index);
  n_tx_packets = vec_len (wrk->pending_tx_buffers);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, UPDATE_TIME, wrk);

  /*
   *  Dequeue new internal mq events
   */

  n_evts = session_wrk_handle_mq (wrk, wrk->vpp_event_queue);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, MQ_DEQ, wrk, n_evts);

  /*
   * Handle control events
   */

  ctrl_he = pool_elt_at_index (wrk->event_elts, wrk->ctrl_head);

  clib_llist_foreach_safe (wrk->event_elts, evt_list, ctrl_he, elt, ({
    clib_llist_remove (wrk->event_elts, evt_list, elt);
    session_event_dispatch_ctrl (wrk, elt);
  }));

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, CTRL_EVTS, wrk);

  /*
   * Handle the new io events.
   */

  new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head);
  old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
  old_ti = clib_llist_prev_index (old_he, evt_list);

  ei = clib_llist_next_index (new_he, evt_list);
  while (ei != wrk->new_head && n_tx_packets < SESSION_NODE_FRAME_SIZE)
    {
      elt = pool_elt_at_index (wrk->event_elts, ei);
      ei = clib_llist_next_index (elt, evt_list);
      clib_llist_remove (wrk->event_elts, evt_list, elt);
      session_event_dispatch_io (wrk, node, elt, &n_tx_packets);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, NEW_IO_EVTS, wrk);

  /*
   * Handle the old io events, if we had any prior to processing the new ones
   */

  if (old_ti != wrk->old_head)
    {
      old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
      ei = clib_llist_next_index (old_he, evt_list);

      while (n_tx_packets < SESSION_NODE_FRAME_SIZE)
	{
	  elt = pool_elt_at_index (wrk->event_elts, ei);
	  next_ei = clib_llist_next_index (elt, evt_list);
	  clib_llist_remove (wrk->event_elts, evt_list, elt);

	  session_event_dispatch_io (wrk, node, elt, &n_tx_packets);

	  if (ei == old_ti)
	    break;

	  ei = next_ei;
	};
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, OLD_IO_EVTS, wrk);

  if (vec_len (wrk->pending_tx_buffers))
    session_flush_pending_tx_buffers (wrk, node);

  vlib_node_increment_counter (vm, session_queue_node.index,
			       SESSION_QUEUE_ERROR_TX, n_tx_packets);

  SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets);

  if (wrk->flags & SESSION_WRK_F_ADAPTIVE)
    session_wrk_update_state (wrk);

  return n_tx_packets;
}

VLIB_REGISTER_NODE (session_queue_node) =
{
  .function = session_queue_node_fn,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = ARRAY_LEN (session_queue_error_strings),
  .error_strings = session_queue_error_strings,
  .state = VLIB_NODE_STATE_DISABLED,
};

static clib_error_t *
session_wrk_tfd_read_ready (clib_file_t *cf)
{
  session_worker_t *wrk = session_main_get_worker (cf->private_data);
  u64 buf;
  int rv;

  vlib_node_set_interrupt_pending (wrk->vm, session_queue_node.index);
  rv = read (wrk->timerfd, &buf, sizeof (buf));
  if (rv < 0 && errno != EAGAIN)
    clib_unix_warning ("failed");
  return 0;
}

static clib_error_t *
session_wrk_tfd_write_ready (clib_file_t *cf)
{
  return 0;
}

void
session_wrk_enable_adaptive_mode (session_worker_t *wrk)
{
  u32 thread_index = wrk->vm->thread_index;
  clib_file_t template = { 0 };

  if ((wrk->timerfd = timerfd_create (CLOCK_MONOTONIC, TFD_NONBLOCK)) < 0)
    clib_warning ("timerfd_create");

  template.read_function = session_wrk_tfd_read_ready;
  template.write_function = session_wrk_tfd_write_ready;
  template.file_descriptor = wrk->timerfd;
  template.private_data = thread_index;
  template.polling_thread_index = thread_index;
  template.description = format (0, "session-wrk-tfd-%u", thread_index);

  wrk->timerfd_file = clib_file_add (&file_main, &template);
  wrk->flags |= SESSION_WRK_F_ADAPTIVE;
}

static clib_error_t *
session_queue_exit (vlib_main_t * vm)
{
  if (vlib_get_n_threads () < 2)
    return 0;

  /*
   * Shut off (especially) worker-thread session nodes.
   * Otherwise, vpp can crash as the main thread unmaps the
   * API segment.
   */
  vlib_worker_thread_barrier_sync (vm);
  session_node_enable_disable (0 /* is_enable */ );
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);

static uword
session_queue_run_on_main (vlib_main_t * vm)
{
  vlib_node_runtime_t *node;

  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, 0);
}

static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		       vlib_frame_t * f)
{
  uword *event_data = 0;
  f64 timeout = 100.0;
  uword event_type;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
	{
	case SESSION_Q_PROCESS_RUN_ON_MAIN:
	  /* Run session queue node on main thread */
	  session_queue_run_on_main (vm);
	  break;
	case SESSION_Q_PROCESS_STOP:
	  vlib_node_set_state (vm, session_queue_process_node.index,
			       VLIB_NODE_STATE_DISABLED);
	  timeout = 100000.0;
	  break;
	case ~0:
	  /* Timed out. Run on main to ensure all events are handled */
	  session_queue_run_on_main (vm);
	  break;
	}
      vec_reset_length (event_data);
    }

  return 0;
}

VLIB_REGISTER_NODE (session_queue_process_node) =
{
  .function = session_queue_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
};

static_always_inline uword
session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  session_main_t *sm = &session_main;
  if (!sm->wrk[0].vpp_event_queue)
    return 0;
  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, frame);
}

VLIB_REGISTER_NODE (session_queue_pre_input_node) =
{
  .function = session_queue_pre_input_inline,
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .name = "session-queue-main",
  .state = VLIB_NODE_STATE_DISABLED,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */