/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/application_local.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>

#define app_check_thread_and_barrier(_fn, _arg)			\
  if (!vlib_thread_is_main_w_barrier ())				\
    {									\
      vlib_rpc_call_main_thread (_fn, (u8 *) _arg, sizeof(*_arg));	\
      return;								\
    }

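/* Handlers below service application requests (listen, connect, disconnect,
 * detach, ...) received over the app message queue. Handlers that must run
 * on the main thread use app_check_thread_and_barrier to re-issue themselves
 * as an RPC to main when invoked on a worker. */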
static void
session_mq_listen_handler (void *data)
{
  session_listen_msg_t *mp = (session_listen_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  if (mp->is_ip4)
    ip46_address_mask_ip4 (&a->sep.ip);
  a->sep.port = mp->port;
  a->sep.fib_index = mp->vrf;
  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep.transport_proto = mp->proto;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;
  a->sep_ext.transport_flags = mp->flags;

  if ((rv = vnet_listen (a)))
    clib_warning ("listen returned: %d", rv);

  app_wrk = application_get_worker (app, mp->wrk_index);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

static void
session_mq_listen_uri_handler (void *data)
{
  session_listen_uri_msg_t *mp = (session_listen_uri_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->app_index = app->app_index;
  rv = vnet_bind_uri (a);

  app_wrk = application_get_worker (app, 0);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

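/* Build a vnet_connect_args_t from the mq message and hand it to
 * vnet_connect. On failure, the error is reported back to the requesting
 * app worker via mq_send_session_connected_cb. */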
static void
session_mq_connect_handler (void *data)
{
  session_connect_msg_t *mp = (session_connect_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  a->sep.port = mp->port;
  a->sep.transport_proto = mp->proto;
  a->sep.peer.fib_index = mp->vrf;
  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
  if (mp->is_ip4)
    {
      ip46_address_mask_ip4 (&a->sep.ip);
      ip46_address_mask_ip4 (&a->sep.peer.ip);
    }
  a->sep.peer.port = mp->lcl_port;
  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep_ext.parent_handle = mp->parent_handle;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->sep_ext.transport_flags = mp->flags;
  if (mp->hostname_len)
    {
      vec_validate (a->sep_ext.hostname, mp->hostname_len - 1);
      clib_memcpy_fast (a->sep_ext.hostname, mp->hostname, mp->hostname_len);
    }
  a->api_context = mp->context;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;

  if ((rv = vnet_connect (a)))
    {
      clib_warning ("connect returned: %U", format_session_error, rv);
      app_wrk = application_get_worker (app, mp->wrk_index);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }

  vec_free (a->sep_ext.hostname);
}

static void
session_mq_connect_uri_handler (void *data)
{
  session_connect_uri_msg_t *mp = (session_connect_uri_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  if ((rv = vnet_connect_uri (a)))
    {
      clib_warning ("connect_uri returned: %d", rv);
      app_wrk = application_get_worker (app, 0 /* default wrk only */ );
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }
}

static void
session_mq_disconnect_handler (void *data)
{
  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_disconnect_session (a);
}

static void
app_mq_detach_handler (void *data)
{
  session_app_detach_msg_t *mp = (session_app_detach_msg_t *) data;
  vnet_app_detach_args_t _a, *a = &_a;
  application_t *app;

  app_check_thread_and_barrier (app_mq_detach_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->api_client_index = mp->client_index;
  vnet_application_detach (a);
}

static void
session_mq_unlisten_handler (void *data)
{
  session_unlisten_msg_t *mp = (session_unlisten_msg_t *) data;
  vnet_unlisten_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_unlisten_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->app_index = app->app_index;
  a->handle = mp->handle;
  a->wrk_map_index = mp->wrk_index;
  if ((rv = vnet_unlisten (a)))
    clib_warning ("unlisten returned: %d", rv);

  app_wrk = application_get_worker (app, a->wrk_map_index);
  if (!app_wrk)
    return;

  mq_send_unlisten_reply (app_wrk, mp->handle, mp->context, rv);
}

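/* App's reply to a session accept notification. A non-zero retval means the
 * server rejected the session, so it is disconnected; otherwise the session
 * is moved to ready and any pending rx or close notifications are flushed. */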
static void
session_mq_accepted_reply_handler (void *data)
{
  session_accepted_reply_msg_t *mp = (session_accepted_reply_msg_t *) data;
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_state_t old_state;
  app_worker_t *app_wrk;
  session_t *s;

  /* Server isn't interested, kill the session */
  if (mp->retval)
    {
      a->app_index = mp->context;
      a->handle = mp->handle;
      vnet_disconnect_session (a);
      return;
    }

  /* Mail this back from the main thread. We're not polling in main
   * thread so we're using other workers for notifications. */
  if (vlib_num_workers () && vlib_get_thread_index () != 0
      && session_thread_from_handle (mp->handle) == 0)
    {
      vlib_rpc_call_main_thread (session_mq_accepted_reply_handler,
				 (u8 *) mp, sizeof (*mp));
      return;
    }

  s = session_get_from_handle_if_valid (mp->handle);
  if (!s)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != mp->context)
    {
      clib_warning ("app doesn't own session");
      return;
    }

  if (!session_has_transport (s))
    {
      s->session_state = SESSION_STATE_READY;
      if (ct_session_connect_notify (s))
	return;
    }
  else
    {
      old_state = s->session_state;
      s->session_state = SESSION_STATE_READY;

      if (!svm_fifo_is_empty_prod (s->rx_fifo))
	app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

      /* Closed while waiting for app to reply. Resend disconnect */
      if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
	{
	  app_worker_close_notify (app_wrk, s);
	  s->session_state = old_state;
	  return;
	}
    }
}

static void
session_mq_reset_reply_handler (void *data)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_reset_reply_msg_t *mp;
  app_worker_t *app_wrk;
  application_t *app;
  session_t *s;
  u32 index, thread_index;

  mp = (session_reset_reply_msg_t *) data;
  app = application_lookup (mp->context);
  if (!app)
    return;

  session_parse_handle (mp->handle, &index, &thread_index);
  s = session_get_if_valid (index, thread_index);

  /* No session or not the right session */
  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (!app_wrk || app_wrk->app_index != app->app_index)
    {
      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
		    mp->handle);
      return;
    }

  /* Client objected to resetting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* This comes as a response to a reset, transport only waiting for
   * confirmation to remove connection state, no need to disconnect */
  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}

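/* Handles a disconnected notification coming from the app: the session is
 * disconnected and a DISCONNECTED_REPLY carrying the result is queued back
 * on the owning app worker's event queue. */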
static void
session_mq_disconnected_handler (void *data)
{
  session_disconnected_reply_msg_t *rmp;
  vnet_disconnect_args_t _a, *a = &_a;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_disconnected_msg_t *mp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv = 0;

  mp = (session_disconnected_msg_t *) data;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("could not disconnect handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  app = application_lookup (mp->client_index);
  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
    {
      clib_warning ("could not disconnect session: %llu app: %u",
		    mp->handle, mp->client_index);
      return;
    }

  a->handle = mp->handle;
  a->app_index = app_wrk->wrk_index;
  rv = vnet_disconnect_session (a);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
  rmp = (session_disconnected_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->context = mp->context;
  rmp->retval = rv;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

static void
session_mq_disconnected_reply_handler (void *data)
{
  session_disconnected_reply_msg_t *mp;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  mp = (session_disconnected_reply_msg_t *) data;

  /* Client objected to disconnecting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* Disconnect has been confirmed. Confirm close to transport */
  app = application_lookup (mp->context);
  if (app)
    {
      a->handle = mp->handle;
      a->app_index = app->app_index;
      vnet_disconnect_session (a);
    }
}

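/* Move a session to a different app worker. If the request comes from the
 * current owner, the prospective new worker is first asked to confirm via a
 * REQ_WORKER_UPDATE. Otherwise the session is reassigned, the fifo pointers
 * and segment handle are returned in a WORKER_UPDATE_REPLY, and any rx, tx
 * or close events that may have been lost during the switch are regenerated. */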
static void
session_mq_worker_update_handler (void *data)
{
  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
  session_worker_update_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  u32 owner_app_wrk_map;
  session_event_t *evt;
  session_t *s;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
		    mp->handle);
      return;
    }
  owner_app_wrk_map = app_wrk->wrk_map_index;
  app_wrk = application_get_worker (app, mp->wrk_index);

  /* This needs to come from the new owner */
  if (mp->req_wrk_index == owner_app_wrk_map)
    {
      session_req_worker_update_msg_t *wump;

      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
					   SESSION_MQ_CTRL_EVT_RING,
					   SVM_Q_WAIT, msg);
      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
      clib_memset (evt, 0, sizeof (*evt));
      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
      wump = (session_req_worker_update_msg_t *) evt->data;
      wump->session_handle = mp->handle;
      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
      return;
    }

  app_worker_own_session (app_wrk, s);

  /*
   * Send reply
   */
  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
				       SESSION_MQ_CTRL_EVT_RING,
				       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
  rmp = (session_worker_update_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->rx_fifo = pointer_to_uword (s->rx_fifo);
  rmp->tx_fifo = pointer_to_uword (s->tx_fifo);
  rmp->segment_handle = session_segment_handle (s);
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);

  /*
   * Retransmit messages that may have been lost
   */
  if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);

  if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
    app_worker_close_notify (app_wrk, s);
}

vlib_node_registration_t session_queue_node;

typedef struct
{
  u32 session_index;
  u32 server_thread_index;
} session_queue_trace_t;

/* packet trace format function */
static u8 *
format_session_queue_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);

  s = format (s, "session index %d thread index %d",
	      t->session_index, t->server_thread_index);
  return s;
}

#define foreach_session_queue_error		\
_(TX, "Packets transmitted")			\
_(TIMER, "Timer events")			\
_(NO_BUFFER, "Out of buffers")

typedef enum
{
#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
  foreach_session_queue_error
#undef _
    SESSION_QUEUE_N_ERROR,
} session_queue_error_t;

static char *session_queue_error_strings[] = {
#define _(sym,string) string,
  foreach_session_queue_error
#undef _
};

enum
{
  SESSION_TX_NO_BUFFERS = -2,
  SESSION_TX_NO_DATA,
  SESSION_TX_OK
};

static void
session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			u32 next_index, u32 * to_next, u16 n_segs,
			session_t * s, u32 n_trace)
{
  session_queue_trace_t *t;
  vlib_buffer_t *b;
  int i;

  for (i = 0; i < clib_min (n_trace, n_segs); i++)
    {
      b = vlib_get_buffer (vm, to_next[i]);
      vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ );
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->session_index = s->session_index;
      t->server_thread_index = s->thread_index;
    }
  vlib_set_trace_count (vm, node, n_trace - i);
}

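/* Dequeue data from the session tx fifo into the remaining buffers of one
 * segment's chain, linking the chained buffers together as they are filled. */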
always_inline void
session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
			    vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  vlib_buffer_t *chain_b, *prev_b;
  u32 chain_bi0, to_deq, left_from_seg;
  session_worker_t *wrk;
  u16 len_to_deq, n_bytes_read;
  u8 *data, j;

  wrk = session_main_get_worker (ctx->s->thread_index);
  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  chain_b = b;
  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
			    ctx->left_to_snd);
  to_deq = left_from_seg;
  for (j = 1; j < ctx->n_bufs_per_seg; j++)
    {
      prev_b = chain_b;
      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);

      *n_bufs -= 1;
      chain_bi0 = wrk->tx_buffers[*n_bufs];
      chain_b = vlib_get_buffer (vm, chain_bi0);
      chain_b->current_data = 0;
      data = vlib_buffer_get_current (chain_b);
      if (peek_data)
	{
	  n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo,
					ctx->sp.tx_offset, len_to_deq, data);
	  ctx->sp.tx_offset += n_bytes_read;
	}
      else
	{
	  if (ctx->transport_vft->transport_options.tx_type ==
	      TRANSPORT_TX_DGRAM)
	    {
	      svm_fifo_t *f = ctx->s->tx_fifo;
	      session_dgram_hdr_t *hdr = &ctx->hdr;
	      u16 deq_now;
	      deq_now = clib_min (hdr->data_length - hdr->data_offset,
				  len_to_deq);
	      n_bytes_read = svm_fifo_peek (f, hdr->data_offset, deq_now,
					    data);
	      ASSERT (n_bytes_read > 0);

	      hdr->data_offset += n_bytes_read;
	      if (hdr->data_offset == hdr->data_length)
		{
		  u32 offset = hdr->data_length + SESSION_CONN_HDR_LEN;
		  svm_fifo_dequeue_drop (f, offset);
		  if (ctx->left_to_snd > n_bytes_read)
		    svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
				   (u8 *) & ctx->hdr);
		}
	      else if (ctx->left_to_snd == n_bytes_read)
		svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
					 sizeof (session_dgram_pre_hdr_t));
	    }
	  else
	    n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					     len_to_deq, data);
	}
      ASSERT (n_bytes_read == len_to_deq);
      chain_b->current_length = n_bytes_read;
      b->total_length_not_including_first_buffer += chain_b->current_length;

      /* update previous buffer */
      prev_b->next_buffer = chain_bi0;
      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      /* update current buffer */
      chain_b->next_buffer = 0;

      to_deq -= n_bytes_read;
      if (to_deq == 0)
	break;
    }
  ASSERT (to_deq == 0
	  && b->total_length_not_including_first_buffer == left_from_seg);
  ctx->left_to_snd -= left_from_seg;
}

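/* Fill the first buffer of a segment from the tx fifo, leaving headroom for
 * transport headers, then chain additional buffers if the segment does not
 * fit in a single buffer. */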
always_inline void
session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
			vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  u32 len_to_deq;
  u8 *data0;
  int n_bytes_read;

  /*
   * Start with the first buffer in chain
   */
  b->error = 0;
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_data = 0;

  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);

  if (peek_data)
    {
      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset,
				    len_to_deq, data0);
      ASSERT (n_bytes_read > 0);
      /* Keep track of progress locally, transport is also supposed to
       * increment it independently when pushing the header */
      ctx->sp.tx_offset += n_bytes_read;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  session_dgram_hdr_t *hdr = &ctx->hdr;
	  svm_fifo_t *f = ctx->s->tx_fifo;
	  u16 deq_now;
	  u32 offset;

	  ASSERT (hdr->data_length > hdr->data_offset);
	  deq_now = clib_min (hdr->data_length - hdr->data_offset,
			      len_to_deq);
	  offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	  n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
	  ASSERT (n_bytes_read > 0);

	  if (ctx->s->session_state == SESSION_STATE_LISTENING)
	    {
	      ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
	      ctx->tc->rmt_port = hdr->rmt_port;
	    }
	  hdr->data_offset += n_bytes_read;
	  if (hdr->data_offset == hdr->data_length)
	    {
	      offset = hdr->data_length + SESSION_CONN_HDR_LEN;
	      svm_fifo_dequeue_drop (f, offset);
	      if (ctx->left_to_snd > n_bytes_read)
		svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			       (u8 *) & ctx->hdr);
	    }
	  else if (ctx->left_to_snd == n_bytes_read)
	    svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
				     sizeof (session_dgram_pre_hdr_t));
	}
      else
	{
	  n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
					   len_to_deq, data0);
	  ASSERT (n_bytes_read > 0);
	}
    }
  b->current_length = n_bytes_read;
  ctx->left_to_snd -= n_bytes_read;

  /*
   * Fill in the remaining buffers in the chain, if any
   */
  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
    session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data);
}

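/* Decide whether the session can transmit: 0 means ok to send, 1 means not
 * ready yet (requeue the event), 2 means the session is gone and the event
 * should be dropped. */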
always_inline u8
session_tx_not_ready (session_t * s, u8 peek_data)
{
  if (peek_data)
    {
      if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY))
	return 0;
      /* Can retransmit for closed sessions but can't send new data if
       * session is not ready or closed */
      else if (s->session_state < SESSION_STATE_READY)
	return 1;
      else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
	{
	  /* Allow closed transports to still send custom packets.
	   * For instance, tcp may want to send acks in time-wait. */
	  if (s->session_state != SESSION_STATE_TRANSPORT_DELETED
	      && (s->flags & SESSION_F_CUSTOM_TX))
	    return 0;
	  return 2;
	}
    }
  return 0;
}

always_inline transport_connection_t *
session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
{
  if (peek_data)
    {
      return ctx->transport_vft->get_connection (ctx->s->connection_index,
						 ctx->s->thread_index);
    }
  else
    {
      if (ctx->s->session_state == SESSION_STATE_LISTENING)
	return ctx->transport_vft->get_listener (ctx->s->connection_index);
      else
	return ctx->transport_vft->get_connection (ctx->s->connection_index,
						   ctx->s->thread_index);
    }
}

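/* Compute how much data can be dequeued for this dispatch: bounds the send by
 * the fifo contents, the transport's snd_space/snd_mss and the per-node burst
 * limit, and derives the number of segments and buffers needed. */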
always_inline void
session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
			       u32 max_segs, u8 peek_data)
{
  u32 n_bytes_per_buf, n_bytes_per_seg;

  n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
  ctx->max_dequeue = svm_fifo_max_dequeue_cons (ctx->s->tx_fifo);

  if (peek_data)
    {
      /* Offset in rx fifo from where to peek data */
      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
	{
	  ctx->max_len_to_snd = 0;
	  return;
	}
      ctx->max_dequeue -= ctx->sp.tx_offset;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
	{
	  u32 len, chain_limit;

	  if (ctx->max_dequeue <= sizeof (ctx->hdr))
	    {
	      ctx->max_len_to_snd = 0;
	      return;
	    }

	  svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
			 (u8 *) & ctx->hdr);
	  ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
	  len = ctx->hdr.data_length - ctx->hdr.data_offset;

	  /* Process multiple dgrams if smaller than min (buf_space, mss).
	   * This avoids handling multiple dgrams if they require buffer
	   * chains */
	  chain_limit = clib_min (n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN,
				  ctx->sp.snd_mss);
	  if (ctx->hdr.data_length <= chain_limit)
	    {
	      u32 first_dgram_len, dgram_len, offset, max_offset;
	      session_dgram_hdr_t hdr;

	      ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, len);
	      offset = ctx->hdr.data_length + sizeof (session_dgram_hdr_t);
	      first_dgram_len = len;
	      max_offset = clib_min (ctx->max_dequeue, 16 << 10);

	      while (offset < max_offset)
		{
		  svm_fifo_peek (ctx->s->tx_fifo, offset, sizeof (ctx->hdr),
				 (u8 *) & hdr);
		  ASSERT (hdr.data_length > hdr.data_offset);
		  dgram_len = hdr.data_length - hdr.data_offset;
		  if (len + dgram_len > ctx->max_dequeue
		      || first_dgram_len != dgram_len)
		    break;
		  len += dgram_len;
		  offset += sizeof (hdr) + hdr.data_length;
		}
	    }

	  ctx->max_dequeue = len;
	}
    }
  ASSERT (ctx->max_dequeue > 0);

  /* Ensure we're not writing more than transport window allows */
  if (ctx->max_dequeue < ctx->sp.snd_space)
    {
      /* Constrained by tx queue. Try to send only fully formed segments */
      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
	(ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
	ctx->max_dequeue;
    }
  else
    {
      /* Expectation is that snd_space0 is already a multiple of snd_mss */
      ctx->max_len_to_snd = ctx->sp.snd_space;
    }

  /* Check if we're tx constrained by the node */
  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
  if (ctx->n_segs_per_evt > max_segs)
    {
      ctx->n_segs_per_evt = max_segs;
      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
    }

  ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
  if (ctx->n_segs_per_evt > 1)
    {
      u32 n_bytes_last_seg, n_bufs_last_seg;

      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
      n_bytes_last_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd
	- ((ctx->n_segs_per_evt - 1) * ctx->sp.snd_mss);
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      n_bufs_last_seg = ceil ((f64) n_bytes_last_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ((ctx->n_segs_per_evt - 1) * ctx->n_bufs_per_seg)
	+ n_bufs_last_seg;
    }
  else
    {
      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd;
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ctx->n_bufs_per_seg;
    }

  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
				     n_bytes_per_buf -
				     TRANSPORT_MAX_HDRS_LEN);
}

always_inline void
session_tx_maybe_reschedule (session_worker_t * wrk,
			     session_tx_context_t * ctx,
			     session_evt_elt_t * elt)
{
  session_t *s = ctx->s;

  svm_fifo_unset_event (s->tx_fifo);
  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
    if (svm_fifo_set_event (s->tx_fifo))
      session_evt_add_head_old (wrk, elt);
}

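/* Main tx routine: peeks or dequeues data from the session's tx fifo, builds
 * buffer chains, asks the transport to push its headers and queues the
 * buffers for output. peek_data distinguishes reliable transports (peek,
 * e.g. tcp) from datagram transports (dequeue). */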
always_inline int
session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
				vlib_node_runtime_t * node,
				session_evt_elt_t * elt,
				int *n_tx_packets, u8 peek_data)
{
  u32 n_trace, n_left, pbi, next_index, max_burst;
  session_tx_context_t *ctx = &wrk->ctx;
  session_main_t *smm = &session_main;
  session_event_t *e = &elt->evt;
  vlib_main_t *vm = wrk->vm;
  transport_proto_t tp;
  vlib_buffer_t *pb;
  u16 n_bufs, rv;

  if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data))))
    {
      if (rv < 2)
	session_evt_add_old (wrk, elt);
      return SESSION_TX_NO_DATA;
    }

  next_index = smm->session_type_to_next[ctx->s->session_type];
  max_burst = VLIB_FRAME_SIZE - *n_tx_packets;

  tp = session_get_transport_proto (ctx->s);
  ctx->transport_vft = transport_protocol_get_vft (tp);
  ctx->tc = session_tx_get_transport (ctx, peek_data);

  if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH))
    {
      if (ctx->transport_vft->flush_data)
	ctx->transport_vft->flush_data (ctx->tc);
      e->event_type = SESSION_IO_EVT_TX;
    }

  if (ctx->s->flags & SESSION_F_CUSTOM_TX)
    {
      u32 n_custom_tx;
      ctx->s->flags &= ~SESSION_F_CUSTOM_TX;
      ctx->sp.max_burst_size = max_burst;
      n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, &ctx->sp);
      *n_tx_packets += n_custom_tx;
      if (PREDICT_FALSE
	  (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
	return SESSION_TX_OK;
      max_burst -= n_custom_tx;
      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
	{
	  session_evt_add_old (wrk, elt);
	  return SESSION_TX_OK;
	}
    }

  transport_connection_snd_params (ctx->tc, &ctx->sp);

  if (!ctx->sp.snd_space)
    {
      /* If the deschedule flag was set, remove session from scheduler.
       * Transport is responsible for rescheduling this session. */
      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
	transport_connection_deschedule (ctx->tc);
      /* Request to postpone the session, e.g., zero-wnd and transport
       * is not currently probing */
      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
	session_evt_add_old (wrk, elt);
      /* This flow queue is "empty" so it should be re-evaluated before
       * the ones that have data to send. */
      else
	session_evt_add_head_old (wrk, elt);

      return SESSION_TX_NO_DATA;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    {
      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
      if (snd_space < TRANSPORT_PACER_MIN_BURST)
	{
	  session_evt_add_head_old (wrk, elt);
	  return SESSION_TX_NO_DATA;
	}
      snd_space = clib_min (ctx->sp.snd_space, snd_space);
      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
	snd_space - snd_space % ctx->sp.snd_mss : snd_space;
    }

  /* Check how much we can pull. */
  session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);

  if (PREDICT_FALSE (!ctx->max_len_to_snd))
    {
      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
      session_tx_maybe_reschedule (wrk, ctx, elt);
      return SESSION_TX_NO_DATA;
    }

  vec_validate_aligned (wrk->tx_buffers, ctx->n_bufs_needed - 1,
			CLIB_CACHE_LINE_BYTES);
  n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, ctx->n_bufs_needed);
  if (PREDICT_FALSE (n_bufs < ctx->n_bufs_needed))
    {
      if (n_bufs)
	vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
      session_evt_add_head_old (wrk, elt);
      vlib_node_increment_counter (wrk->vm, node->node_index,
				   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
      return SESSION_TX_NO_BUFFERS;
    }

  transport_connection_update_tx_bytes (ctx->tc, ctx->max_len_to_snd);

  ctx->left_to_snd = ctx->max_len_to_snd;
  n_left = ctx->n_segs_per_evt;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, bi1;

      pbi = wrk->tx_buffers[n_bufs - 3];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);
      pbi = wrk->tx_buffers[n_bufs - 4];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);

      bi0 = wrk->tx_buffers[--n_bufs];
      bi1 = wrk->tx_buffers[--n_bufs];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);
      session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data);

      ctx->transport_vft->push_header (ctx->tc, b0);
      ctx->transport_vft->push_header (ctx->tc, b1);

      n_left -= 2;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_buffers, bi1);
      vec_add1 (wrk->pending_tx_nexts, next_index);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }
  while (n_left)
    {
      vlib_buffer_t *b0;
      u32 bi0;

      if (n_left > 1)
	{
	  pbi = wrk->tx_buffers[n_bufs - 2];
	  pb = vlib_get_buffer (vm, pbi);
	  vlib_prefetch_buffer_header (pb, STORE);
	}

      bi0 = wrk->tx_buffers[--n_bufs];
      b0 = vlib_get_buffer (vm, bi0);
      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);

      /* Ask transport to push header after current_length and
       * total_length_not_including_first_buffer are updated */
      ctx->transport_vft->push_header (ctx->tc, b0);

      n_left -= 1;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0))
    session_tx_trace_frame (vm, node, next_index, wrk->pending_tx_buffers,
			    ctx->n_segs_per_evt, ctx->s, n_trace);

  if (PREDICT_FALSE (n_bufs))
    vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);

  *n_tx_packets += ctx->n_segs_per_evt;

  SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
	       ctx->s->tx_fifo->has_event, wrk->last_vlib_time);

  ASSERT (ctx->left_to_snd == 0);

  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
   * check if application enqueued more data and reschedule accordingly */
  if (ctx->max_len_to_snd < ctx->max_dequeue)
    session_evt_add_old (wrk, elt);
  else
    session_tx_maybe_reschedule (wrk, ctx, elt);

  if (!peek_data)
    {
      if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, ctx->max_len_to_snd))
	session_dequeue_notify (ctx->s);
    }
  return SESSION_TX_OK;
}

int
session_tx_fifo_peek_and_snd (session_worker_t * wrk,
			      vlib_node_runtime_t * node,
			      session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1);
}

int
session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
				 vlib_node_runtime_t * node,
				 session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0);
}

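/* Built-in/custom tx path: hands the session to the transport's custom tx
 * function and reschedules the session if more work remains. */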
static int
session_tx_fifo_dequeue_internal (session_worker_t * wrk,
				  vlib_node_runtime_t * node,
				  session_evt_elt_t * elt, int *n_tx_packets)
{
  transport_send_params_t *sp = &wrk->ctx.sp;
  session_t *s = wrk->ctx.s;
  u32 n_packets;

  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
    return 0;

  /* Clear custom-tx flag used to request reschedule for tx */
  s->flags &= ~SESSION_F_CUSTOM_TX;

  sp->max_burst_size = clib_min (VLIB_FRAME_SIZE - *n_tx_packets,
				 TRANSPORT_PACER_MAX_BURST_PKTS);

  n_packets = transport_custom_tx (session_get_transport_proto (s), s, sp);
  *n_tx_packets += n_packets;

  if (s->flags & SESSION_F_CUSTOM_TX)
    {
      session_evt_add_old (wrk, elt);
    }
  else if (!(sp->flags & TRANSPORT_SND_F_DESCHED))
    {
      svm_fifo_unset_event (s->tx_fifo);
      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
	if (svm_fifo_set_event (s->tx_fifo))
	  session_evt_add_head_old (wrk, elt);
    }

  return n_packets;
}

always_inline session_t *
session_event_get_session (session_event_t * e, u8 thread_index)
{
  return session_get_if_valid (e->session_index, thread_index);
}

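/* Dispatch one control event (RPC, close/reset requests and the mq messages
 * handled above) and free the event element unless the handler re-queued it. */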
always_inline void
session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt)
{
  clib_llist_index_t ei;
  void (*fp) (void *);
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_CTRL_EVT_RPC:
      fp = e->rpc_args.fp;
      (*fp) (e->rpc_args.arg);
      break;
    case SESSION_CTRL_EVT_CLOSE:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_close (s);
      break;
    case SESSION_CTRL_EVT_RESET:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
	break;
      session_transport_reset (s);
      break;
    case SESSION_CTRL_EVT_LISTEN:
      session_mq_listen_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_LISTEN_URI:
      session_mq_listen_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_UNLISTEN:
      session_mq_unlisten_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT:
      session_mq_connect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT_URI:
      session_mq_connect_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECT:
      session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED:
      session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_ACCEPTED_REPLY:
      session_mq_accepted_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED_REPLY:
      session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk,
								    elt));
      break;
    case SESSION_CTRL_EVT_RESET_REPLY:
      session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_WORKER_UPDATE:
      session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_DETACH:
      app_mq_detach_handler (session_evt_ctrl_data (wrk, elt));
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    {
      e = &elt->evt;
      if (e->event_type >= SESSION_CTRL_EVT_BOUND)
	session_evt_ctrl_data_free (wrk, elt);
      session_evt_elt_free (wrk, elt);
    }
  SESSION_EVT (SESSION_EVT_COUNTS, CNT_CTRL_EVTS, 1, wrk);
}

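/* Dispatch one io event (tx, rx, builtin rx/tx) for a session on this thread
 * and free the element unless it was re-queued. */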
always_inline void
session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
			   session_evt_elt_t * elt, u32 thread_index,
			   int *n_tx_packets)
{
  session_main_t *smm = &session_main;
  app_worker_t *app_wrk;
  clib_llist_index_t ei;
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_TX:
      s = session_event_get_session (e, thread_index);
      if (PREDICT_FALSE (!s))
	break;
      CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      wrk->ctx.s = s;
      /* Spray packets in per session type frames, since they go to
       * different nodes */
      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
      break;
    case SESSION_IO_EVT_RX:
      s = session_event_get_session (e, thread_index);
      if (!s)
	break;
      transport_app_rx_evt (session_get_transport_proto (s),
			    s->connection_index, s->thread_index);
      break;
    case SESSION_IO_EVT_BUILTIN_RX:
      s = session_event_get_session (e, thread_index);
      if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING))
	break;
      svm_fifo_unset_event (s->rx_fifo);
      app_wrk = app_worker_get (s->app_wrk_index);
      app_worker_builtin_rx (app_wrk, s);
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
      s = session_get_from_handle_if_valid (e->session_handle);
      wrk->ctx.s = s;
      if (PREDICT_TRUE (s != 0))
	session_tx_fifo_dequeue_internal (wrk, node, elt, n_tx_packets);
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  SESSION_EVT (SESSION_IO_EVT_COUNTS, e->event_type, 1, wrk);

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    session_evt_elt_free (wrk, elt);
}

static const u32 session_evt_msg_sizes[] = {
#define _(symc, sym)							\
  [SESSION_CTRL_EVT_ ## symc] = sizeof (session_ ## sym ##_msg_t),
  foreach_session_ctrl_evt
#undef _
};

always_inline void
session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt)
{
  session_evt_elt_t *elt;

  if (evt->event_type >= SESSION_CTRL_EVT_RPC)
    {
      elt = session_evt_alloc_ctrl (wrk);
      if (evt->event_type >= SESSION_CTRL_EVT_BOUND)
	{
	  elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk);
	  elt->evt.event_type = evt->event_type;
	  clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data,
			    session_evt_msg_sizes[evt->event_type]);
	}
      else
	{
	  /* Internal control events fit into io events footprint */
	  clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
	}
    }
  else
    {
      elt = session_evt_alloc_new (wrk);
      clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
    }
}

static void
session_flush_pending_tx_buffers (session_worker_t * wrk,
				  vlib_node_runtime_t * node)
{
  vlib_buffer_enqueue_to_next (wrk->vm, node, wrk->pending_tx_buffers,
			       wrk->pending_tx_nexts,
			       vec_len (wrk->pending_tx_nexts));
  vec_reset_length (wrk->pending_tx_buffers);
  vec_reset_length (wrk->pending_tx_nexts);
}

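/* Input node that drives the session layer on each thread: updates transport
 * time, drains the vpp event message queue, then dispatches control events
 * followed by new and old io events, bounded by VLIB_FRAME_SIZE packets per
 * dispatch. */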
static uword
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame)
{
  session_main_t *smm = vnet_get_session_main ();
  u32 thread_index = vm->thread_index, n_to_dequeue;
  session_worker_t *wrk = &smm->wrk[thread_index];
  session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
  clib_llist_index_t ei, next_ei, old_ti;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  int i = 0, n_tx_packets;
  session_event_t *evt;
  svm_msg_q_t *mq;

  SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);

  wrk->last_vlib_time = vlib_time_now (vm);
  wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;

  /*
   *  Update transport time
   */
  transport_update_time (wrk->last_vlib_time, thread_index);
  n_tx_packets = vec_len (wrk->pending_tx_buffers);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, UPDATE_TIME, wrk);

  /*
   *  Dequeue and handle new events
   */

  /* Try to dequeue what is available. Don't wait for lock.
   * XXX: we may need priorities here */
  mq = wrk->vpp_event_queue;
  n_to_dequeue = svm_msg_q_size (mq);
  if (n_to_dequeue && svm_msg_q_try_lock (mq) == 0)
    {
      for (i = 0; i < n_to_dequeue; i++)
	{
	  svm_msg_q_sub_w_lock (mq, msg);
	  evt = svm_msg_q_msg_data (mq, msg);
	  session_evt_add_to_list (wrk, evt);
	  svm_msg_q_free_msg (mq, msg);
	}
      svm_msg_q_unlock (mq);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, MQ_DEQ, wrk, n_to_dequeue, !i);

  /*
   * Handle control events
   */

  ctrl_he = pool_elt_at_index (wrk->event_elts, wrk->ctrl_head);

  /* *INDENT-OFF* */
  clib_llist_foreach_safe (wrk->event_elts, evt_list, ctrl_he, elt, ({
    clib_llist_remove (wrk->event_elts, evt_list, elt);
    session_event_dispatch_ctrl (wrk, elt);
  }));
  /* *INDENT-ON* */

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, CTRL_EVTS, wrk);

  /*
   * Handle the new io events.
   */

  new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head);
  old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
  old_ti = clib_llist_prev_index (old_he, evt_list);

  ei = clib_llist_next_index (new_he, evt_list);
  while (ei != wrk->new_head && n_tx_packets < VLIB_FRAME_SIZE)
    {
      elt = pool_elt_at_index (wrk->event_elts, ei);
      ei = clib_llist_next_index (elt, evt_list);
      clib_llist_remove (wrk->event_elts, evt_list, elt);
      session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, NEW_IO_EVTS, wrk);

  /*
   * Handle the old io events, if we had any prior to processing the new ones
   */

  if (old_ti != wrk->old_head)
    {
      old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
      ei = clib_llist_next_index (old_he, evt_list);

      while (n_tx_packets < VLIB_FRAME_SIZE)
	{
	  elt = pool_elt_at_index (wrk->event_elts, ei);
	  next_ei = clib_llist_next_index (elt, evt_list);
	  clib_llist_remove (wrk->event_elts, evt_list, elt);

	  session_event_dispatch_io (wrk, node, elt, thread_index,
				     &n_tx_packets);

	  if (ei == old_ti)
	    break;

	  ei = next_ei;
	}
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, OLD_IO_EVTS, wrk);

  if (vec_len (wrk->pending_tx_buffers))
    session_flush_pending_tx_buffers (wrk, node);

  vlib_node_increment_counter (vm, session_queue_node.index,
			       SESSION_QUEUE_ERROR_TX, n_tx_packets);

  SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets);

  return n_tx_packets;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_node) =
{
  .function = session_queue_node_fn,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = ARRAY_LEN (session_queue_error_strings),
  .error_strings = session_queue_error_strings,
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

static clib_error_t *
session_queue_exit (vlib_main_t * vm)
{
  if (vec_len (vlib_mains) < 2)
    return 0;

  /*
   * Shut off (especially) worker-thread session nodes.
   * Otherwise, vpp can crash as the main thread unmaps the
   * API segment.
   */
  vlib_worker_thread_barrier_sync (vm);
  session_node_enable_disable (0 /* is_enable */ );
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);

static uword
session_queue_run_on_main (vlib_main_t * vm)
{
  vlib_node_runtime_t *node;

  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, 0);
}

static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		       vlib_frame_t * f)
{
  uword *event_data = 0;
  uword event_type;
  f64 timeout = 1.0;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
	{
	case SESSION_Q_PROCESS_RUN_ON_MAIN:
	  /* Run session queue node on main thread */
	  session_queue_run_on_main (vm);
	  break;
	case SESSION_Q_PROCESS_STOP:
	  timeout = 100000.0;
	  break;
	case ~0:
	  /* Timed out. Run on main to ensure all events are handled */
	  session_queue_run_on_main (vm);
	  break;
	}
      vec_reset_length (event_data);
    }
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_process_node) =
{
  .function = session_queue_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

static_always_inline uword
session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  session_main_t *sm = &session_main;
  if (!sm->wrk[0].vpp_event_queue)
    return 0;
  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_pre_input_node) =
{
  .function = session_queue_pre_input_inline,
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .name = "session-queue-main",
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */