/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/application_local.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>

#define app_check_thread_and_barrier(_fn, _arg)                         \
  if (!vlib_thread_is_main_w_barrier ())                                \
    {                                                                   \
      vlib_rpc_call_main_thread (_fn, (u8 *) _arg, sizeof(*_arg));      \
      return;                                                           \
    }

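/*
 * Handlers that change global listener/app state must run on the main
 * thread with workers barriered. When invoked on a worker, the macro
 * above re-posts the message to main as an RPC and bails out; the
 * handler then re-runs to completion on the main thread.
 */
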
static void
session_mq_listen_handler (void *data)
{
  session_listen_msg_t *mp = (session_listen_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  ip_copy (&a->sep.ip, &mp->ip, mp->is_ip4);
  a->sep.port = mp->port;
  a->sep.fib_index = mp->vrf;
  a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep.transport_proto = mp->proto;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;
  a->sep_ext.transport_flags = mp->flags;

  if ((rv = vnet_listen (a)))
    clib_warning ("listen returned: %U", format_session_error, rv);

  app_wrk = application_get_worker (app, mp->wrk_index);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

static void
session_mq_listen_uri_handler (void *data)
{
  session_listen_uri_msg_t *mp = (session_listen_uri_msg_t *) data;
  vnet_listen_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_listen_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->app_index = app->app_index;
  rv = vnet_bind_uri (a);

  app_wrk = application_get_worker (app, 0);
  mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv);
}

static void
session_mq_connect_handler (void *data)
{
  session_connect_msg_t *mp = (session_connect_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->sep.is_ip4 = mp->is_ip4;
  clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
  a->sep.port = mp->port;
  a->sep.transport_proto = mp->proto;
  a->sep.peer.fib_index = mp->vrf;
  clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
  if (mp->is_ip4)
    {
      ip46_address_mask_ip4 (&a->sep.ip);
      ip46_address_mask_ip4 (&a->sep.peer.ip);
    }
  a->sep.peer.port = mp->lcl_port;
  a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
  a->sep_ext.parent_handle = mp->parent_handle;
  a->sep_ext.ckpair_index = mp->ckpair_index;
  a->sep_ext.crypto_engine = mp->crypto_engine;
  a->sep_ext.transport_flags = mp->flags;
  if (mp->hostname_len)
    {
      vec_validate (a->sep_ext.hostname, mp->hostname_len - 1);
      clib_memcpy_fast (a->sep_ext.hostname, mp->hostname, mp->hostname_len);
    }
  a->api_context = mp->context;
  a->app_index = app->app_index;
  a->wrk_map_index = mp->wrk_index;

  if ((rv = vnet_connect (a)))
    {
      clib_warning ("connect returned: %U", format_session_error, rv);
      app_wrk = application_get_worker (app, mp->wrk_index);
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }

  vec_free (a->sep_ext.hostname);
}

static void
session_mq_connect_uri_handler (void *data)
{
  session_connect_uri_msg_t *mp = (session_connect_uri_msg_t *) data;
  vnet_connect_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_connect_uri_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->uri = (char *) mp->uri;
  a->api_context = mp->context;
  a->app_index = app->app_index;
  if ((rv = vnet_connect_uri (a)))
    {
      clib_warning ("connect_uri returned: %d", rv);
      app_wrk = application_get_worker (app, 0 /* default wrk only */ );
      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
    }
}

static void
session_mq_disconnect_handler (void *data)
{
  session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->handle = mp->handle;
  vnet_disconnect_session (a);
}

static void
app_mq_detach_handler (void *data)
{
  session_app_detach_msg_t *mp = (session_app_detach_msg_t *) data;
  vnet_app_detach_args_t _a, *a = &_a;
  application_t *app;

  app_check_thread_and_barrier (app_mq_detach_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  a->app_index = app->app_index;
  a->api_client_index = mp->client_index;
  vnet_application_detach (a);
}

static void
session_mq_unlisten_handler (void *data)
{
  session_unlisten_msg_t *mp = (session_unlisten_msg_t *) data;
  vnet_unlisten_args_t _a, *a = &_a;
  app_worker_t *app_wrk;
  application_t *app;
  int rv;

  app_check_thread_and_barrier (session_mq_unlisten_handler, mp);

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  clib_memset (a, 0, sizeof (*a));
  a->app_index = app->app_index;
  a->handle = mp->handle;
  a->wrk_map_index = mp->wrk_index;
  if ((rv = vnet_unlisten (a)))
    clib_warning ("unlisten returned: %d", rv);

  app_wrk = application_get_worker (app, a->wrk_map_index);
  if (!app_wrk)
    return;

  mq_send_unlisten_reply (app_wrk, mp->handle, mp->context, rv);
}

static void
session_mq_accepted_reply_handler (void *data)
{
  session_accepted_reply_msg_t *mp = (session_accepted_reply_msg_t *) data;
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_state_t old_state;
  app_worker_t *app_wrk;
  session_t *s;

  /* Server isn't interested, kill the session */
  if (mp->retval)
    {
      a->app_index = mp->context;
      a->handle = mp->handle;
      vnet_disconnect_session (a);
      return;
    }

  /* Mail this back from the main thread. We're not polling in main
   * thread so we're using other workers for notifications. */
  if (vlib_num_workers () && vlib_get_thread_index () != 0
      && session_thread_from_handle (mp->handle) == 0)
    {
      vlib_rpc_call_main_thread (session_mq_accepted_reply_handler,
                                 (u8 *) mp, sizeof (*mp));
      return;
    }

  s = session_get_from_handle_if_valid (mp->handle);
  if (!s)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != mp->context)
    {
      clib_warning ("app doesn't own session");
      return;
    }

  if (!session_has_transport (s))
    {
      s->session_state = SESSION_STATE_READY;
      if (ct_session_connect_notify (s))
        return;
    }
  else
    {
      old_state = s->session_state;
      s->session_state = SESSION_STATE_READY;

      if (!svm_fifo_is_empty_prod (s->rx_fifo))
        app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

      /* Closed while waiting for app to reply. Resend disconnect */
      if (old_state >= SESSION_STATE_TRANSPORT_CLOSING)
        {
          app_worker_close_notify (app_wrk, s);
          s->session_state = old_state;
          return;
        }
    }
}

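/*
 * Note: a session without a transport above is a cut-through session,
 * i.e., two local apps wired directly through shared-memory fifos, so
 * readiness is signaled via ct_session_connect_notify rather than a
 * transport-level notification.
 */
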
static void
session_mq_reset_reply_handler (void *data)
{
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  session_reset_reply_msg_t *mp;
  app_worker_t *app_wrk;
  session_t *s;
  application_t *app;
  u32 index, thread_index;

  mp = (session_reset_reply_msg_t *) data;
  app = application_lookup (mp->context);
  if (!app)
    return;

  session_parse_handle (mp->handle, &index, &thread_index);
  s = session_get_if_valid (index, thread_index);

  /* No session or not the right session */
  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
    return;

  app_wrk = app_worker_get (s->app_wrk_index);
  if (!app_wrk || app_wrk->app_index != app->app_index)
    {
      clib_warning ("App %u does not own handle 0x%lx!", app->app_index,
                    mp->handle);
      return;
    }

  /* Client objected to resetting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* This comes as a response to a reset, transport only waiting for
   * confirmation to remove connection state, no need to disconnect */
  a->handle = mp->handle;
  a->app_index = app->app_index;
  vnet_disconnect_session (a);
}

static void
session_mq_disconnected_handler (void *data)
{
  session_disconnected_reply_msg_t *rmp;
  vnet_disconnect_args_t _a, *a = &_a;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_disconnected_msg_t *mp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  session_t *s;
  application_t *app;
  int rv = 0;

  mp = (session_disconnected_msg_t *) data;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("could not disconnect handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  app = application_lookup (mp->client_index);
  if (!(app_wrk && app && app->app_index == app_wrk->app_index))
    {
      clib_warning ("could not disconnect session: %llu app: %u",
                    mp->handle, mp->client_index);
      return;
    }

  a->handle = mp->handle;
  a->app_index = app_wrk->wrk_index;
  rv = vnet_disconnect_session (a);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
                                       SESSION_MQ_CTRL_EVT_RING,
                                       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY;
  rmp = (session_disconnected_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->context = mp->context;
  rmp->retval = rv;
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

static void
session_mq_disconnected_reply_handler (void *data)
{
  session_disconnected_reply_msg_t *mp;
  vnet_disconnect_args_t _a, *a = &_a;
  application_t *app;

  mp = (session_disconnected_reply_msg_t *) data;

  /* Client objected to disconnecting the session, log and continue */
  if (mp->retval)
    {
      clib_warning ("client retval %d", mp->retval);
      return;
    }

  /* Disconnect has been confirmed. Confirm close to transport */
  app = application_lookup (mp->context);
  if (app)
    {
      a->handle = mp->handle;
      a->app_index = app->app_index;
      vnet_disconnect_session (a);
    }
}

static void
session_mq_worker_update_handler (void *data)
{
  session_worker_update_msg_t *mp = (session_worker_update_msg_t *) data;
  session_worker_update_reply_msg_t *rmp;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  app_worker_t *app_wrk;
  u32 owner_app_wrk_map;
  session_event_t *evt;
  session_t *s;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;
  if (!(s = session_get_from_handle_if_valid (mp->handle)))
    {
      clib_warning ("invalid handle %llu", mp->handle);
      return;
    }
  app_wrk = app_worker_get (s->app_wrk_index);
  if (app_wrk->app_index != app->app_index)
    {
      clib_warning ("app %u does not own session %llu", app->app_index,
                    mp->handle);
      return;
    }
  owner_app_wrk_map = app_wrk->wrk_map_index;
  app_wrk = application_get_worker (app, mp->wrk_index);

  /* This needs to come from the new owner */
  if (mp->req_wrk_index == owner_app_wrk_map)
    {
      session_req_worker_update_msg_t *wump;

      svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
                                           SESSION_MQ_CTRL_EVT_RING,
                                           SVM_Q_WAIT, msg);
      evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
      clib_memset (evt, 0, sizeof (*evt));
      evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE;
      wump = (session_req_worker_update_msg_t *) evt->data;
      wump->session_handle = mp->handle;
      svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
      return;
    }

  app_worker_own_session (app_wrk, s);

  /*
   * Send reply
   */
  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
                                       SESSION_MQ_CTRL_EVT_RING,
                                       SVM_Q_WAIT, msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY;
  rmp = (session_worker_update_reply_msg_t *) evt->data;
  rmp->handle = mp->handle;
  rmp->rx_fifo = pointer_to_uword (s->rx_fifo->shr);
  rmp->tx_fifo = pointer_to_uword (s->tx_fifo->shr);
  rmp->segment_handle = session_segment_handle (s);
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);

  /*
   * Retransmit messages that may have been lost
   */
  if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);

  if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
    app_worker_lock_and_send_event (app_wrk, s, SESSION_IO_EVT_RX);

  if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
    app_worker_close_notify (app_wrk, s);
}

static void
session_mq_app_wrk_rpc_handler (void *data)
{
  session_app_wrk_rpc_msg_t *mp = (session_app_wrk_rpc_msg_t *) data;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  session_app_wrk_rpc_msg_t *rmp;
  app_worker_t *app_wrk;
  session_event_t *evt;
  application_t *app;

  app = application_lookup (mp->client_index);
  if (!app)
    return;

  app_wrk = application_get_worker (app, mp->wrk_index);

  svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue,
                                       SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT,
                                       msg);
  evt = svm_msg_q_msg_data (app_wrk->event_queue, msg);
  clib_memset (evt, 0, sizeof (*evt));
  evt->event_type = SESSION_CTRL_EVT_APP_WRK_RPC;
  rmp = (session_app_wrk_rpc_msg_t *) evt->data;
  clib_memcpy (rmp->data, mp->data, sizeof (mp->data));
  svm_msg_q_add_and_unlock (app_wrk->event_queue, msg);
}

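/*
 * The mq send pattern used by the handlers above: allocate a message
 * from the control-event ring while holding the queue lock, fill in the
 * event in place, then add and unlock. SVM_Q_WAIT blocks if the ring is
 * temporarily full, so delivery to the app is lossless.
 */
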
vlib_node_registration_t session_queue_node;

typedef struct
{
  u32 session_index;
  u32 server_thread_index;
} session_queue_trace_t;

/* packet trace format function */
static u8 *
format_session_queue_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);

  s = format (s, "session index %d thread index %d",
              t->session_index, t->server_thread_index);
  return s;
}

#define foreach_session_queue_error             \
_(TX, "Packets transmitted")                    \
_(TIMER, "Timer events")                        \
_(NO_BUFFER, "Out of buffers")

typedef enum
{
#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
  foreach_session_queue_error
#undef _
  SESSION_QUEUE_N_ERROR,
} session_queue_error_t;

static char *session_queue_error_strings[] = {
#define _(sym,string) string,
  foreach_session_queue_error
#undef _
};

enum
{
  SESSION_TX_NO_BUFFERS = -2,
  SESSION_TX_NO_DATA,
  SESSION_TX_OK
};

static void
session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                        u32 next_index, u32 * to_next, u16 n_segs,
                        session_t * s, u32 n_trace)
{
  while (n_trace && n_segs)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, to_next[0]);
      if (PREDICT_TRUE (vlib_trace_buffer
                        (vm, node, next_index, b, 1 /* follow_chain */ )))
        {
          session_queue_trace_t *t =
            vlib_add_trace (vm, node, b, sizeof (*t));
          t->session_index = s->session_index;
          t->server_thread_index = s->thread_index;
          n_trace--;
        }
      to_next++;
      n_segs--;
    }
  vlib_set_trace_count (vm, node, n_trace);
}

always_inline void
session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
                            vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  vlib_buffer_t *chain_b, *prev_b;
  u32 chain_bi0, to_deq, left_from_seg;
  session_worker_t *wrk;
  u16 len_to_deq, n_bytes_read;
  u8 *data, j;

  wrk = session_main_get_worker (ctx->s->thread_index);
  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  chain_b = b;
  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
                            ctx->left_to_snd);
  to_deq = left_from_seg;
  for (j = 1; j < ctx->n_bufs_per_seg; j++)
    {
      prev_b = chain_b;
      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);

      *n_bufs -= 1;
      chain_bi0 = wrk->tx_buffers[*n_bufs];
      chain_b = vlib_get_buffer (vm, chain_bi0);
      chain_b->current_data = 0;
      data = vlib_buffer_get_current (chain_b);
      if (peek_data)
        {
          n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo,
                                        ctx->sp.tx_offset, len_to_deq, data);
          ctx->sp.tx_offset += n_bytes_read;
        }
      else
        {
          if (ctx->transport_vft->transport_options.tx_type ==
              TRANSPORT_TX_DGRAM)
            {
              svm_fifo_t *f = ctx->s->tx_fifo;
              session_dgram_hdr_t *hdr = &ctx->hdr;
              u16 deq_now;
              deq_now = clib_min (hdr->data_length - hdr->data_offset,
                                  len_to_deq);
              n_bytes_read = svm_fifo_peek (f, hdr->data_offset, deq_now,
                                            data);
              ASSERT (n_bytes_read > 0);

              hdr->data_offset += n_bytes_read;
              if (hdr->data_offset == hdr->data_length)
                {
                  u32 offset = hdr->data_length + SESSION_CONN_HDR_LEN;
                  svm_fifo_dequeue_drop (f, offset);
                  if (ctx->left_to_snd > n_bytes_read)
                    svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
                                   (u8 *) & ctx->hdr);
                }
              else if (ctx->left_to_snd == n_bytes_read)
                svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
                                         sizeof (session_dgram_pre_hdr_t));
            }
          else
            n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
                                             len_to_deq, data);
        }
      ASSERT (n_bytes_read == len_to_deq);
      chain_b->current_length = n_bytes_read;
      b->total_length_not_including_first_buffer += chain_b->current_length;

      /* update previous buffer */
      prev_b->next_buffer = chain_bi0;
      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      /* update current buffer */
      chain_b->next_buffer = 0;

      to_deq -= n_bytes_read;
      if (to_deq == 0)
        break;
    }
  ASSERT (to_deq == 0
          && b->total_length_not_including_first_buffer == left_from_seg);
  ctx->left_to_snd -= left_from_seg;
}

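/*
 * Datagram fifo layout assumed above: each record is a
 * session_dgram_hdr_t followed by data_length bytes of payload. Once a
 * dgram is fully sent, header plus payload are dropped from the fifo
 * and the next header, if any, is peeked into ctx->hdr.
 */
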
always_inline void
session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
                        vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  u32 len_to_deq;
  u8 *data0;
  int n_bytes_read;

  /*
   * Start with the first buffer in chain
   */
  b->error = 0;
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_data = 0;

  data0 = vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);

  if (peek_data)
    {
      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset,
                                    len_to_deq, data0);
      ASSERT (n_bytes_read > 0);
      /* Keep track of progress locally, transport is also supposed to
       * increment it independently when pushing the header */
      ctx->sp.tx_offset += n_bytes_read;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
        {
          session_dgram_hdr_t *hdr = &ctx->hdr;
          svm_fifo_t *f = ctx->s->tx_fifo;
          u16 deq_now;
          u32 offset;

          ASSERT (hdr->data_length > hdr->data_offset);
          deq_now = clib_min (hdr->data_length - hdr->data_offset,
                              len_to_deq);
          offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
          n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
          ASSERT (n_bytes_read > 0);

          if (ctx->s->session_state == SESSION_STATE_LISTENING)
            {
              ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
              ctx->tc->rmt_port = hdr->rmt_port;
            }
          hdr->data_offset += n_bytes_read;
          if (hdr->data_offset == hdr->data_length)
            {
              offset = hdr->data_length + SESSION_CONN_HDR_LEN;
              svm_fifo_dequeue_drop (f, offset);
              if (ctx->left_to_snd > n_bytes_read)
                svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
                               (u8 *) & ctx->hdr);
            }
          else if (ctx->left_to_snd == n_bytes_read)
            svm_fifo_overwrite_head (ctx->s->tx_fifo, (u8 *) & ctx->hdr,
                                     sizeof (session_dgram_pre_hdr_t));
        }
      else
        {
          n_bytes_read = svm_fifo_dequeue (ctx->s->tx_fifo,
                                           len_to_deq, data0);
          ASSERT (n_bytes_read > 0);
        }
    }
  b->current_length = n_bytes_read;
  ctx->left_to_snd -= n_bytes_read;

  /*
   * Fill in the remaining buffers in the chain, if any
   */
  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
    session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data);
}

always_inline u8
session_tx_not_ready (session_t * s, u8 peek_data)
{
  if (peek_data)
    {
      if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY))
        return 0;
      /* Can retransmit for closed sessions but can't send new data if
       * session is not ready or closed */
      else if (s->session_state < SESSION_STATE_READY)
        return 1;
      else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)
        {
          /* Allow closed transports to still send custom packets.
           * For instance, tcp may want to send acks in time-wait. */
          if (s->session_state != SESSION_STATE_TRANSPORT_DELETED
              && (s->flags & SESSION_F_CUSTOM_TX))
            return 0;
          return 2;
        }
    }
  return 0;
}

always_inline transport_connection_t *
session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
{
  if (peek_data)
    {
      return ctx->transport_vft->get_connection (ctx->s->connection_index,
                                                 ctx->s->thread_index);
    }
  else
    {
      if (ctx->s->session_state == SESSION_STATE_LISTENING)
        return ctx->transport_vft->get_listener (ctx->s->connection_index);
      else
        return ctx->transport_vft->get_connection (ctx->s->connection_index,
                                                   ctx->s->thread_index);
    }
}

always_inline void
session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
                               u32 max_segs, u8 peek_data)
{
  u32 n_bytes_per_buf, n_bytes_per_seg;

  n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
  ctx->max_dequeue = svm_fifo_max_dequeue_cons (ctx->s->tx_fifo);

  if (peek_data)
    {
      /* Offset in rx fifo from where to peek data */
      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
        {
          ctx->max_len_to_snd = 0;
          return;
        }
      ctx->max_dequeue -= ctx->sp.tx_offset;
    }
  else
    {
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
        {
          u32 len, chain_limit;

          if (ctx->max_dequeue <= sizeof (ctx->hdr))
            {
              ctx->max_len_to_snd = 0;
              return;
            }

          svm_fifo_peek (ctx->s->tx_fifo, 0, sizeof (ctx->hdr),
                         (u8 *) & ctx->hdr);
          ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
          len = ctx->hdr.data_length - ctx->hdr.data_offset;

          /* Process multiple dgrams if smaller than min (buf_space, mss).
           * This avoids handling multiple dgrams if they require buffer
           * chains */
          chain_limit = clib_min (n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN,
                                  ctx->sp.snd_mss);
          if (ctx->hdr.data_length <= chain_limit)
            {
              u32 first_dgram_len, dgram_len, offset, max_offset;
              session_dgram_hdr_t hdr;

              ctx->sp.snd_mss = clib_min (ctx->sp.snd_mss, len);
              offset = ctx->hdr.data_length + sizeof (session_dgram_hdr_t);
              first_dgram_len = len;
              max_offset = clib_min (ctx->max_dequeue, 16 << 10);

              while (offset < max_offset)
                {
                  svm_fifo_peek (ctx->s->tx_fifo, offset, sizeof (ctx->hdr),
                                 (u8 *) & hdr);
                  ASSERT (hdr.data_length > hdr.data_offset);
                  dgram_len = hdr.data_length - hdr.data_offset;
                  if (len + dgram_len > ctx->max_dequeue
                      || first_dgram_len != dgram_len)
                    break;
                  len += dgram_len;
                  offset += sizeof (hdr) + hdr.data_length;
                }
            }

          ctx->max_dequeue = len;
        }
    }
  ASSERT (ctx->max_dequeue > 0);

  /* Ensure we're not writing more than transport window allows */
  if (ctx->max_dequeue < ctx->sp.snd_space)
    {
      /* Constrained by tx queue. Try to send only fully formed segments */
      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
        (ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
        ctx->max_dequeue;
    }
  else
    {
      /* Expectation is that snd_space0 is already a multiple of snd_mss */
      ctx->max_len_to_snd = ctx->sp.snd_space;
    }

  /* Check if we're tx constrained by the node */
  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
  if (ctx->n_segs_per_evt > max_segs)
    {
      ctx->n_segs_per_evt = max_segs;
      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
    }

  ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
  if (ctx->n_segs_per_evt > 1)
    {
      u32 n_bytes_last_seg, n_bufs_last_seg;

      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
      n_bytes_last_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd
        - ((ctx->n_segs_per_evt - 1) * ctx->sp.snd_mss);
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      n_bufs_last_seg = ceil ((f64) n_bytes_last_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ((ctx->n_segs_per_evt - 1) * ctx->n_bufs_per_seg)
        + n_bufs_last_seg;
    }
  else
    {
      n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->max_len_to_snd;
      ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
      ctx->n_bufs_needed = ctx->n_bufs_per_seg;
    }

  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
                                     n_bytes_per_buf -
                                     TRANSPORT_MAX_HDRS_LEN);
}

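/*
 * Rough sizing example, assuming 2048B buffers, a 1460B snd_mss and
 * ~140B of reserved header space: one segment needs 1600B, i.e., one
 * buffer, so a 14600B send budget gives n_segs_per_evt = 10 and
 * n_bufs_needed = 10. A larger mss or smaller buffers push
 * n_bufs_per_seg above 1 and segments become buffer chains.
 */
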
always_inline void
session_tx_maybe_reschedule (session_worker_t * wrk,
                             session_tx_context_t * ctx,
                             session_evt_elt_t * elt)
{
  session_t *s = ctx->s;

  svm_fifo_unset_event (s->tx_fifo);
  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
    if (svm_fifo_set_event (s->tx_fifo))
      session_evt_add_head_old (wrk, elt);
}

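/*
 * The unset/check/set sequence above is the usual fifo event race
 * guard: clearing the event first means a producer that enqueues right
 * after will set it again, and requeueing only when svm_fifo_set_event
 * succeeds ensures the session is scheduled at most once.
 */
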
always_inline int
session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
                                vlib_node_runtime_t * node,
                                session_evt_elt_t * elt,
                                int *n_tx_packets, u8 peek_data)
{
  u32 n_trace, n_left, pbi, next_index, max_burst;
  session_tx_context_t *ctx = &wrk->ctx;
  session_main_t *smm = &session_main;
  session_event_t *e = &elt->evt;
  vlib_main_t *vm = wrk->vm;
  transport_proto_t tp;
  vlib_buffer_t *pb;
  u16 n_bufs, rv;

  if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data))))
    {
      if (rv < 2)
        session_evt_add_old (wrk, elt);
      return SESSION_TX_NO_DATA;
    }

  next_index = smm->session_type_to_next[ctx->s->session_type];
  max_burst = SESSION_NODE_FRAME_SIZE - *n_tx_packets;

  tp = session_get_transport_proto (ctx->s);
  ctx->transport_vft = transport_protocol_get_vft (tp);
  ctx->tc = session_tx_get_transport (ctx, peek_data);

  if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH))
    {
      if (ctx->transport_vft->flush_data)
        ctx->transport_vft->flush_data (ctx->tc);
      e->event_type = SESSION_IO_EVT_TX;
    }

  if (ctx->s->flags & SESSION_F_CUSTOM_TX)
    {
      u32 n_custom_tx;
      ctx->s->flags &= ~SESSION_F_CUSTOM_TX;
      ctx->sp.max_burst_size = max_burst;
      n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, &ctx->sp);
      *n_tx_packets += n_custom_tx;
      if (PREDICT_FALSE
          (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
        return SESSION_TX_OK;
      max_burst -= n_custom_tx;
      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
        {
          session_evt_add_old (wrk, elt);
          return SESSION_TX_OK;
        }
    }

  transport_connection_snd_params (ctx->tc, &ctx->sp);

  if (!ctx->sp.snd_space)
    {
      /* If the deschedule flag was set, remove session from scheduler.
       * Transport is responsible for rescheduling this session. */
      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
        transport_connection_deschedule (ctx->tc);
      /* Request to postpone the session, e.g., zero-wnd and transport
       * is not currently probing */
      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
        session_evt_add_old (wrk, elt);
      /* This flow queue is "empty" so it should be re-evaluated before
       * the ones that have data to send. */
      else
        session_evt_add_head_old (wrk, elt);

      return SESSION_TX_NO_DATA;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    {
      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
      if (snd_space < TRANSPORT_PACER_MIN_BURST)
        {
          session_evt_add_head_old (wrk, elt);
          return SESSION_TX_NO_DATA;
        }
      snd_space = clib_min (ctx->sp.snd_space, snd_space);
      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
        snd_space - snd_space % ctx->sp.snd_mss : snd_space;
    }

  /* Check how much we can pull. */
  session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);

  if (PREDICT_FALSE (!ctx->max_len_to_snd))
    {
      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
      session_tx_maybe_reschedule (wrk, ctx, elt);
      return SESSION_TX_NO_DATA;
    }

  vec_validate_aligned (wrk->tx_buffers, ctx->n_bufs_needed - 1,
                        CLIB_CACHE_LINE_BYTES);
  n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, ctx->n_bufs_needed);
  if (PREDICT_FALSE (n_bufs < ctx->n_bufs_needed))
    {
      if (n_bufs)
        vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
      session_evt_add_head_old (wrk, elt);
      vlib_node_increment_counter (wrk->vm, node->node_index,
                                   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
      return SESSION_TX_NO_BUFFERS;
    }

  if (transport_connection_is_tx_paced (ctx->tc))
    transport_connection_tx_pacer_update_bytes (ctx->tc, ctx->max_len_to_snd);

  ctx->left_to_snd = ctx->max_len_to_snd;
  n_left = ctx->n_segs_per_evt;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, bi1;

      pbi = wrk->tx_buffers[n_bufs - 3];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);
      pbi = wrk->tx_buffers[n_bufs - 4];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);

      bi0 = wrk->tx_buffers[--n_bufs];
      bi1 = wrk->tx_buffers[--n_bufs];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);
      session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data);

      ctx->transport_vft->push_header (ctx->tc, b0);
      ctx->transport_vft->push_header (ctx->tc, b1);

      n_left -= 2;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_buffers, bi1);
      vec_add1 (wrk->pending_tx_nexts, next_index);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }
  while (n_left)
    {
      vlib_buffer_t *b0;
      u32 bi0;

      if (n_left > 1)
        {
          pbi = wrk->tx_buffers[n_bufs - 2];
          pb = vlib_get_buffer (vm, pbi);
          vlib_prefetch_buffer_header (pb, STORE);
        }

      bi0 = wrk->tx_buffers[--n_bufs];
      b0 = vlib_get_buffer (vm, bi0);
      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);

      /* Ask transport to push header after current_length and
       * total_length_not_including_first_buffer are updated */
      ctx->transport_vft->push_header (ctx->tc, b0);

      n_left -= 1;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

      vec_add1 (wrk->pending_tx_buffers, bi0);
      vec_add1 (wrk->pending_tx_nexts, next_index);
    }

  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0))
    session_tx_trace_frame (vm, node, next_index, wrk->pending_tx_buffers,
                            ctx->n_segs_per_evt, ctx->s, n_trace);

  if (PREDICT_FALSE (n_bufs))
    vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);

  *n_tx_packets += ctx->n_segs_per_evt;

  SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
               ctx->s->tx_fifo->has_event, wrk->last_vlib_time);

  ASSERT (ctx->left_to_snd == 0);

  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
   * check if application enqueued more data and reschedule accordingly */
  if (ctx->max_len_to_snd < ctx->max_dequeue)
    session_evt_add_old (wrk, elt);
  else
    session_tx_maybe_reschedule (wrk, ctx, elt);

  if (!peek_data)
    {
      u32 n_dequeued = ctx->max_len_to_snd;
      if (ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
        n_dequeued += ctx->n_segs_per_evt * SESSION_CONN_HDR_LEN;
      if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, n_dequeued))
        session_dequeue_notify (ctx->s);
    }
  return SESSION_TX_OK;
}

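/*
 * Outcomes above: SESSION_TX_OK when the burst was sent or consumed by
 * custom tx, SESSION_TX_NO_DATA when nothing could be sent and the
 * session was postponed or descheduled, SESSION_TX_NO_BUFFERS when
 * buffer allocation failed and the event was requeued at the head of
 * the old list for retry.
 */
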
int
session_tx_fifo_peek_and_snd (session_worker_t * wrk,
                              vlib_node_runtime_t * node,
                              session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1);
}

int
session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
                                 vlib_node_runtime_t * node,
                                 session_evt_elt_t * e, int *n_tx_packets)
{
  return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0);
}

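/*
 * peek_data selects the fifo discipline: reliable transports (e.g.,
 * tcp) peek at sp.tx_offset and leave data in the fifo until acked, so
 * retransmits can re-read it, while datagram transports (e.g., udp)
 * dequeue and discard data as it is sent.
 */
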
int
session_tx_fifo_dequeue_internal (session_worker_t * wrk,
                                  vlib_node_runtime_t * node,
                                  session_evt_elt_t * elt, int *n_tx_packets)
{
  transport_send_params_t *sp = &wrk->ctx.sp;
  session_t *s = wrk->ctx.s;
  u32 n_packets;

  if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
    return 0;

  /* Clear custom-tx flag used to request reschedule for tx */
  s->flags &= ~SESSION_F_CUSTOM_TX;

  sp->max_burst_size = clib_min (SESSION_NODE_FRAME_SIZE - *n_tx_packets,
                                 TRANSPORT_PACER_MAX_BURST_PKTS);

  n_packets = transport_custom_tx (session_get_transport_proto (s), s, sp);
  *n_tx_packets += n_packets;

  if (s->flags & SESSION_F_CUSTOM_TX)
    {
      session_evt_add_old (wrk, elt);
    }
  else if (!(sp->flags & TRANSPORT_SND_F_DESCHED))
    {
      svm_fifo_unset_event (s->tx_fifo);
      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
        if (svm_fifo_set_event (s->tx_fifo))
          session_evt_add_head_old (wrk, elt);
    }

  return n_packets;
}

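/*
 * This path serves SESSION_IO_EVT_BUILTIN_TX events: the transport's
 * custom_tx callback builds and sends its own packets instead of having
 * the session layer segment the tx fifo on its behalf.
 */
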
always_inline session_t *
session_event_get_session (session_worker_t * wrk, session_event_t * e)
{
  if (PREDICT_FALSE (pool_is_free_index (wrk->sessions, e->session_index)))
    return 0;
  ASSERT (session_is_valid (e->session_index, wrk->vm->thread_index));
  return pool_elt_at_index (wrk->sessions, e->session_index);
}

always_inline void
session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt)
{
  clib_llist_index_t ei;
  void (*fp) (void *);
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_CTRL_EVT_RPC:
      fp = e->rpc_args.fp;
      (*fp) (e->rpc_args.arg);
      break;
    case SESSION_CTRL_EVT_CLOSE:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
        break;
      session_transport_close (s);
      break;
    case SESSION_CTRL_EVT_RESET:
      s = session_get_from_handle_if_valid (e->session_handle);
      if (PREDICT_FALSE (!s))
        break;
      session_transport_reset (s);
      break;
    case SESSION_CTRL_EVT_LISTEN:
      session_mq_listen_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_LISTEN_URI:
      session_mq_listen_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_UNLISTEN:
      session_mq_unlisten_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT:
      session_mq_connect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_CONNECT_URI:
      session_mq_connect_uri_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECT:
      session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED:
      session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_ACCEPTED_REPLY:
      session_mq_accepted_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_DISCONNECTED_REPLY:
      session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk,
                                                                    elt));
      break;
    case SESSION_CTRL_EVT_RESET_REPLY:
      session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_WORKER_UPDATE:
      session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_DETACH:
      app_mq_detach_handler (session_evt_ctrl_data (wrk, elt));
      break;
    case SESSION_CTRL_EVT_APP_WRK_RPC:
      session_mq_app_wrk_rpc_handler (session_evt_ctrl_data (wrk, elt));
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    {
      e = &elt->evt;
      if (e->event_type >= SESSION_CTRL_EVT_BOUND)
        session_evt_ctrl_data_free (wrk, elt);
      session_evt_elt_free (wrk, elt);
    }

  SESSION_EVT (SESSION_EVT_COUNTS, CNT_CTRL_EVTS, 1, wrk);
}

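/*
 * An element still linked after its handler ran was requeued by that
 * handler, so it must not be freed here. Events at or above
 * SESSION_CTRL_EVT_BOUND carry their payload in the ctrl data pool and
 * release that slot first.
 */
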
always_inline void
session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
                           session_evt_elt_t * elt, int *n_tx_packets)
{
  session_main_t *smm = &session_main;
  app_worker_t *app_wrk;
  clib_llist_index_t ei;
  session_event_t *e;
  session_t *s;

  ei = clib_llist_entry_index (wrk->event_elts, elt);
  e = &elt->evt;

  switch (e->event_type)
    {
    case SESSION_IO_EVT_TX_FLUSH:
    case SESSION_IO_EVT_TX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s))
        break;
      CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      wrk->ctx.s = s;
      /* Spray packets in per session type frames, since they go to
       * different nodes */
      (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets);
      break;
    case SESSION_IO_EVT_RX:
      s = session_event_get_session (wrk, e);
      if (!s)
        break;
      transport_app_rx_evt (session_get_transport_proto (s),
                            s->connection_index, s->thread_index);
      break;
    case SESSION_IO_EVT_BUILTIN_RX:
      s = session_event_get_session (wrk, e);
      if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING))
        break;
      svm_fifo_unset_event (s->rx_fifo);
      app_wrk = app_worker_get (s->app_wrk_index);
      app_worker_builtin_rx (app_wrk, s);
      break;
    case SESSION_IO_EVT_BUILTIN_TX:
      s = session_get_from_handle_if_valid (e->session_handle);
      wrk->ctx.s = s;
      if (PREDICT_TRUE (s != 0))
        session_tx_fifo_dequeue_internal (wrk, node, elt, n_tx_packets);
      break;
    default:
      clib_warning ("unhandled event type %d", e->event_type);
    }

  SESSION_EVT (SESSION_IO_EVT_COUNTS, e->event_type, 1, wrk);

  /* Regrab elements in case pool moved */
  elt = pool_elt_at_index (wrk->event_elts, ei);
  if (!clib_llist_elt_is_linked (elt, evt_list))
    session_evt_elt_free (wrk, elt);
}

static const u32 session_evt_msg_sizes[] = {
#define _(symc, sym)                                                    \
  [SESSION_CTRL_EVT_ ## symc] = sizeof (session_ ## sym ##_msg_t),
  foreach_session_ctrl_evt
#undef _
};

always_inline void
session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt)
{
  session_evt_elt_t *elt;

  if (evt->event_type >= SESSION_CTRL_EVT_RPC)
    {
      elt = session_evt_alloc_ctrl (wrk);
      if (evt->event_type >= SESSION_CTRL_EVT_BOUND)
        {
          elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk);
          elt->evt.event_type = evt->event_type;
          clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data,
                            session_evt_msg_sizes[evt->event_type]);
        }
      else
        {
          /* Internal control events fit into io events footprint */
          clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
        }
    }
  else
    {
      elt = session_evt_alloc_new (wrk);
      clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt));
    }
}

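/*
 * External control messages are larger than the inline event payload,
 * so they get a slot in the ctrl data pool sized via
 * session_evt_msg_sizes; internal ctrl events and io events are copied
 * inline into the element itself.
 */
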
static void
session_flush_pending_tx_buffers (session_worker_t * wrk,
                                  vlib_node_runtime_t * node)
{
  vlib_buffer_enqueue_to_next (wrk->vm, node, wrk->pending_tx_buffers,
                               wrk->pending_tx_nexts,
                               vec_len (wrk->pending_tx_nexts));
  vec_reset_length (wrk->pending_tx_buffers);
  vec_reset_length (wrk->pending_tx_nexts);
}

static uword
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
{
  session_main_t *smm = vnet_get_session_main ();
  u32 thread_index = vm->thread_index, n_to_dequeue;
  session_worker_t *wrk = &smm->wrk[thread_index];
  session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
  clib_llist_index_t ei, next_ei, old_ti;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  int i = 0, n_tx_packets;
  session_event_t *evt;
  svm_msg_q_t *mq;

  SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);

  wrk->last_vlib_time = vlib_time_now (vm);
  wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;

  /*
   *  Update transport time
   */
  transport_update_time (wrk->last_vlib_time, thread_index);
  n_tx_packets = vec_len (wrk->pending_tx_buffers);
  SESSION_EVT (SESSION_EVT_DSP_CNTRS, UPDATE_TIME, wrk);

  /*
   *  Dequeue and handle new events
   */

  /* Try to dequeue what is available. Don't wait for lock.
   * XXX: we may need priorities here */
  mq = wrk->vpp_event_queue;
  n_to_dequeue = svm_msg_q_size (mq);
  if (n_to_dequeue && svm_msg_q_try_lock (mq) == 0)
    {
      for (i = 0; i < n_to_dequeue; i++)
        {
          svm_msg_q_sub_w_lock (mq, msg);
          evt = svm_msg_q_msg_data (mq, msg);
          session_evt_add_to_list (wrk, evt);
          svm_msg_q_free_msg (mq, msg);
        }
      svm_msg_q_unlock (mq);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, MQ_DEQ, wrk, n_to_dequeue, !i);

  /*
   * Handle control events
   */

  ctrl_he = pool_elt_at_index (wrk->event_elts, wrk->ctrl_head);

  /* *INDENT-OFF* */
  clib_llist_foreach_safe (wrk->event_elts, evt_list, ctrl_he, elt, ({
    clib_llist_remove (wrk->event_elts, evt_list, elt);
    session_event_dispatch_ctrl (wrk, elt);
  }));
  /* *INDENT-ON* */

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, CTRL_EVTS, wrk);

  /*
   * Handle the new io events.
   */

  new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head);
  old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
  old_ti = clib_llist_prev_index (old_he, evt_list);

  ei = clib_llist_next_index (new_he, evt_list);
  while (ei != wrk->new_head && n_tx_packets < SESSION_NODE_FRAME_SIZE)
    {
      elt = pool_elt_at_index (wrk->event_elts, ei);
      ei = clib_llist_next_index (elt, evt_list);
      clib_llist_remove (wrk->event_elts, evt_list, elt);
      session_event_dispatch_io (wrk, node, elt, &n_tx_packets);
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, NEW_IO_EVTS, wrk);

  /*
   * Handle the old io events, if we had any prior to processing the new ones
   */

  if (old_ti != wrk->old_head)
    {
      old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
      ei = clib_llist_next_index (old_he, evt_list);

      while (n_tx_packets < SESSION_NODE_FRAME_SIZE)
        {
          elt = pool_elt_at_index (wrk->event_elts, ei);
          next_ei = clib_llist_next_index (elt, evt_list);
          clib_llist_remove (wrk->event_elts, evt_list, elt);

          session_event_dispatch_io (wrk, node, elt, &n_tx_packets);

          if (ei == old_ti)
            break;

          ei = next_ei;
        };
    }

  SESSION_EVT (SESSION_EVT_DSP_CNTRS, OLD_IO_EVTS, wrk);

  if (vec_len (wrk->pending_tx_buffers))
    session_flush_pending_tx_buffers (wrk, node);

  vlib_node_increment_counter (vm, session_queue_node.index,
                               SESSION_QUEUE_ERROR_TX, n_tx_packets);

  SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets);

  return n_tx_packets;
}

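/*
 * Dispatch order above: drain the mq into per-class lists, run all
 * control events, then new io events, then previously postponed (old)
 * io events, capping work at SESSION_NODE_FRAME_SIZE packets; whatever
 * remains stays queued for the next dispatch.
 */
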
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_node) =
{
  .function = session_queue_node_fn,
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = ARRAY_LEN (session_queue_error_strings),
  .error_strings = session_queue_error_strings,
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

static clib_error_t *
session_queue_exit (vlib_main_t * vm)
{
  if (vec_len (vlib_mains) < 2)
    return 0;

  /*
   * Shut off (especially) worker-thread session nodes.
   * Otherwise, vpp can crash as the main thread unmaps the
   * API segment.
   */
  vlib_worker_thread_barrier_sync (vm);
  session_node_enable_disable (0 /* is_enable */ );
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);

static uword
session_queue_run_on_main (vlib_main_t * vm)
{
  vlib_node_runtime_t *node;

  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, 0);
}

static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                       vlib_frame_t * f)
{
  uword *event_data = 0;
  f64 timeout = 1.0;
  uword event_type;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
        {
        case SESSION_Q_PROCESS_RUN_ON_MAIN:
          /* Run session queue node on main thread */
          session_queue_run_on_main (vm);
          break;
        case SESSION_Q_PROCESS_STOP:
          vlib_node_set_state (vm, session_queue_process_node.index,
                               VLIB_NODE_STATE_DISABLED);
          break;
        case ~0:
          /* Timed out. Run on main to ensure all events are handled */
          session_queue_run_on_main (vm);
          break;
        }
      vec_reset_length (event_data);
    }
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_process_node) =
{
  .function = session_queue_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

static_always_inline uword
session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  session_main_t *sm = &session_main;
  if (!sm->wrk[0].vpp_event_queue)
    return 0;
  node = vlib_node_get_runtime (vm, session_queue_node.index);
  return session_queue_node_fn (vm, node, frame);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_pre_input_node) =
{
  .function = session_queue_pre_input_inline,
  .type = VLIB_NODE_TYPE_PRE_INPUT,
  .name = "session-queue-main",
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */