/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <math.h>		/* ceil, used below for segment sizing */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/elog.h>
#include <vnet/session/transport.h>
#include <vnet/session/session.h>
#include <vnet/session/application.h>
#include <vnet/session/session_debug.h>
#include <svm/queue.h>
vlib_node_registration_t session_queue_node;

typedef struct
{
  u32 session_index;
  u32 server_thread_index;
} session_queue_trace_t;
/* packet trace format function */
static u8 *
format_session_queue_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);

  s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
	      t->session_index, t->server_thread_index);
  return s;
}
#define foreach_session_queue_error		\
_(TX, "Packets transmitted")			\
_(TIMER, "Timer events")			\
_(NO_BUFFER, "Out of buffers")
typedef enum
{
#define _(sym,str) SESSION_QUEUE_ERROR_##sym,
  foreach_session_queue_error
#undef _
    SESSION_QUEUE_N_ERROR,
} session_queue_error_t;
static char *session_queue_error_strings[] = {
#define _(sym,string) string,
  foreach_session_queue_error
#undef _
};

enum
{
  SESSION_TX_NO_BUFFERS = -2,
  SESSION_TX_NO_DATA,
  SESSION_TX_OK
};
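
/* Trace up to n_trace of the n_segs buffers just added to the frame.
 * Buffers are indexed back from to_next, since the caller has already
 * advanced the frame pointer past them. */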
static void
session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			u32 next_index, u32 * to_next, u16 n_segs,
			stream_session_t * s, u32 n_trace)
{
  session_queue_trace_t *t;
  vlib_buffer_t *b;
  int i;

  for (i = 0; i < clib_min (n_trace, n_segs); i++)
    {
      b = vlib_get_buffer (vm, to_next[i - n_segs]);
      vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ );
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      t->session_index = s->session_index;
      t->server_thread_index = s->thread_index;
    }
  vlib_set_trace_count (vm, node, n_trace - i);
}
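
/* Dequeue data for the tail buffers of a multi-buffer segment and link
 * them into the chain headed by b. Buffer indices come off the back of
 * the per-thread tx_buffers cache; *n_bufs tracks what remains. */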
always_inline void
session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
			    vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  session_manager_main_t *smm = &session_manager_main;
  vlib_buffer_t *chain_b, *prev_b;
  u32 chain_bi0, to_deq, left_from_seg;
  u16 len_to_deq, n_bytes_read;
  u8 *data, j;

  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  b->total_length_not_including_first_buffer = 0;

  chain_b = b;
  left_from_seg = clib_min (ctx->snd_mss - b->current_length,
			    ctx->left_to_snd);
  to_deq = left_from_seg;
  for (j = 1; j < ctx->n_bufs_per_seg; j++)
    {
      prev_b = chain_b;
      len_to_deq = clib_min (to_deq, ctx->deq_per_buf);

      *n_bufs -= 1;
      chain_bi0 = smm->tx_buffers[ctx->s->thread_index][*n_bufs];
      _vec_len (smm->tx_buffers[ctx->s->thread_index]) = *n_bufs;

      chain_b = vlib_get_buffer (vm, chain_bi0);
      chain_b->current_data = 0;
      data = vlib_buffer_get_current (chain_b);
      if (peek_data)
	{
	  n_bytes_read = svm_fifo_peek (ctx->s->server_tx_fifo,
					ctx->tx_offset, len_to_deq, data);
	  ctx->tx_offset += n_bytes_read;
	}
      else
	{
	  if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM)
	    {
	      svm_fifo_t *f = ctx->s->server_tx_fifo;
	      session_dgram_hdr_t *hdr = &ctx->hdr;
	      u16 deq_now;
	      deq_now = clib_min (hdr->data_length - hdr->data_offset,
				  len_to_deq);
	      n_bytes_read = svm_fifo_peek (f, hdr->data_offset, deq_now,
					    data);
	      ASSERT (n_bytes_read > 0);

	      hdr->data_offset += n_bytes_read;
	      if (hdr->data_offset == hdr->data_length)
		svm_fifo_dequeue_drop (f, hdr->data_length);
	    }
	  else
	    n_bytes_read = svm_fifo_dequeue_nowait (ctx->s->server_tx_fifo,
						    len_to_deq, data);
	}
      ASSERT (n_bytes_read == len_to_deq);
      chain_b->current_length = n_bytes_read;
      b->total_length_not_including_first_buffer += chain_b->current_length;

      /* update previous buffer */
      prev_b->next_buffer = chain_bi0;
      prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      /* update current buffer */
      chain_b->next_buffer = 0;

      to_deq -= n_bytes_read;
      if (to_deq == 0)
	break;
    }
  ASSERT (to_deq == 0
	  && b->total_length_not_including_first_buffer == left_from_seg);
  ctx->left_to_snd -= left_from_seg;
}
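
/* Try to top up the per-thread tx buffer cache to wanted buffers.
 * Returns the number of buffers allocated and updates *n_bufs to the
 * new cache size; the caller must check whether that is enough. */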
always_inline int
session_output_try_get_buffers (vlib_main_t * vm,
				session_manager_main_t * smm,
				u32 thread_index, u16 * n_bufs, u32 wanted)
{
  u32 n_alloc;
  vec_validate_aligned (smm->tx_buffers[thread_index], wanted - 1,
			CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc (vm, &smm->tx_buffers[thread_index][*n_bufs],
			       wanted - *n_bufs);
  *n_bufs += n_alloc;
  _vec_len (smm->tx_buffers[thread_index]) = *n_bufs;
  return n_alloc;
}
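
/* Fill the first buffer of a segment: reserve MAX_HDRS_LEN of headroom
 * for the transport header, then peek (stream) or dequeue (dgram) up to
 * deq_per_first_buf bytes and chain additional buffers if the segment
 * does not fit in one. */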
always_inline void
session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
			vlib_buffer_t * b, u16 * n_bufs, u8 peek_data)
{
  u32 len_to_deq;
  u8 *data0;
  int n_bytes_read;

  /*
   * Start with the first buffer in chain
   */
  b->error = 0;
  b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->current_data = 0;

  data0 = vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
  len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf);

  if (peek_data)
    {
      n_bytes_read = svm_fifo_peek (ctx->s->server_tx_fifo, ctx->tx_offset,
				    len_to_deq, data0);
      ASSERT (n_bytes_read > 0);
      /* Keep track of progress locally, transport is also supposed to
       * increment it independently when pushing the header */
      ctx->tx_offset += n_bytes_read;
    }
  else
    {
      if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM)
	{
	  session_dgram_hdr_t *hdr = &ctx->hdr;
	  svm_fifo_t *f = ctx->s->server_tx_fifo;
	  u16 deq_now;
	  u32 offset;

	  ASSERT (hdr->data_length > hdr->data_offset);
	  deq_now = clib_min (hdr->data_length - hdr->data_offset,
			      len_to_deq);
	  offset = hdr->data_offset + SESSION_CONN_HDR_LEN;
	  n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0);
	  ASSERT (n_bytes_read > 0);

	  if (ctx->s->session_state == SESSION_STATE_LISTENING)
	    {
	      ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4);
	      ctx->tc->rmt_port = hdr->rmt_port;
	    }
	  hdr->data_offset += n_bytes_read;
	  if (hdr->data_offset == hdr->data_length)
	    {
	      offset = hdr->data_length + SESSION_CONN_HDR_LEN;
	      svm_fifo_dequeue_drop (f, offset);
	    }
	}
      else
	{
	  n_bytes_read = svm_fifo_dequeue_nowait (ctx->s->server_tx_fifo,
						  len_to_deq, data0);
	  ASSERT (n_bytes_read > 0);
	}
    }
  b->current_length = n_bytes_read;
  ctx->left_to_snd -= n_bytes_read;

  /*
   * Fill in the remaining buffers in the chain, if any
   */
  if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd))
    session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data);

  /* *INDENT-OFF* */
  SESSION_EVT_DBG (SESSION_EVT_DEQ, ctx->s, ({
	ed->data[0] = FIFO_EVENT_APP_TX;
	ed->data[1] = ctx->max_dequeue;
	ed->data[2] = len_to_deq;
	ed->data[3] = ctx->left_to_snd;
  }));
  /* *INDENT-ON* */
}
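
/* Returns 0 if the session can transmit, nonzero otherwise: 1 means the
 * event should be retried (session not ready yet), 2 means it should be
 * dropped (session closed). */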
always_inline u8
session_tx_not_ready (stream_session_t * s, u8 peek_data)
{
  if (peek_data)
    {
      /* Can retransmit for closed sessions but can't send new data if
       * session is not ready or closed */
      if (s->session_state < SESSION_STATE_READY)
	return 1;
      if (s->session_state == SESSION_STATE_CLOSED)
	return 2;
    }
  return 0;
}
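
/* Pick the transport connection for the tx event: the session's own
 * connection when peeking (stream transports), and for dgram transports
 * the listener's connection if the session is still listening. */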
always_inline transport_connection_t *
session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data)
{
  if (peek_data)
    return ctx->transport_vft->get_connection (ctx->s->connection_index,
					       ctx->s->thread_index);
  if (ctx->s->session_state == SESSION_STATE_LISTENING)
    return ctx->transport_vft->get_listener (ctx->s->connection_index);
  return ctx->transport_vft->get_connection (ctx->s->connection_index,
					     ctx->s->thread_index);
}
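
/* Compute the dequeue parameters for this event: bound max_len_to_snd by
 * fifo contents, transport send space and mss, and the max_segs the
 * frame can still take, then derive per-buffer dequeue sizes from the
 * buffer size, reserving MAX_HDRS_LEN of header space in the first. */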
always_inline void
session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
			       u32 max_segs, u8 peek_data)
{
  u32 n_bytes_per_buf, n_bytes_per_seg;

  ctx->max_dequeue = svm_fifo_max_dequeue (ctx->s->server_tx_fifo);
  if (peek_data)
    {
      /* Offset in tx fifo from where to peek data */
      ctx->tx_offset = ctx->transport_vft->tx_fifo_offset (ctx->tc);
      if (PREDICT_FALSE (ctx->tx_offset >= ctx->max_dequeue))
	{
	  ctx->max_len_to_snd = 0;
	  return;
	}
      ctx->max_dequeue -= ctx->tx_offset;
    }
  else
    {
      if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM)
	{
	  if (ctx->max_dequeue <= sizeof (ctx->hdr))
	    {
	      ctx->max_len_to_snd = 0;
	      return;
	    }
	  svm_fifo_peek (ctx->s->server_tx_fifo, 0, sizeof (ctx->hdr),
			 (u8 *) & ctx->hdr);
	  ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset);
	  ctx->max_dequeue = ctx->hdr.data_length - ctx->hdr.data_offset;
	}
    }
  ASSERT (ctx->max_dequeue > 0);

  /* Ensure we're not writing more than transport window allows */
  if (ctx->max_dequeue < ctx->snd_space)
    {
      /* Constrained by tx queue. Try to send only fully formed segments */
      ctx->max_len_to_snd =
	(ctx->max_dequeue > ctx->snd_mss) ?
	ctx->max_dequeue - ctx->max_dequeue % ctx->snd_mss : ctx->max_dequeue;
    }
  else
    {
      /* Expectation is that snd_space is already a multiple of snd_mss */
      ctx->max_len_to_snd = ctx->snd_space;
    }

  /* Check if we're tx constrained by the node */
  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->snd_mss);
  if (ctx->n_segs_per_evt > max_segs)
    {
      ctx->n_segs_per_evt = max_segs;
      ctx->max_len_to_snd = max_segs * ctx->snd_mss;
    }

  n_bytes_per_buf = vlib_buffer_free_list_buffer_size
    (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
  ASSERT (n_bytes_per_buf > MAX_HDRS_LEN);
  n_bytes_per_seg = MAX_HDRS_LEN + ctx->snd_mss;
  ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
  ctx->deq_per_buf = clib_min (ctx->snd_mss, n_bytes_per_buf);
  ctx->deq_per_first_buf = clib_min (ctx->snd_mss,
				     n_bytes_per_buf - MAX_HDRS_LEN);
}
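
/* Main tx routine: dequeues data from the session's tx fifo into
 * (possibly chained) buffers, has the transport push its header on each
 * segment and enqueues the segments to the session type's next node.
 * If not everything could be sent, the fifo event is re-armed so the
 * session is serviced again on a later dispatch. */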
always_inline int
session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node,
				session_fifo_event_t * e,
				stream_session_t * s, int *n_tx_packets,
				u8 peek_data)
{
  u32 next_index, next0, next1, *to_next, n_left_to_next;
  u32 n_trace = vlib_get_trace_count (vm, node), n_bufs_needed = 0;
  u32 thread_index = s->thread_index, n_left, pbi;
  session_manager_main_t *smm = &session_manager_main;
  session_tx_context_t *ctx = &smm->ctx[thread_index];
  transport_proto_t tp;
  vlib_buffer_t *pb;
  u16 n_bufs, rv;

  if (PREDICT_FALSE ((rv = session_tx_not_ready (s, peek_data))))
    {
      if (rv < 2)
	vec_add1 (smm->pending_event_vector[thread_index], *e);
      return SESSION_TX_NO_DATA;
    }

  next_index = smm->session_type_to_next[s->session_type];
  next0 = next1 = next_index;

  tp = session_get_transport_proto (s);
  ctx->s = s;
  ctx->transport_vft = transport_protocol_get_vft (tp);
  ctx->tc = session_tx_get_transport (ctx, peek_data);
  ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc);
  ctx->snd_space = ctx->transport_vft->send_space (ctx->tc);
  if (ctx->snd_space == 0 || ctx->snd_mss == 0)
    {
      vec_add1 (smm->pending_event_vector[thread_index], *e);
      return SESSION_TX_NO_DATA;
    }

  /* Allow enqueuing of a new event */
  svm_fifo_unset_event (s->server_tx_fifo);

  /* Check how much we can pull. */
  session_tx_set_dequeue_params (vm, ctx, VLIB_FRAME_SIZE - *n_tx_packets,
				 peek_data);

  if (PREDICT_FALSE (!ctx->max_len_to_snd))
    return SESSION_TX_NO_DATA;

  n_bufs = vec_len (smm->tx_buffers[thread_index]);
  n_bufs_needed = ctx->n_segs_per_evt * ctx->n_bufs_per_seg;

  /*
   * Make sure we have at least one full frame of buffers ready
   */
  if (n_bufs < n_bufs_needed)
    {
      session_output_try_get_buffers (vm, smm, thread_index, &n_bufs,
				      ctx->n_bufs_per_seg * VLIB_FRAME_SIZE);
      if (PREDICT_FALSE (n_bufs < n_bufs_needed))
	{
	  vec_add1 (smm->pending_event_vector[thread_index], *e);
	  return SESSION_TX_NO_BUFFERS;
	}
    }

  /*
   * Write until we fill up a frame
   */
  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  if (PREDICT_FALSE (ctx->n_segs_per_evt > n_left_to_next))
    {
      ctx->n_segs_per_evt = n_left_to_next;
      ctx->max_len_to_snd = ctx->snd_mss * n_left_to_next;
    }
  ctx->left_to_snd = ctx->max_len_to_snd;
  n_left = ctx->n_segs_per_evt;

  while (n_left >= 4)
    {
      vlib_buffer_t *b0, *b1;
      u32 bi0, bi1;

      pbi = smm->tx_buffers[thread_index][n_bufs - 3];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);
      pbi = smm->tx_buffers[thread_index][n_bufs - 4];
      pb = vlib_get_buffer (vm, pbi);
      vlib_prefetch_buffer_header (pb, STORE);

      to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs];
      to_next[1] = bi1 = smm->tx_buffers[thread_index][--n_bufs];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);

      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);
      session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data);

      ctx->transport_vft->push_header (ctx->tc, b0);
      ctx->transport_vft->push_header (ctx->tc, b1);

      to_next += 2;
      n_left_to_next -= 2;
      n_left -= 2;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);

      vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
				       n_left_to_next, bi0, bi1, next0,
				       next1);
    }
  while (n_left)
    {
      vlib_buffer_t *b0;
      u32 bi0;

      if (n_left > 1)
	{
	  pbi = smm->tx_buffers[thread_index][n_bufs - 2];
	  pb = vlib_get_buffer (vm, pbi);
	  vlib_prefetch_buffer_header (pb, STORE);
	}

      to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs];
      b0 = vlib_get_buffer (vm, bi0);
      session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data);

      /* Ask transport to push header after current_length and
       * total_length_not_including_first_buffer are updated */
      ctx->transport_vft->push_header (ctx->tc, b0);

      to_next += 1;
      n_left_to_next -= 1;
      n_left -= 1;

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
				       n_left_to_next, bi0, next0);
    }

  if (PREDICT_FALSE (n_trace > 0))
    session_tx_trace_frame (vm, node, next_index, to_next,
			    ctx->n_segs_per_evt, s, n_trace);

  _vec_len (smm->tx_buffers[thread_index]) = n_bufs;
  *n_tx_packets += ctx->n_segs_per_evt;
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* If we couldn't dequeue all bytes mark as partially read */
  ASSERT (ctx->left_to_snd == 0);
  if (ctx->max_len_to_snd < ctx->max_dequeue)
    if (svm_fifo_set_event (s->server_tx_fifo))
      vec_add1 (smm->pending_event_vector[thread_index], *e);

  if (!peek_data && ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM)
    {
      /* Fix dgram pre header */
      if (ctx->max_len_to_snd < ctx->max_dequeue)
	svm_fifo_overwrite_head (s->server_tx_fifo, (u8 *) & ctx->hdr,
				 sizeof (session_dgram_pre_hdr_t));
      /* More data needs to be read */
      else if (svm_fifo_max_dequeue (s->server_tx_fifo) > 0)
	if (svm_fifo_set_event (s->server_tx_fifo))
	  vec_add1 (smm->pending_event_vector[thread_index], *e);
    }
  return SESSION_TX_OK;
}
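
/* Stream transports, e.g. tcp, peek so data stays in the fifo and
 * remains available for retransmit until acked; dgram transports,
 * e.g. udp, dequeue directly since there are no retransmits. */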
int
session_tx_fifo_peek_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node,
			      session_fifo_event_t * e,
			      stream_session_t * s, int *n_tx_pkts)
{
  return session_tx_fifo_read_and_snd_i (vm, node, e, s, n_tx_pkts, 1);
}

int
session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node,
				 session_fifo_event_t * e,
				 stream_session_t * s, int *n_tx_pkts)
{
  return session_tx_fifo_read_and_snd_i (vm, node, e, s, n_tx_pkts, 0);
}
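
/* Tx path for sessions owned by builtin (in-process) apps: instead of
 * building packets, hand the event to the app's tx callback. */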
int
session_tx_fifo_dequeue_internal (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  session_fifo_event_t * e,
				  stream_session_t * s, int *n_tx_pkts)
{
  application_t *app;
  app = application_get (s->opaque);
  svm_fifo_unset_event (s->server_tx_fifo);
  return app->cb_fns.builtin_app_tx_callback (s);
}
always_inline stream_session_t *
session_event_get_session (session_fifo_event_t * e, u8 thread_index)
{
  return session_get_if_valid (e->fifo->master_session_index, thread_index);
}
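
/* Session queue input node function: drains the thread's vpp event
 * message queue, appends any events and disconnects left over from
 * previous dispatches, then handles each event: app tx, disconnect,
 * builtin rx and rpc. Returns the number of packets transmitted. */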
static uword
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * frame)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();
  u32 thread_index = vm->thread_index, n_to_dequeue, n_events;
  session_fifo_event_t *pending_events, *e;
  session_fifo_event_t *fifo_events;
  svm_msg_q_msg_t _msg, *msg = &_msg;
  f64 now = vlib_time_now (vm);
  int n_tx_packets = 0, i, rv;
  application_t *app;
  svm_msg_q_t *mq;
  void (*fp) (void *);

  SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, thread_index);

  /*
   * Update transport time
   */
  transport_update_time (now, thread_index);

  /*
   * Get vpp queue events that we can dequeue without blocking
   */
  mq = smm->vpp_event_queues[thread_index];
  fifo_events = smm->free_event_vector[thread_index];
  n_to_dequeue = svm_msg_q_size (mq);
  pending_events = smm->pending_event_vector[thread_index];

  if (!n_to_dequeue && !vec_len (pending_events)
      && !vec_len (smm->pending_disconnects[thread_index]))
    return 0;

  SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0);

  /*
   * If we didn't manage to process previous events try going
   * over them again without dequeuing new ones.
   * XXX: Handle senders to sessions that can't keep up
   */
  if (0 && vec_len (pending_events) >= 100)
    {
      clib_warning ("too many fifo events unsolved");
      goto skip_dequeue;
    }

  /* See you in the next life, don't be late
   * XXX: we may need priorities here */
  if (svm_msg_q_try_lock (mq))
    return 0;

  for (i = 0; i < n_to_dequeue; i++)
    {
      vec_add2 (fifo_events, e, 1);
      svm_msg_q_sub_w_lock (mq, msg);
      clib_memcpy (e, svm_msg_q_msg_data (mq, msg), sizeof (*e));
      svm_msg_q_free_msg (mq, msg);
    }

  svm_msg_q_unlock (mq);

  vec_append (fifo_events, pending_events);
  vec_append (fifo_events, smm->pending_disconnects[thread_index]);

  _vec_len (pending_events) = 0;
  smm->pending_event_vector[thread_index] = pending_events;
  _vec_len (smm->pending_disconnects[thread_index]) = 0;

skip_dequeue:
  n_events = vec_len (fifo_events);
  for (i = 0; i < n_events; i++)
    {
      stream_session_t *s;	/* $$$ prefetch 1 ahead maybe */
      session_fifo_event_t *e;
      u32 to_dequeue;

      e = &fifo_events[i];
      switch (e->event_type)
	{
	case FIFO_EVENT_APP_TX:
	  /* Don't try to send more than one frame per dispatch cycle */
	  if (n_tx_packets == VLIB_FRAME_SIZE)
	    {
	      vec_add1 (smm->pending_event_vector[thread_index], *e);
	      break;
	    }

	  s = session_event_get_session (e, thread_index);
	  if (PREDICT_FALSE (!s))
	    {
	      clib_warning ("It's dead, Jim!");
	      continue;
	    }
	  to_dequeue = svm_fifo_max_dequeue (s->server_tx_fifo);

	  /* Spray packets in per session type frames, since they go to
	   * different nodes */
	  rv = (smm->session_tx_fns[s->session_type]) (vm, node, e, s,
						       &n_tx_packets);
	  if (PREDICT_TRUE (rv == SESSION_TX_OK))
	    {
	      /* Notify app there's tx space if not polling */
	      if (PREDICT_FALSE (to_dequeue == s->server_tx_fifo->nitems
				 && !svm_fifo_has_event (s->server_tx_fifo)))
		session_dequeue_notify (s);
	    }
	  else if (PREDICT_FALSE (rv == SESSION_TX_NO_BUFFERS))
	    {
	      vlib_node_increment_counter (vm, node->node_index,
					   SESSION_QUEUE_ERROR_NO_BUFFER, 1);
	      continue;
	    }
	  break;
	case FIFO_EVENT_DISCONNECT:
	  /* Make sure stream disconnects run after the pending list is
	   * drained */
	  s = session_get_from_handle (e->session_handle);
	  if (!e->postponed)
	    {
	      e->postponed = 1;
	      vec_add1 (smm->pending_disconnects[thread_index], *e);
	      continue;
	    }
	  /* If tx queue is still not empty, wait */
	  if (svm_fifo_max_dequeue (s->server_tx_fifo))
	    {
	      vec_add1 (smm->pending_disconnects[thread_index], *e);
	      continue;
	    }

	  stream_session_disconnect_transport (s);
	  break;
	case FIFO_EVENT_BUILTIN_RX:
	  s = session_event_get_session (e, thread_index);
	  if (PREDICT_FALSE (!s))
	    continue;
	  svm_fifo_unset_event (s->server_rx_fifo);
	  app = application_get (s->app_index);
	  app->cb_fns.builtin_app_rx_callback (s);
	  break;
	case FIFO_EVENT_RPC:
	  fp = e->rpc_args.fp;
	  (*fp) (e->rpc_args.arg);
	  break;

	default:
	  clib_warning ("unhandled event type %d", e->event_type);
	}
    }

  _vec_len (fifo_events) = 0;
  smm->free_event_vector[thread_index] = fifo_events;

  vlib_node_increment_counter (vm, session_queue_node.index,
			       SESSION_QUEUE_ERROR_TX, n_tx_packets);

  SESSION_EVT_DBG (SESSION_EVT_DISPATCH_END, smm, thread_index);

  return n_tx_packets;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_node) =
{
  .function = session_queue_node_fn,
  .name = "session-queue",
  .format_trace = format_session_queue_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .n_errors = ARRAY_LEN (session_queue_error_strings),
  .error_strings = session_queue_error_strings,
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */
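
/* Debug helper: pretty-print the events sitting in the main thread's
 * vpp event queue without consuming them. */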
void
dump_thread_0_event_queue (void)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();
  vlib_main_t *vm = &vlib_global_main;
  u32 my_thread_index = vm->thread_index;
  session_fifo_event_t _e, *e = &_e;
  svm_msg_q_ring_t *ring;
  stream_session_t *s0;
  svm_msg_q_msg_t *msg;
  svm_msg_q_t *mq;
  int i, index;

  mq = smm->vpp_event_queues[my_thread_index];
  index = mq->q->head;

  for (i = 0; i < mq->q->cursize; i++)
    {
      msg = (svm_msg_q_msg_t *) (&mq->q->data[0] + mq->q->elsize * index);
      ring = svm_msg_q_ring (mq, msg->ring_index);
      clib_memcpy (e, svm_msg_q_msg_data (mq, msg), ring->elsize);

      switch (e->event_type)
	{
	case FIFO_EVENT_APP_TX:
	  s0 = session_event_get_session (e, my_thread_index);
	  fformat (stdout, "[%04d] TX session %d\n", i, s0->session_index);
	  break;

	case FIFO_EVENT_DISCONNECT:
	  s0 = session_get_from_handle (e->session_handle);
	  fformat (stdout, "[%04d] disconnect session %d\n", i,
		   s0->session_index);
	  break;

	case FIFO_EVENT_BUILTIN_RX:
	  s0 = session_event_get_session (e, my_thread_index);
	  fformat (stdout, "[%04d] builtin_rx %d\n", i, s0->session_index);
	  break;

	case FIFO_EVENT_RPC:
	  fformat (stdout, "[%04d] RPC call %llx with %llx\n",
		   i, (u64) (e->rpc_args.fp), (u64) (e->rpc_args.arg));
	  break;

	default:
	  fformat (stdout, "[%04d] unhandled event type %d\n",
		   i, e->event_type);
	  break;
	}

      index++;

      if (index == mq->q->maxsize)
	index = 0;
    }
}
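
/* Returns 1 if event e refers to fifo f, 0 otherwise. Helper for
 * scanning event queues for a given fifo. */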
static u8
session_node_cmp_event (session_fifo_event_t * e, svm_fifo_t * f)
{
  stream_session_t *s;
  switch (e->event_type)
    {
    case FIFO_EVENT_APP_RX:
    case FIFO_EVENT_APP_TX:
    case FIFO_EVENT_BUILTIN_RX:
      if (e->fifo == f)
	return 1;
      break;
    case FIFO_EVENT_DISCONNECT:
      s = session_get_from_handle (e->session_handle);
      if (!s)
	{
	  clib_warning ("session has event but doesn't exist!");
	  break;
	}
      if (s->server_rx_fifo == f || s->server_tx_fifo == f)
	return 1;
      break;
    default:
      break;
    }
  return 0;
}
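
/* Look for an event referencing fifo f in the owning thread's vpp event
 * queue and, failing that, in its pending events vector. On a match the
 * event is copied into e and 1 is returned. */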
u8
session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e)
{
  session_manager_main_t *smm = vnet_get_session_manager_main ();
  svm_msg_q_t *mq;
  session_fifo_event_t *pending_event_vector, *evt;
  int i, index, found = 0;
  svm_msg_q_msg_t *msg;
  svm_msg_q_ring_t *ring;
  u8 thread_index;

  ASSERT (e);
  thread_index = f->master_thread_index;
  /*
   * Search evt queue
   */
  mq = smm->vpp_event_queues[thread_index];
  index = mq->q->head;
  for (i = 0; i < mq->q->cursize; i++)
    {
      msg = (svm_msg_q_msg_t *) (&mq->q->data[0] + mq->q->elsize * index);
      ring = svm_msg_q_ring (mq, msg->ring_index);
      clib_memcpy (e, svm_msg_q_msg_data (mq, msg), ring->elsize);
      found = session_node_cmp_event (e, f);
      if (found)
	return 1;
      if (++index == mq->q->maxsize)
	index = 0;
    }
  /*
   * Search pending events vector
   */
  pending_event_vector = smm->pending_event_vector[thread_index];
  vec_foreach (evt, pending_event_vector)
  {
    found = session_node_cmp_event (evt, f);
    if (found)
      {
	clib_memcpy (e, evt, sizeof (*evt));
	break;
      }
  }
  return found;
}
static clib_error_t *
session_queue_exit (vlib_main_t * vm)
{
  if (vec_len (vlib_mains) < 2)
    return 0;

  /*
   * Shut off (especially) worker-thread session nodes.
   * Otherwise, vpp can crash as the main thread unmaps the
   * API segment.
   */
  vlib_worker_thread_barrier_sync (vm);
  session_node_enable_disable (0 /* is_enable */ );
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);
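
/* Process node that wakes up every timeout interval, or when explicitly
 * signalled, and updates transport time so timers and outstanding
 * retransmits keep advancing even without session events. */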
static uword
session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		       vlib_frame_t * f)
{
  f64 now, timeout = 1.0;
  uword *event_data = 0;
  uword event_type;

  while (1)
    {
      vlib_process_wait_for_event_or_clock (vm, timeout);
      now = vlib_time_now (vm);
      event_type = vlib_process_get_events (vm, (uword **) & event_data);

      switch (event_type)
	{
	case SESSION_Q_PROCESS_FLUSH_FRAMES:
	  /* Flush the frames by updating all transports times */
	  transport_update_time (now, 0);
	  break;
	case SESSION_Q_PROCESS_STOP:
	  timeout = 100000.0;
	  break;
	case ~0:
	  /* Timed out. Update time for all transports to trigger all
	   * outstanding retransmits. */
	  transport_update_time (now, 0);
	  break;
	}
      vec_reset_length (event_data);
    }
  return 0;
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (session_queue_process_node) =
{
  .function = session_queue_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "session-queue-process",
  .state = VLIB_NODE_STATE_DISABLED,
};
/* *INDENT-ON* */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */