X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fsession%2Fsession_node.c;h=b1c2428874e06eaf76b9791a25b8c8d1b601a285;hb=70f879d2852dfc042ad0911a4a6e4a1714c0eb83;hp=ca6663c0b01ceaf37fcdb2fb9a67e27fddfbf118;hpb=2062ec0d67fb83fa25fc938c992a8e882612c777;p=vpp.git diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index ca6663c0b01..b1c2428874e 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -25,12 +25,208 @@ #include #include -static void session_mq_accepted_reply_handler (void *data); +#define app_check_thread_and_barrier(_fn, _arg) \ + if (!vlib_thread_is_main_w_barrier ()) \ + { \ + vlib_rpc_call_main_thread (_fn, (u8 *) _arg, sizeof(*_arg)); \ + return; \ + } static void -accepted_notify_cb (void *data, u32 data_len) +session_mq_listen_handler (void *data) { - session_mq_accepted_reply_handler (data); + session_listen_msg_t *mp = (session_listen_msg_t *) data; + vnet_listen_args_t _a, *a = &_a; + app_worker_t *app_wrk; + application_t *app; + int rv; + + app_check_thread_and_barrier (session_mq_listen_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + clib_memset (a, 0, sizeof (*a)); + a->sep.is_ip4 = mp->is_ip4; + clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip)); + a->sep.port = mp->port; + a->sep.fib_index = mp->vrf; + a->sep.sw_if_index = ENDPOINT_INVALID_INDEX; + a->sep.transport_proto = mp->proto; + a->sep_ext.ckpair_index = mp->ckpair_index; + a->sep_ext.crypto_engine = mp->crypto_engine; + a->app_index = app->app_index; + a->wrk_map_index = mp->wrk_index; + + if ((rv = vnet_listen (a))) + clib_warning ("listen returned: %d", rv); + + app_wrk = application_get_worker (app, mp->wrk_index); + mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv); + return; +} + +static void +session_mq_listen_uri_handler (void *data) +{ + session_listen_uri_msg_t *mp = (session_listen_uri_msg_t *) data; + vnet_listen_args_t _a, *a = &_a; + app_worker_t *app_wrk; + application_t *app; + int rv; + + app_check_thread_and_barrier (session_mq_listen_uri_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + clib_memset (a, 0, sizeof (*a)); + a->uri = (char *) mp->uri; + a->app_index = app->app_index; + rv = vnet_bind_uri (a); + + app_wrk = application_get_worker (app, 0); + mq_send_session_bound_cb (app_wrk->wrk_index, mp->context, a->handle, rv); +} + +static void +session_mq_connect_handler (void *data) +{ + session_connect_msg_t *mp = (session_connect_msg_t *) data; + vnet_connect_args_t _a, *a = &_a; + app_worker_t *app_wrk; + application_t *app; + int rv; + + app_check_thread_and_barrier (session_mq_connect_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + clib_memset (a, 0, sizeof (*a)); + a->sep.is_ip4 = mp->is_ip4; + clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip)); + a->sep.port = mp->port; + a->sep.transport_proto = mp->proto; + a->sep.peer.fib_index = mp->vrf; + clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip)); + a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX; + a->sep_ext.parent_handle = mp->parent_handle; + a->sep_ext.ckpair_index = mp->ckpair_index; + a->sep_ext.crypto_engine = mp->crypto_engine; + a->sep_ext.flags = mp->flags; + if (mp->hostname_len) + { + vec_validate (a->sep_ext.hostname, mp->hostname_len - 1); + clib_memcpy_fast (a->sep_ext.hostname, mp->hostname, mp->hostname_len); + } + a->api_context = mp->context; + a->app_index = 
app->app_index; + a->wrk_map_index = mp->wrk_index; + + if ((rv = vnet_connect (a))) + { + clib_warning ("connect returned: %U", format_vnet_api_errno, rv); + app_wrk = application_get_worker (app, mp->wrk_index); + mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, + /* is_fail */ 1); + } + + vec_free (a->sep_ext.hostname); +} + +static void +session_mq_connect_uri_handler (void *data) +{ + session_connect_uri_msg_t *mp = (session_connect_uri_msg_t *) data; + vnet_connect_args_t _a, *a = &_a; + app_worker_t *app_wrk; + application_t *app; + int rv; + + app_check_thread_and_barrier (session_mq_connect_uri_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + clib_memset (a, 0, sizeof (*a)); + a->uri = (char *) mp->uri; + a->api_context = mp->context; + a->app_index = app->app_index; + if ((rv = vnet_connect_uri (a))) + { + clib_warning ("connect_uri returned: %d", rv); + app_wrk = application_get_worker (app, 0 /* default wrk only */ ); + mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, + /* is_fail */ 1); + } +} + +static void +session_mq_disconnect_handler (void *data) +{ + session_disconnect_msg_t *mp = (session_disconnect_msg_t *) data; + vnet_disconnect_args_t _a, *a = &_a; + application_t *app; + + app = application_lookup (mp->client_index); + if (!app) + return; + + a->app_index = app->app_index; + a->handle = mp->handle; + vnet_disconnect_session (a); +} + +static void +app_mq_detach_handler (void *data) +{ + session_app_detach_msg_t *mp = (session_app_detach_msg_t *) data; + vnet_app_detach_args_t _a, *a = &_a; + application_t *app; + + app_check_thread_and_barrier (app_mq_detach_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + a->app_index = app->app_index; + a->api_client_index = mp->client_index; + vnet_application_detach (a); +} + +static void +session_mq_unlisten_handler (void *data) +{ + session_unlisten_msg_t *mp = (session_unlisten_msg_t *) data; + vnet_unlisten_args_t _a, *a = &_a; + app_worker_t *app_wrk; + application_t *app; + int rv; + + app_check_thread_and_barrier (session_mq_unlisten_handler, mp); + + app = application_lookup (mp->client_index); + if (!app) + return; + + clib_memset (a, 0, sizeof (*a)); + a->app_index = app->app_index; + a->handle = mp->handle; + a->wrk_map_index = mp->wrk_index; + if ((rv = vnet_unlisten (a))) + clib_warning ("unlisten returned: %d", rv); + + app_wrk = application_get_worker (app, a->wrk_map_index); + if (!app_wrk) + return; + + mq_send_unlisten_reply (app_wrk, mp->handle, mp->context, rv); } static void @@ -56,8 +252,8 @@ session_mq_accepted_reply_handler (void *data) if (vlib_num_workers () && vlib_get_thread_index () != 0 && session_thread_from_handle (mp->handle) == 0) { - vl_api_rpc_call_main_thread (accepted_notify_cb, data, - sizeof (session_accepted_reply_msg_t)); + vlib_rpc_call_main_thread (session_mq_accepted_reply_handler, + (u8 *) mp, sizeof (*mp)); return; } @@ -114,14 +310,14 @@ session_mq_reset_reply_handler (void *data) session_parse_handle (mp->handle, &index, &thread_index); s = session_get_if_valid (index, thread_index); - /* Session was already closed or already cleaned up */ - if (!s || s->session_state != SESSION_STATE_TRANSPORT_CLOSING) + /* No session or not the right session */ + if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING) return; app_wrk = app_worker_get (s->app_wrk_index); if (!app_wrk || app_wrk->app_index != app->app_index) { - clib_warning ("App % does not own handle 0x%lx!", 
app->app_index, + clib_warning ("App %u does not own handle 0x%lx!", app->app_index, mp->handle); return; } @@ -175,7 +371,6 @@ session_mq_disconnected_handler (void *data) svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg); - svm_msg_q_unlock (app_wrk->event_queue); evt = svm_msg_q_msg_data (app_wrk->event_queue, msg); clib_memset (evt, 0, sizeof (*evt)); evt->event_type = SESSION_CTRL_EVT_DISCONNECTED_REPLY; @@ -183,7 +378,7 @@ session_mq_disconnected_handler (void *data) rmp->handle = mp->handle; rmp->context = mp->context; rmp->retval = rv; - svm_msg_q_add (app_wrk->event_queue, msg, SVM_Q_WAIT); + svm_msg_q_add_and_unlock (app_wrk->event_queue, msg); } static void @@ -250,13 +445,12 @@ session_mq_worker_update_handler (void *data) svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg); - svm_msg_q_unlock (app_wrk->event_queue); evt = svm_msg_q_msg_data (app_wrk->event_queue, msg); clib_memset (evt, 0, sizeof (*evt)); evt->event_type = SESSION_CTRL_EVT_REQ_WORKER_UPDATE; wump = (session_req_worker_update_msg_t *) evt->data; wump->session_handle = mp->handle; - svm_msg_q_add (app_wrk->event_queue, msg, SVM_Q_WAIT); + svm_msg_q_add_and_unlock (app_wrk->event_queue, msg); return; } @@ -268,7 +462,6 @@ session_mq_worker_update_handler (void *data) svm_msg_q_lock_and_alloc_msg_w_ring (app_wrk->event_queue, SESSION_MQ_CTRL_EVT_RING, SVM_Q_WAIT, msg); - svm_msg_q_unlock (app_wrk->event_queue); evt = svm_msg_q_msg_data (app_wrk->event_queue, msg); clib_memset (evt, 0, sizeof (*evt)); evt->event_type = SESSION_CTRL_EVT_WORKER_UPDATE_REPLY; @@ -277,7 +470,7 @@ session_mq_worker_update_handler (void *data) rmp->rx_fifo = pointer_to_uword (s->rx_fifo); rmp->tx_fifo = pointer_to_uword (s->tx_fifo); rmp->segment_handle = session_segment_handle (s); - svm_msg_q_add (app_wrk->event_queue, msg, SVM_Q_WAIT); + svm_msg_q_add_and_unlock (app_wrk->event_queue, msg); /* * Retransmit messages that may have been lost @@ -308,7 +501,7 @@ format_session_queue_trace (u8 * s, va_list * args) CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *); - s = format (s, "SESSION_QUEUE: session index %d, server thread index %d", + s = format (s, "session index %d thread index %d", t->session_index, t->server_thread_index); return s; } @@ -350,7 +543,7 @@ session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node, for (i = 0; i < clib_min (n_trace, n_segs); i++) { - b = vlib_get_buffer (vm, to_next[i - n_segs]); + b = vlib_get_buffer (vm, to_next[i]); vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ ); t = vlib_add_trace (vm, node, b, sizeof (*t)); t->session_index = s->session_index; @@ -374,7 +567,7 @@ session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx, b->total_length_not_including_first_buffer = 0; chain_b = b; - left_from_seg = clib_min (ctx->snd_mss - b->current_length, + left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length, ctx->left_to_snd); to_deq = left_from_seg; for (j = 1; j < ctx->n_bufs_per_seg; j++) @@ -390,8 +583,8 @@ session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx, if (peek_data) { n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, - ctx->tx_offset, len_to_deq, data); - ctx->tx_offset += n_bytes_read; + ctx->sp.tx_offset, len_to_deq, data); + ctx->sp.tx_offset += n_bytes_read; } else { @@ -458,12 +651,12 @@ session_tx_fill_buffer (vlib_main_t * vm, 
session_tx_context_t * ctx, if (peek_data) { - n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->tx_offset, + n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset, len_to_deq, data0); ASSERT (n_bytes_read > 0); /* Keep track of progress locally, transport is also supposed to * increment it independently when pushing the header */ - ctx->tx_offset += n_bytes_read; + ctx->sp.tx_offset += n_bytes_read; } else { @@ -508,15 +701,6 @@ session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx, */ if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd)) session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data); - - /* *INDENT-OFF* */ - SESSION_EVT_DBG(SESSION_EVT_DEQ, ctx->s, ({ - ed->data[0] = SESSION_IO_EVT_TX; - ed->data[1] = ctx->max_dequeue; - ed->data[2] = len_to_deq; - ed->data[3] = ctx->left_to_snd; - })); - /* *INDENT-ON* */ } always_inline u8 @@ -524,12 +708,21 @@ session_tx_not_ready (session_t * s, u8 peek_data) { if (peek_data) { + if (PREDICT_TRUE (s->session_state == SESSION_STATE_READY)) + return 0; /* Can retransmit for closed sessions but can't send new data if * session is not ready or closed */ - if (s->session_state < SESSION_STATE_READY) + else if (s->session_state < SESSION_STATE_READY) return 1; - if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED) - return 2; + else if (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED) + { + /* Allow closed transports to still send custom packets. + * For instance, tcp may want to send acks in time-wait. */ + if (s->session_state != SESSION_STATE_TRANSPORT_DELETED + && (s->flags & SESSION_F_CUSTOM_TX)) + return 0; + return 2; + } } return 0; } @@ -563,13 +756,12 @@ session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx, if (peek_data) { /* Offset in rx fifo from where to peek data */ - ctx->tx_offset = ctx->transport_vft->tx_fifo_offset (ctx->tc); - if (PREDICT_FALSE (ctx->tx_offset >= ctx->max_dequeue)) + if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue)) { ctx->max_len_to_snd = 0; return; } - ctx->max_dequeue -= ctx->tx_offset; + ctx->max_dequeue -= ctx->sp.tx_offset; } else { @@ -589,49 +781,62 @@ session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx, ASSERT (ctx->max_dequeue > 0); /* Ensure we're not writing more than transport window allows */ - if (ctx->max_dequeue < ctx->snd_space) + if (ctx->max_dequeue < ctx->sp.snd_space) { /* Constrained by tx queue. Try to send only fully formed segments */ - ctx->max_len_to_snd = - (ctx->max_dequeue > ctx->snd_mss) ? - ctx->max_dequeue - ctx->max_dequeue % ctx->snd_mss : ctx->max_dequeue; + ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ? + (ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) : + ctx->max_dequeue; /* TODO Nagle ? 
*/ } else { /* Expectation is that snd_space0 is already a multiple of snd_mss */ - ctx->max_len_to_snd = ctx->snd_space; + ctx->max_len_to_snd = ctx->sp.snd_space; } /* Check if we're tx constrained by the node */ - ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->snd_mss); + ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss); if (ctx->n_segs_per_evt > max_segs) { ctx->n_segs_per_evt = max_segs; - ctx->max_len_to_snd = max_segs * ctx->snd_mss; + ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss; } n_bytes_per_buf = vlib_buffer_get_default_data_size (vm); ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN); - n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->snd_mss; + n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss; ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf); - ctx->deq_per_buf = clib_min (ctx->snd_mss, n_bytes_per_buf); - ctx->deq_per_first_buf = clib_min (ctx->snd_mss, + ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf); + ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN); } +always_inline void +session_tx_maybe_reschedule (session_worker_t * wrk, + session_tx_context_t * ctx, + session_evt_elt_t * elt) +{ + session_t *s = ctx->s; + + svm_fifo_unset_event (s->tx_fifo); + if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset) + if (svm_fifo_set_event (s->tx_fifo)) + session_evt_add_head_old (wrk, elt); +} + always_inline int -session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, - session_worker_t * wrk, - session_evt_elt_t * elt, int *n_tx_packets, - u8 peek_data) +session_tx_fifo_read_and_snd_i (session_worker_t * wrk, + vlib_node_runtime_t * node, + session_evt_elt_t * elt, + int *n_tx_packets, u8 peek_data) { - u32 next_index, next0, next1, *to_next, n_left_to_next, n_left, pbi; - u32 n_trace = vlib_get_trace_count (vm, node), n_bufs_needed = 0; - session_main_t *smm = &session_main; + u32 n_trace, n_bufs_needed = 0, n_left, pbi, next_index, max_burst; session_tx_context_t *ctx = &wrk->ctx; + session_main_t *smm = &session_main; session_event_t *e = &elt->evt; + vlib_main_t *vm = wrk->vm; transport_proto_t tp; vlib_buffer_t *pb; u16 n_bufs, rv; @@ -639,43 +844,83 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE ((rv = session_tx_not_ready (ctx->s, peek_data)))) { if (rv < 2) - session_evt_add_pending (wrk, elt); + session_evt_add_old (wrk, elt); return SESSION_TX_NO_DATA; } next_index = smm->session_type_to_next[ctx->s->session_type]; - next0 = next1 = next_index; + max_burst = VLIB_FRAME_SIZE - *n_tx_packets; tp = session_get_transport_proto (ctx->s); ctx->transport_vft = transport_protocol_get_vft (tp); ctx->tc = session_tx_get_transport (ctx, peek_data); - ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc); if (PREDICT_FALSE (e->event_type == SESSION_IO_EVT_TX_FLUSH)) { if (ctx->transport_vft->flush_data) ctx->transport_vft->flush_data (ctx->tc); + e->event_type = SESSION_IO_EVT_TX; } - ctx->snd_space = transport_connection_snd_space (ctx->tc, - vm->clib_time. 
- last_cpu_time, - ctx->snd_mss); - if (ctx->snd_space == 0 || ctx->snd_mss == 0) + if (ctx->s->flags & SESSION_F_CUSTOM_TX) { - session_evt_add_pending (wrk, elt); + u32 n_custom_tx; + ctx->s->flags &= ~SESSION_F_CUSTOM_TX; + n_custom_tx = ctx->transport_vft->custom_tx (ctx->tc, max_burst); + *n_tx_packets += n_custom_tx; + if (PREDICT_FALSE + (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)) + return SESSION_TX_OK; + max_burst -= n_custom_tx; + if (!max_burst) + { + session_evt_add_old (wrk, elt); + return SESSION_TX_OK; + } + } + + transport_connection_snd_params (ctx->tc, &ctx->sp); + + if (!ctx->sp.snd_space) + { + /* This flow queue is "empty" so it should be re-evaluated before + * the ones that have data to send. */ + if (PREDICT_TRUE (!ctx->sp.flags)) + session_evt_add_head_old (wrk, elt); + /* Request to postpone the session, e.g., zero-wnd and transport + * is not currently probing */ + else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE) + session_evt_add_old (wrk, elt); + /* If the deschedule flag was set, remove session from scheduler. + * Transport is responsible for rescheduling this session. */ + else + transport_connection_deschedule (ctx->tc); + return SESSION_TX_NO_DATA; } - /* Allow enqueuing of a new event */ - svm_fifo_unset_event (ctx->s->tx_fifo); + if (transport_connection_is_tx_paced (ctx->tc)) + { + u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc); + if (snd_space < TRANSPORT_PACER_MIN_BURST) + { + session_evt_add_head_old (wrk, elt); + return SESSION_TX_NO_DATA; + } + snd_space = clib_min (ctx->sp.snd_space, snd_space); + ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ? + snd_space - snd_space % ctx->sp.snd_mss : snd_space; + } /* Check how much we can pull. */ - session_tx_set_dequeue_params (vm, ctx, VLIB_FRAME_SIZE - *n_tx_packets, - peek_data); + session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data); if (PREDICT_FALSE (!ctx->max_len_to_snd)) - return SESSION_TX_NO_DATA; + { + transport_connection_tx_pacer_reset_bucket (ctx->tc, 0); + session_tx_maybe_reschedule (wrk, ctx, elt); + return SESSION_TX_NO_DATA; + } n_bufs_needed = ctx->n_segs_per_evt * ctx->n_bufs_per_seg; vec_validate_aligned (wrk->tx_buffers, n_bufs_needed - 1, @@ -685,19 +930,12 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, { if (n_bufs) vlib_buffer_free (vm, wrk->tx_buffers, n_bufs); - session_evt_add_pending (wrk, elt); + session_evt_add_head_old (wrk, elt); + vlib_node_increment_counter (wrk->vm, node->node_index, + SESSION_QUEUE_ERROR_NO_BUFFER, 1); return SESSION_TX_NO_BUFFERS; } - /* - * Write until we fill up a frame - */ - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - if (PREDICT_FALSE (ctx->n_segs_per_evt > n_left_to_next)) - { - ctx->n_segs_per_evt = n_left_to_next; - ctx->max_len_to_snd = ctx->snd_mss * n_left_to_next; - } ctx->left_to_snd = ctx->max_len_to_snd; n_left = ctx->n_segs_per_evt; @@ -713,8 +951,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, pb = vlib_get_buffer (vm, pbi); vlib_prefetch_buffer_header (pb, STORE); - to_next[0] = bi0 = wrk->tx_buffers[--n_bufs]; - to_next[1] = bi1 = wrk->tx_buffers[--n_bufs]; + bi0 = wrk->tx_buffers[--n_bufs]; + bi1 = wrk->tx_buffers[--n_bufs]; b0 = vlib_get_buffer (vm, bi0); b1 = vlib_get_buffer (vm, bi1); @@ -725,16 +963,15 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, ctx->transport_vft->push_header (ctx->tc, b0); ctx->transport_vft->push_header (ctx->tc, b1); - to_next += 2; - 
n_left_to_next -= 2; n_left -= 2; VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1); - vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, - n_left_to_next, bi0, bi1, next0, - next1); + vec_add1 (wrk->pending_tx_buffers, bi0); + vec_add1 (wrk->pending_tx_buffers, bi1); + vec_add1 (wrk->pending_tx_nexts, next_index); + vec_add1 (wrk->pending_tx_nexts, next_index); } while (n_left) { @@ -748,7 +985,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_prefetch_buffer_header (pb, STORE); } - to_next[0] = bi0 = wrk->tx_buffers[--n_bufs]; + bi0 = wrk->tx_buffers[--n_bufs]; b0 = vlib_get_buffer (vm, bi0); session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data); @@ -756,32 +993,35 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, * total_length_not_including_first_buffer are updated */ ctx->transport_vft->push_header (ctx->tc, b0); - to_next += 1; - n_left_to_next -= 1; n_left -= 1; VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, - n_left_to_next, bi0, next0); + vec_add1 (wrk->pending_tx_buffers, bi0); + vec_add1 (wrk->pending_tx_nexts, next_index); } - if (PREDICT_FALSE (n_trace > 0)) - session_tx_trace_frame (vm, node, next_index, to_next, + if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)) > 0)) + session_tx_trace_frame (vm, node, next_index, wrk->pending_tx_buffers, ctx->n_segs_per_evt, ctx->s, n_trace); + if (PREDICT_FALSE (n_bufs)) - { - vlib_buffer_free (vm, wrk->tx_buffers, n_bufs); - } + vlib_buffer_free (vm, wrk->tx_buffers, n_bufs); + *n_tx_packets += ctx->n_segs_per_evt; - transport_connection_update_tx_stats (ctx->tc, ctx->max_len_to_snd); - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + transport_connection_update_tx_bytes (ctx->tc, ctx->max_len_to_snd); + + SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue, + ctx->s->tx_fifo->has_event, wrk->last_vlib_time); - /* If we couldn't dequeue all bytes mark as partially read */ ASSERT (ctx->left_to_snd == 0); + + /* If we couldn't dequeue all bytes reschedule as old flow. 
Otherwise, + * check if application enqueued more data and reschedule accordingly */ if (ctx->max_len_to_snd < ctx->max_dequeue) - if (svm_fifo_set_event (ctx->s->tx_fifo)) - session_evt_add_pending (wrk, elt); + session_evt_add_old (wrk, elt); + else + session_tx_maybe_reschedule (wrk, ctx, elt); if (!peek_data && ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM) @@ -793,7 +1033,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* More data needs to be read */ else if (svm_fifo_max_dequeue_cons (ctx->s->tx_fifo) > 0) if (svm_fifo_set_event (ctx->s->tx_fifo)) - session_evt_add_pending (wrk, elt); + session_evt_add_old (wrk, elt); if (svm_fifo_needs_deq_ntf (ctx->s->tx_fifo, ctx->max_len_to_snd)) session_dequeue_notify (ctx->s); @@ -802,33 +1042,33 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, } int -session_tx_fifo_peek_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, - session_worker_t * wrk, - session_evt_elt_t * e, int *n_tx_pkts) +session_tx_fifo_peek_and_snd (session_worker_t * wrk, + vlib_node_runtime_t * node, + session_evt_elt_t * e, int *n_tx_packets) { - return session_tx_fifo_read_and_snd_i (vm, node, wrk, e, n_tx_pkts, 1); + return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 1); } int -session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, - session_worker_t * wrk, - session_evt_elt_t * e, int *n_tx_pkts) +session_tx_fifo_dequeue_and_snd (session_worker_t * wrk, + vlib_node_runtime_t * node, + session_evt_elt_t * e, int *n_tx_packets) { - return session_tx_fifo_read_and_snd_i (vm, node, wrk, e, n_tx_pkts, 0); + return session_tx_fifo_read_and_snd_i (wrk, node, e, n_tx_packets, 0); } int -session_tx_fifo_dequeue_internal (vlib_main_t * vm, +session_tx_fifo_dequeue_internal (session_worker_t * wrk, vlib_node_runtime_t * node, - session_worker_t * wrk, - session_evt_elt_t * e, int *n_tx_pkts) + session_evt_elt_t * e, int *n_tx_packets) { session_t *s = wrk->ctx.s; if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED)) return 0; svm_fifo_unset_event (s->tx_fifo); - return transport_custom_tx (session_get_transport_proto (s), s); + return transport_custom_tx (session_get_transport_proto (s), s, + VLIB_FRAME_SIZE - *n_tx_packets); } always_inline session_t * @@ -837,16 +1077,191 @@ session_event_get_session (session_event_t * e, u8 thread_index) return session_get_if_valid (e->session_index, thread_index); } -static void -session_update_dispatch_period (session_worker_t * wrk, f64 now, - u32 thread_index) +always_inline void +session_event_dispatch_ctrl (session_worker_t * wrk, session_evt_elt_t * elt) { - if (wrk->last_tx_packets) + clib_llist_index_t ei; + void (*fp) (void *); + session_event_t *e; + session_t *s; + + ei = clib_llist_entry_index (wrk->event_elts, elt); + e = &elt->evt; + + switch (e->event_type) + { + case SESSION_CTRL_EVT_RPC: + fp = e->rpc_args.fp; + (*fp) (e->rpc_args.arg); + break; + case SESSION_CTRL_EVT_CLOSE: + s = session_get_from_handle_if_valid (e->session_handle); + if (PREDICT_FALSE (!s)) + break; + session_transport_close (s); + break; + case SESSION_CTRL_EVT_RESET: + s = session_get_from_handle_if_valid (e->session_handle); + if (PREDICT_FALSE (!s)) + break; + session_transport_reset (s); + break; + case SESSION_CTRL_EVT_LISTEN: + session_mq_listen_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_LISTEN_URI: + session_mq_listen_uri_handler (session_evt_ctrl_data (wrk, elt)); + 
break; + case SESSION_CTRL_EVT_UNLISTEN: + session_mq_unlisten_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_CONNECT: + session_mq_connect_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_CONNECT_URI: + session_mq_connect_uri_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_DISCONNECT: + session_mq_disconnect_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_DISCONNECTED: + session_mq_disconnected_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_ACCEPTED_REPLY: + session_mq_accepted_reply_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_DISCONNECTED_REPLY: + session_mq_disconnected_reply_handler (session_evt_ctrl_data (wrk, + elt)); + break; + case SESSION_CTRL_EVT_RESET_REPLY: + session_mq_reset_reply_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_WORKER_UPDATE: + session_mq_worker_update_handler (session_evt_ctrl_data (wrk, elt)); + break; + case SESSION_CTRL_EVT_APP_DETACH: + app_mq_detach_handler (session_evt_ctrl_data (wrk, elt)); + break; + default: + clib_warning ("unhandled event type %d", e->event_type); + } + + /* Regrab elements in case pool moved */ + elt = pool_elt_at_index (wrk->event_elts, ei); + if (!clib_llist_elt_is_linked (elt, evt_list)) + { + e = &elt->evt; + if (e->event_type >= SESSION_CTRL_EVT_BOUND) + session_evt_ctrl_data_free (wrk, elt); + session_evt_elt_free (wrk, elt); + } +} + +always_inline void +session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node, + session_evt_elt_t * elt, u32 thread_index, + int *n_tx_packets) +{ + session_main_t *smm = &session_main; + app_worker_t *app_wrk; + clib_llist_index_t ei; + session_event_t *e; + session_t *s; + + ei = clib_llist_entry_index (wrk->event_elts, elt); + e = &elt->evt; + + switch (e->event_type) { - f64 sample = now - wrk->last_vlib_time; - wrk->dispatch_period = (wrk->dispatch_period + sample) * 0.5; + case SESSION_IO_EVT_TX_FLUSH: + case SESSION_IO_EVT_TX: + s = session_event_get_session (e, thread_index); + if (PREDICT_FALSE (!s)) + break; + CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD); + wrk->ctx.s = s; + /* Spray packets in per session type frames, since they go to + * different nodes */ + (smm->session_tx_fns[s->session_type]) (wrk, node, elt, n_tx_packets); + break; + case SESSION_IO_EVT_RX: + s = session_event_get_session (e, thread_index); + if (!s) + break; + transport_app_rx_evt (session_get_transport_proto (s), + s->connection_index, s->thread_index); + break; + case SESSION_IO_EVT_BUILTIN_RX: + s = session_event_get_session (e, thread_index); + if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING)) + break; + svm_fifo_unset_event (s->rx_fifo); + app_wrk = app_worker_get (s->app_wrk_index); + app_worker_builtin_rx (app_wrk, s); + break; + case SESSION_IO_EVT_BUILTIN_TX: + s = session_get_from_handle_if_valid (e->session_handle); + wrk->ctx.s = s; + if (PREDICT_TRUE (s != 0)) + session_tx_fifo_dequeue_internal (wrk, node, elt, n_tx_packets); + break; + default: + clib_warning ("unhandled event type %d", e->event_type); } - wrk->last_vlib_time = now; + + /* Regrab elements in case pool moved */ + elt = pool_elt_at_index (wrk->event_elts, ei); + if (!clib_llist_elt_is_linked (elt, evt_list)) + session_evt_elt_free (wrk, elt); +} + +/* *INDENT-OFF* */ +static const u32 session_evt_msg_sizes[] = { +#define _(symc, sym) \ + [SESSION_CTRL_EVT_ ## symc] = 
sizeof (session_ ## sym ##_msg_t), + foreach_session_ctrl_evt +#undef _ +}; +/* *INDENT-ON* */ + +always_inline void +session_evt_add_to_list (session_worker_t * wrk, session_event_t * evt) +{ + session_evt_elt_t *elt; + + if (evt->event_type >= SESSION_CTRL_EVT_RPC) + { + elt = session_evt_alloc_ctrl (wrk); + if (evt->event_type >= SESSION_CTRL_EVT_BOUND) + { + elt->evt.ctrl_data_index = session_evt_ctrl_data_alloc (wrk); + elt->evt.event_type = evt->event_type; + clib_memcpy_fast (session_evt_ctrl_data (wrk, elt), evt->data, + session_evt_msg_sizes[evt->event_type]); + } + else + { + /* Internal control events fit into io events footprint */ + clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt)); + } + } + else + { + elt = session_evt_alloc_new (wrk); + clib_memcpy_fast (&elt->evt, evt, sizeof (elt->evt)); + } +} + +static void +session_flush_pending_tx_buffers (session_worker_t * wrk, + vlib_node_runtime_t * node) +{ + vlib_buffer_enqueue_to_next (wrk->vm, node, wrk->pending_tx_buffers, + wrk->pending_tx_nexts, + vec_len (wrk->pending_tx_nexts)); + vec_reset_length (wrk->pending_tx_buffers); + vec_reset_length (wrk->pending_tx_nexts); } static uword @@ -856,31 +1271,27 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, session_main_t *smm = vnet_get_session_main (); u32 thread_index = vm->thread_index, n_to_dequeue; session_worker_t *wrk = &smm->wrk[thread_index]; - session_evt_elt_t *elt, *new_he, *new_te, *pending_he; - session_evt_elt_t *disconnects_he, *postponed_he; + session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he; + clib_llist_index_t ei, next_ei, old_ti; svm_msg_q_msg_t _msg, *msg = &_msg; - f64 now = vlib_time_now (vm); - int n_tx_packets = 0, i, rv; - app_worker_t *app_wrk; + int i, n_tx_packets; + session_event_t *evt; svm_msg_q_t *mq; - void (*fp) (void *); - SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, thread_index); + SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk); + + wrk->last_vlib_time = vlib_time_now (vm); + wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ; /* * Update transport time */ - session_update_dispatch_period (wrk, now, thread_index); - transport_update_time (now, thread_index); - - SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0); + transport_update_time (wrk->last_vlib_time, thread_index); + n_tx_packets = vec_len (wrk->pending_tx_buffers); - /* Make sure postponed events are handled first */ - new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head); - new_te = clib_llist_prev (wrk->event_elts, evt_list, new_he); - - postponed_he = pool_elt_at_index (wrk->event_elts, wrk->postponed_head); - clib_llist_splice (wrk->event_elts, evt_list, new_te, postponed_he); + /* + * Dequeue and handle new events + */ /* Try to dequeue what is available. Don't wait for lock. * XXX: we may need priorities here */ @@ -890,149 +1301,76 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { for (i = 0; i < n_to_dequeue; i++) { - elt = session_evt_elt_alloc (wrk); svm_msg_q_sub_w_lock (mq, msg); - /* Works because reply messages are smaller than a session evt. 
- * If we ever need to support bigger messages this needs to be - * fixed */ - clib_memcpy_fast (&elt->evt, svm_msg_q_msg_data (mq, msg), - sizeof (elt->evt)); + evt = svm_msg_q_msg_data (mq, msg); + session_evt_add_to_list (wrk, evt); svm_msg_q_free_msg (mq, msg); - new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head); - clib_llist_add_tail (wrk->event_elts, evt_list, elt, new_he); } svm_msg_q_unlock (mq); } - pending_he = pool_elt_at_index (wrk->event_elts, wrk->pending_head); - postponed_he = pool_elt_at_index (wrk->event_elts, wrk->postponed_head); - disconnects_he = pool_elt_at_index (wrk->event_elts, wrk->disconnects_head); + /* + * Handle control events + */ - new_te = clib_llist_prev (wrk->event_elts, evt_list, new_he); - clib_llist_splice (wrk->event_elts, evt_list, new_te, pending_he); - new_te = clib_llist_prev (wrk->event_elts, evt_list, new_he); - clib_llist_splice (wrk->event_elts, evt_list, new_te, disconnects_he); + ctrl_he = pool_elt_at_index (wrk->event_elts, wrk->ctrl_head); - while (!clib_llist_is_empty (wrk->event_elts, evt_list, new_he)) + /* *INDENT-OFF* */ + clib_llist_foreach_safe (wrk->event_elts, evt_list, ctrl_he, elt, ({ + clib_llist_remove (wrk->event_elts, evt_list, elt); + session_event_dispatch_ctrl (wrk, elt); + })); + /* *INDENT-ON* */ + + /* + * Handle the new io events. + */ + + new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head); + old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head); + old_ti = clib_llist_prev_index (old_he, evt_list); + + ei = clib_llist_next_index (new_he, evt_list); + while (ei != wrk->new_head && n_tx_packets < VLIB_FRAME_SIZE) { - clib_llist_index_t ei; - session_event_t *e; - session_t *s; + elt = pool_elt_at_index (wrk->event_elts, ei); + ei = clib_llist_next_index (elt, evt_list); + clib_llist_remove (wrk->event_elts, evt_list, elt); + session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets); + } - clib_llist_pop_first (wrk->event_elts, evt_list, elt, new_he); - ei = clib_llist_entry_index (wrk->event_elts, elt); - e = &elt->evt; - switch (e->event_type) - { - case SESSION_IO_EVT_TX_FLUSH: - case SESSION_IO_EVT_TX: - /* Don't try to send more that one frame per dispatch cycle */ - if (n_tx_packets == VLIB_FRAME_SIZE) - { - session_evt_add_postponed (wrk, elt); - continue; - } + /* + * Handle the old io events, if we had any prior to processing the new ones + */ - s = session_event_get_session (e, thread_index); - if (PREDICT_FALSE (!s)) - { - clib_warning ("session was freed!"); - break; - } - CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD); - wrk->ctx.s = s; - /* Spray packets in per session type frames, since they go to - * different nodes */ - rv = (smm->session_tx_fns[s->session_type]) (vm, node, wrk, elt, - &n_tx_packets); - if (PREDICT_FALSE (rv == SESSION_TX_NO_BUFFERS)) - { - vlib_node_increment_counter (vm, node->node_index, - SESSION_QUEUE_ERROR_NO_BUFFER, 1); - break; - } - break; - case SESSION_IO_EVT_RX: - s = session_event_get_session (e, thread_index); - if (!s) - break; - transport_app_rx_evt (session_get_transport_proto (s), - s->connection_index, s->thread_index); - break; - case SESSION_CTRL_EVT_CLOSE: - s = session_get_from_handle_if_valid (e->session_handle); - if (PREDICT_FALSE (!s)) - break; + if (old_ti != wrk->old_head) + { + old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head); + ei = clib_llist_next_index (old_he, evt_list); - /* Make sure session disconnects run after the pending list is - * drained, i.e., postpone if the first time. 
If not the first - * and the tx queue is still not empty, try to wait for some - * dispatch cycles */ - if (!e->postponed - || (e->postponed < 200 - && svm_fifo_max_dequeue_cons (s->tx_fifo))) - { - e->postponed += 1; - session_evt_add_pending (wrk, elt); - continue; - } + while (n_tx_packets < VLIB_FRAME_SIZE) + { + elt = pool_elt_at_index (wrk->event_elts, ei); + next_ei = clib_llist_next_index (elt, evt_list); + clib_llist_remove (wrk->event_elts, evt_list, elt); - session_transport_close (s); - break; - case SESSION_IO_EVT_BUILTIN_RX: - s = session_event_get_session (e, thread_index); - if (PREDICT_FALSE (!s || s->session_state >= SESSION_STATE_CLOSING)) - break; - svm_fifo_unset_event (s->rx_fifo); - app_wrk = app_worker_get (s->app_wrk_index); - app_worker_builtin_rx (app_wrk, s); - break; - case SESSION_IO_EVT_BUILTIN_TX: - s = session_get_from_handle_if_valid (e->session_handle); - wrk->ctx.s = s; - if (PREDICT_TRUE (s != 0)) - session_tx_fifo_dequeue_internal (vm, node, wrk, elt, - &n_tx_packets); - break; - case SESSION_CTRL_EVT_RPC: - fp = e->rpc_args.fp; - (*fp) (e->rpc_args.arg); - break; - case SESSION_CTRL_EVT_DISCONNECTED: - session_mq_disconnected_handler (e->data); - break; - case SESSION_CTRL_EVT_ACCEPTED_REPLY: - session_mq_accepted_reply_handler (e->data); - break; - case SESSION_CTRL_EVT_CONNECTED_REPLY: - break; - case SESSION_CTRL_EVT_DISCONNECTED_REPLY: - session_mq_disconnected_reply_handler (e->data); - break; - case SESSION_CTRL_EVT_RESET_REPLY: - session_mq_reset_reply_handler (e->data); - break; - case SESSION_CTRL_EVT_WORKER_UPDATE: - session_mq_worker_update_handler (e->data); - break; - default: - clib_warning ("unhandled event type %d", e->event_type); - } + session_event_dispatch_io (wrk, node, elt, thread_index, + &n_tx_packets); - /* Regrab elements in case pool moved */ - elt = pool_elt_at_index (wrk->event_elts, ei); - if (!clib_llist_elt_is_linked (elt, evt_list)) - session_evt_elt_free (wrk, elt); + if (ei == old_ti) + break; - new_he = pool_elt_at_index (wrk->event_elts, wrk->new_head); + ei = next_ei; + }; } - wrk->last_tx_packets = n_tx_packets; + if (vec_len (wrk->pending_tx_buffers)) + session_flush_pending_tx_buffers (wrk, node); vlib_node_increment_counter (vm, session_queue_node.index, SESSION_QUEUE_ERROR_TX, n_tx_packets); - SESSION_EVT_DBG (SESSION_EVT_DISPATCH_END, smm, thread_index); + SESSION_EVT (SESSION_EVT_DISPATCH_END, wrk, n_tx_packets); return n_tx_packets; } @@ -1041,6 +1379,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, VLIB_REGISTER_NODE (session_queue_node) = { .function = session_queue_node_fn, + .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED, .name = "session-queue", .format_trace = format_session_queue_trace, .type = VLIB_NODE_TYPE_INPUT, @@ -1118,6 +1457,8 @@ session_node_cmp_event (session_event_t * e, svm_fifo_t * f) case SESSION_IO_EVT_RX: case SESSION_IO_EVT_TX: case SESSION_IO_EVT_BUILTIN_RX: + case SESSION_IO_EVT_BUILTIN_TX: + case SESSION_IO_EVT_TX_FLUSH: if (e->session_index == f->master_session_index) return 1; break; @@ -1167,8 +1508,7 @@ session_node_lookup_fifo_event (svm_fifo_t * f, session_event_t * e) found = session_node_cmp_event (e, f); if (found) return 1; - if (++index == mq->q->maxsize) - index = 0; + index = (index + 1) % mq->q->maxsize; } /* * Search pending events vector @@ -1176,17 +1516,31 @@ session_node_lookup_fifo_event (svm_fifo_t * f, session_event_t * e) /* *INDENT-OFF* */ clib_llist_foreach (wrk->event_elts, evt_list, - session_evt_pending_head (wrk), elt, ({ + 
pool_elt_at_index (wrk->event_elts, wrk->new_head), + elt, ({ found = session_node_cmp_event (&elt->evt, f); if (found) { clib_memcpy_fast (e, &elt->evt, sizeof (*e)); - break; + goto done; } + })); + /* *INDENT-ON* */ + /* *INDENT-OFF* */ + clib_llist_foreach (wrk->event_elts, evt_list, + pool_elt_at_index (wrk->event_elts, wrk->old_head), + elt, ({ + found = session_node_cmp_event (&elt->evt, f); + if (found) + { + clib_memcpy_fast (e, &elt->evt, sizeof (*e)); + goto done; + } })); /* *INDENT-ON* */ +done: return found; } @@ -1260,6 +1614,7 @@ session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, session_main_t *sm = &session_main; if (!sm->wrk[0].vpp_event_queue) return 0; + node = vlib_node_get_runtime (vm, session_queue_node.index); return session_queue_node_fn (vm, node, frame); }
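
The core behavioral change in the hunks above is the scheduler rework: the old pending/postponed/disconnects lists become explicit ctrl/new/old event lists, each dispatch of the session-queue node works against a per-frame burst budget, and flows that cannot send (or cannot send everything) are re-queued onto the old list instead of being re-armed through fifo events. Below is a minimal, self-contained C sketch of that policy, assuming toy types; flow_t, list_t, dispatch_flow, BURST_BUDGET and the list helpers are hypothetical stand-ins for illustration only, not VPP's clib_llist/session_evt APIs.

#include <stdio.h>
#include <string.h>

#define MAX_FLOWS 8
#define BURST_BUDGET 8		/* stand-in for the per-dispatch frame budget */

typedef struct
{
  int id;
  int bytes_left;		/* data pending in the flow's tx fifo */
  int snd_space;		/* what the transport currently allows */
} flow_t;

typedef struct
{
  flow_t *q[MAX_FLOWS];
  int n;
} list_t;

static void
list_push_tail (list_t * l, flow_t * f)
{
  l->q[l->n++] = f;
}

static void
list_push_head (list_t * l, flow_t * f)
{
  memmove (&l->q[1], &l->q[0], l->n * sizeof (l->q[0]));
  l->q[0] = f;
  l->n++;
}

static flow_t *
list_pop_head (list_t * l)
{
  flow_t *f;
  if (!l->n)
    return 0;
  f = l->q[0];
  memmove (&l->q[0], &l->q[1], --l->n * sizeof (l->q[0]));
  return f;
}

/* One flow's turn: send what the budget and snd_space allow, then decide
 * how to reschedule, loosely mirroring the decisions the patch adds to
 * session_tx_fifo_read_and_snd_i */
static int
dispatch_flow (flow_t * f, int budget, list_t * requeue)
{
  int n;
  if (!f->snd_space)
    {
      /* Nothing can be sent now: re-evaluate early on the next dispatch
       * (the patch uses session_evt_add_head_old for this case) */
      list_push_head (requeue, f);
      return 0;
    }
  n = f->bytes_left < f->snd_space ? f->bytes_left : f->snd_space;
  n = n < budget ? n : budget;
  f->bytes_left -= n;
  if (f->bytes_left)
    /* Partial send: back of the old list (session_evt_add_old) */
    list_push_tail (requeue, f);
  printf ("flow %d sent %d, %d left\n", f->id, n, f->bytes_left);
  return n;
}

int
main (void)
{
  flow_t flows[3] = { {0, 6, 4}, {1, 3, 0}, {2, 2, 4} };
  list_t new_list = { {0}, 0 }, old_list = { {0}, 0 }, next_old = { {0}, 0 };
  flow_t *f;
  int budget = BURST_BUDGET, i;

  for (i = 0; i < 3; i++)
    list_push_tail (&new_list, &flows[i]);

  /* One dispatch cycle: new io events first, then flows left over from
   * earlier cycles; next_old becomes the old list of the next cycle */
  while (budget > 0 && (f = list_pop_head (&new_list)))
    budget -= dispatch_flow (f, budget, &next_old);
  while (budget > 0 && (f = list_pop_head (&old_list)))
    budget -= dispatch_flow (f, budget, &next_old);

  printf ("budget left %d, flows waiting for next cycle: %d\n",
	  budget, next_old.n + new_list.n + old_list.n);
  return 0;
}

The sketch only models the send-space and budget decisions; the real node additionally drains the ctrl list first, stops the old-list walk at the tail captured before processing, and batches output buffers via pending_tx_buffers before vlib_buffer_enqueue_to_next.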