diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index b4b384fc617..30eca1e6577 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -50,11 +50,14 @@ session_mq_listen_handler (void *data)
   clib_memset (a, 0, sizeof (*a));
   a->sep.is_ip4 = mp->is_ip4;
   clib_memcpy_fast (&a->sep.ip, &mp->ip, sizeof (mp->ip));
+  if (mp->is_ip4)
+    ip46_address_mask_ip4 (&a->sep.ip);
   a->sep.port = mp->port;
   a->sep.fib_index = mp->vrf;
   a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
   a->sep.transport_proto = mp->proto;
   a->sep_ext.ckpair_index = mp->ckpair_index;
+  a->sep_ext.crypto_engine = mp->crypto_engine;
   a->app_index = app->app_index;
   a->wrk_map_index = mp->wrk_index;
 
@@ -112,9 +115,17 @@ session_mq_connect_handler (void *data)
   a->sep.transport_proto = mp->proto;
   a->sep.peer.fib_index = mp->vrf;
   clib_memcpy_fast (&a->sep.peer.ip, &mp->lcl_ip, sizeof (mp->lcl_ip));
+  if (mp->is_ip4)
+    {
+      ip46_address_mask_ip4 (&a->sep.ip);
+      ip46_address_mask_ip4 (&a->sep.peer.ip);
+    }
+
   a->sep.peer.port = mp->lcl_port;
   a->sep.peer.sw_if_index = ENDPOINT_INVALID_INDEX;
   a->sep_ext.parent_handle = mp->parent_handle;
   a->sep_ext.ckpair_index = mp->ckpair_index;
+  a->sep_ext.crypto_engine = mp->crypto_engine;
+  a->sep_ext.transport_flags = mp->flags;
   if (mp->hostname_len)
     {
       vec_validate (a->sep_ext.hostname, mp->hostname_len - 1);
@@ -128,8 +139,7 @@
     {
       clib_warning ("connect returned: %U", format_vnet_api_errno, rv);
       app_wrk = application_get_worker (app, mp->wrk_index);
-      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0,
-                                    /* is_fail */ 1);
+      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
     }
 
   vec_free (a->sep_ext.hostname);
@@ -158,8 +168,7 @@
     {
       clib_warning ("connect_uri returned: %d", rv);
       app_wrk = application_get_worker (app, 0 /* default wrk only */ );
-      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0,
-                                    /* is_fail */ 1);
+      mq_send_session_connected_cb (app_wrk->wrk_index, mp->context, 0, rv);
     }
 }
 
@@ -307,8 +316,8 @@ session_mq_reset_reply_handler (void *data)
   session_parse_handle (mp->handle, &index, &thread_index);
   s = session_get_if_valid (index, thread_index);
 
-  /* Session was already closed or already cleaned up */
-  if (!s || s->session_state != SESSION_STATE_TRANSPORT_CLOSING)
+  /* No session or not the right session */
+  if (!s || s->session_state < SESSION_STATE_TRANSPORT_CLOSING)
     return;
 
   app_wrk = app_worker_get (s->app_wrk_index);
@@ -498,7 +507,7 @@ format_session_queue_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   session_queue_trace_t *t = va_arg (*args, session_queue_trace_t *);
 
-  s = format (s, "SESSION_QUEUE: session index %d, server thread index %d",
+  s = format (s, "session index %d thread index %d",
               t->session_index, t->server_thread_index);
   return s;
 }
@@ -540,7 +549,7 @@ session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   for (i = 0; i < clib_min (n_trace, n_segs); i++)
     {
-      b = vlib_get_buffer (vm, to_next[i - n_segs]);
+      b = vlib_get_buffer (vm, to_next[i]);
       vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ );
       t = vlib_add_trace (vm, node, b, sizeof (*t));
       t->session_index = s->session_index;
@@ -564,7 +573,7 @@ session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
 
   b->total_length_not_including_first_buffer = 0;
   chain_b = b;
-  left_from_seg = clib_min (ctx->snd_mss - b->current_length,
+  left_from_seg = clib_min (ctx->sp.snd_mss - b->current_length,
                             ctx->left_to_snd);
   to_deq = left_from_seg;
   for (j = 1; j < ctx->n_bufs_per_seg; j++)
@@ -580,8 +589,8 @@ session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
       if (peek_data)
        {
          n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo,
-                                       ctx->tx_offset, len_to_deq, data);
-         ctx->tx_offset += n_bytes_read;
+                                       ctx->sp.tx_offset, len_to_deq, data);
+         ctx->sp.tx_offset += n_bytes_read;
        }
       else
        {
@@ -648,12 +657,12 @@ session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx,
 
   if (peek_data)
     {
-      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->tx_offset,
+      n_bytes_read = svm_fifo_peek (ctx->s->tx_fifo, ctx->sp.tx_offset,
                                     len_to_deq, data0);
       ASSERT (n_bytes_read > 0);
       /* Keep track of progress locally, transport is also supposed to
        * increment it independently when pushing the header */
-      ctx->tx_offset += n_bytes_read;
+      ctx->sp.tx_offset += n_bytes_read;
     }
   else
     {
@@ -753,13 +762,12 @@ session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
   if (peek_data)
     {
       /* Offset in rx fifo from where to peek data */
-      ctx->tx_offset = ctx->transport_vft->tx_fifo_offset (ctx->tc);
-      if (PREDICT_FALSE (ctx->tx_offset >= ctx->max_dequeue))
+      if (PREDICT_FALSE (ctx->sp.tx_offset >= ctx->max_dequeue))
        {
          ctx->max_len_to_snd = 0;
          return;
        }
-      ctx->max_dequeue -= ctx->tx_offset;
+      ctx->max_dequeue -= ctx->sp.tx_offset;
     }
   else
     {
@@ -779,38 +787,51 @@ session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx,
   ASSERT (ctx->max_dequeue > 0);
 
   /* Ensure we're not writing more than transport window allows */
-  if (ctx->max_dequeue < ctx->snd_space)
+  if (ctx->max_dequeue < ctx->sp.snd_space)
     {
       /* Constrained by tx queue. Try to send only fully formed segments */
-      ctx->max_len_to_snd =
-       (ctx->max_dequeue > ctx->snd_mss) ?
-       ctx->max_dequeue - ctx->max_dequeue % ctx->snd_mss : ctx->max_dequeue;
+      ctx->max_len_to_snd = (ctx->max_dequeue > ctx->sp.snd_mss) ?
+       (ctx->max_dequeue - (ctx->max_dequeue % ctx->sp.snd_mss)) :
+       ctx->max_dequeue;
       /* TODO Nagle ? */
     }
   else
     {
       /* Expectation is that snd_space0 is already a multiple of snd_mss */
-      ctx->max_len_to_snd = ctx->snd_space;
+      ctx->max_len_to_snd = ctx->sp.snd_space;
     }
 
   /* Check if we're tx constrained by the node */
-  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->snd_mss);
+  ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->sp.snd_mss);
   if (ctx->n_segs_per_evt > max_segs)
     {
       ctx->n_segs_per_evt = max_segs;
-      ctx->max_len_to_snd = max_segs * ctx->snd_mss;
+      ctx->max_len_to_snd = max_segs * ctx->sp.snd_mss;
     }
 
   n_bytes_per_buf = vlib_buffer_get_default_data_size (vm);
   ASSERT (n_bytes_per_buf > TRANSPORT_MAX_HDRS_LEN);
-  n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->snd_mss;
+  n_bytes_per_seg = TRANSPORT_MAX_HDRS_LEN + ctx->sp.snd_mss;
   ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf);
-  ctx->deq_per_buf = clib_min (ctx->snd_mss, n_bytes_per_buf);
-  ctx->deq_per_first_buf = clib_min (ctx->snd_mss,
+  ctx->deq_per_buf = clib_min (ctx->sp.snd_mss, n_bytes_per_buf);
+  ctx->deq_per_first_buf = clib_min (ctx->sp.snd_mss,
                                      n_bytes_per_buf - TRANSPORT_MAX_HDRS_LEN);
 }
 
+always_inline void
+session_tx_maybe_reschedule (session_worker_t * wrk,
+                             session_tx_context_t * ctx,
+                             session_evt_elt_t * elt)
+{
+  session_t *s = ctx->s;
+
+  svm_fifo_unset_event (s->tx_fifo);
+  if (svm_fifo_max_dequeue_cons (s->tx_fifo) > ctx->sp.tx_offset)
+    if (svm_fifo_set_event (s->tx_fifo))
+      session_evt_add_head_old (wrk, elt);
+}
+
 always_inline int
 session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
                                 vlib_node_runtime_t * node,
@@ -857,36 +878,53 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
          (ctx->s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
        return SESSION_TX_OK;
 
       max_burst -= n_custom_tx;
-      if (!max_burst)
+      if (!max_burst || (ctx->s->flags & SESSION_F_CUSTOM_TX))
        {
          session_evt_add_old (wrk, elt);
          return SESSION_TX_OK;
        }
     }
 
-  ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc);
-  ctx->snd_space = transport_connection_snd_space (ctx->tc,
-                                                   vm->clib_time.
-                                                   last_cpu_time,
-                                                   ctx->snd_mss);
+  transport_connection_snd_params (ctx->tc, &ctx->sp);
 
-  if (ctx->snd_space == 0 || ctx->snd_mss == 0)
+  if (!ctx->sp.snd_space)
     {
-      session_evt_add_old (wrk, elt);
+      /* If the deschedule flag was set, remove session from scheduler.
+       * Transport is responsible for rescheduling this session. */
+      if (ctx->sp.flags & TRANSPORT_SND_F_DESCHED)
+       transport_connection_deschedule (ctx->tc);
+      /* Request to postpone the session, e.g., zero-wnd and transport
+       * is not currently probing */
+      else if (ctx->sp.flags & TRANSPORT_SND_F_POSTPONE)
+       session_evt_add_old (wrk, elt);
+      /* This flow queue is "empty" so it should be re-evaluated before
+       * the ones that have data to send. */
+      else
+       session_evt_add_head_old (wrk, elt);
+
       return SESSION_TX_NO_DATA;
     }
 
-  /* Allow enqueuing of a new event */
-  svm_fifo_unset_event (ctx->s->tx_fifo);
+  if (transport_connection_is_tx_paced (ctx->tc))
+    {
+      u32 snd_space = transport_connection_tx_pacer_burst (ctx->tc);
+      if (snd_space < TRANSPORT_PACER_MIN_BURST)
+       {
+         session_evt_add_head_old (wrk, elt);
+         return SESSION_TX_NO_DATA;
+       }
+      snd_space = clib_min (ctx->sp.snd_space, snd_space);
+      ctx->sp.snd_space = snd_space >= ctx->sp.snd_mss ?
+       snd_space - snd_space % ctx->sp.snd_mss : snd_space;
+    }
 
   /* Check how much we can pull. */
   session_tx_set_dequeue_params (vm, ctx, max_burst, peek_data);
 
   if (PREDICT_FALSE (!ctx->max_len_to_snd))
     {
-      transport_connection_tx_pacer_reset_bucket (ctx->tc,
-                                                  vm->clib_time.
-                                                  last_cpu_time);
+      transport_connection_tx_pacer_reset_bucket (ctx->tc, 0);
+      session_tx_maybe_reschedule (wrk, ctx, elt);
       return SESSION_TX_NO_DATA;
     }
 
@@ -898,7 +936,7 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
     {
       if (n_bufs)
        vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
-      session_evt_add_old (wrk, elt);
+      session_evt_add_head_old (wrk, elt);
       vlib_node_increment_counter (wrk->vm, node->node_index,
                                    SESSION_QUEUE_ERROR_NO_BUFFER, 1);
       return SESSION_TX_NO_BUFFERS;
@@ -982,11 +1020,14 @@ session_tx_fifo_read_and_snd_i (session_worker_t * wrk,
   SESSION_EVT (SESSION_EVT_DEQ, ctx->s, ctx->max_len_to_snd, ctx->max_dequeue,
                ctx->s->tx_fifo->has_event, wrk->last_vlib_time);
 
-  /* If we couldn't dequeue all bytes mark as partially read */
   ASSERT (ctx->left_to_snd == 0);
+
+  /* If we couldn't dequeue all bytes reschedule as old flow. Otherwise,
+   * check if application enqueued more data and reschedule accordingly */
   if (ctx->max_len_to_snd < ctx->max_dequeue)
-    if (svm_fifo_set_event (ctx->s->tx_fifo))
-      session_evt_add_old (wrk, elt);
+    session_evt_add_old (wrk, elt);
+  else
+    session_tx_maybe_reschedule (wrk, ctx, elt);
 
   if (!peek_data
       && ctx->transport_vft->transport_options.tx_type == TRANSPORT_TX_DGRAM)
@@ -1025,15 +1066,37 @@ session_tx_fifo_dequeue_and_snd (session_worker_t * wrk,
 int
 session_tx_fifo_dequeue_internal (session_worker_t * wrk,
                                   vlib_node_runtime_t * node,
-                                  session_evt_elt_t * e, int *n_tx_packets)
+                                  session_evt_elt_t * elt, int *n_tx_packets)
 {
   session_t *s = wrk->ctx.s;
+  u32 n_packets, max_pkts;
 
   if (PREDICT_FALSE (s->session_state >= SESSION_STATE_TRANSPORT_CLOSED))
     return 0;
-  svm_fifo_unset_event (s->tx_fifo);
-  return transport_custom_tx (session_get_transport_proto (s), s,
-                              VLIB_FRAME_SIZE - *n_tx_packets);
+
+  /* Clear custom-tx flag used to request reschedule for tx */
+  s->flags &= ~SESSION_F_CUSTOM_TX;
+
+  max_pkts = clib_min (VLIB_FRAME_SIZE - *n_tx_packets,
+                       TRANSPORT_PACER_MAX_BURST_PKTS);
+  n_packets = transport_custom_tx (session_get_transport_proto (s), s,
+                                   max_pkts);
+  *n_tx_packets += n_packets;
+
+  if (svm_fifo_max_dequeue_cons (s->tx_fifo)
+      || (s->flags & SESSION_F_CUSTOM_TX))
+    {
+      session_evt_add_old (wrk, elt);
+    }
+  else
+    {
+      svm_fifo_unset_event (s->tx_fifo);
+      if (svm_fifo_max_dequeue_cons (s->tx_fifo))
+       if (svm_fifo_set_event (s->tx_fifo))
+         session_evt_add_head_old (wrk, elt);
+    }
+
+  return n_packets;
 }
 
 always_inline session_t *
@@ -1143,10 +1206,7 @@ session_event_dispatch_io (session_worker_t * wrk, vlib_node_runtime_t * node,
     case SESSION_IO_EVT_TX:
       s = session_event_get_session (e, thread_index);
       if (PREDICT_FALSE (!s))
-       {
-         clib_warning ("session %u was freed!", e->session_index);
-         break;
-       }
+       break;
       CLIB_PREFETCH (s->tx_fifo, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      wrk->ctx.s = s;
       /* Spray packets in per session type frames, since they go to
@@ -1240,20 +1300,22 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 thread_index = vm->thread_index, n_to_dequeue;
   session_worker_t *wrk = &smm->wrk[thread_index];
   session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
+  clib_llist_index_t ei, next_ei, old_ti;
   svm_msg_q_msg_t _msg, *msg = &_msg;
-  clib_llist_index_t old_ti;
-  int i, n_tx_packets = 0;
+  int i, n_tx_packets;
   session_event_t *evt;
   svm_msg_q_t *mq;
 
   SESSION_EVT (SESSION_EVT_DISPATCH_START, wrk);
 
   wrk->last_vlib_time = vlib_time_now (vm);
+  wrk->last_vlib_us_time = wrk->last_vlib_time * CLIB_US_TIME_FREQ;
 
   /*
    *  Update transport time
    */
   transport_update_time (wrk->last_vlib_time, thread_index);
+  n_tx_packets = vec_len (wrk->pending_tx_buffers);
 
   /*
    *  Dequeue and handle new events
@@ -1296,24 +1358,14 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
   old_ti = clib_llist_prev_index (old_he, evt_list);
 
-  /* *INDENT-OFF* */
-  clib_llist_foreach_safe (wrk->event_elts, evt_list, new_he, elt, ({
-    session_evt_type_t et;
-
-    et = elt->evt.event_type;
-    clib_llist_remove (wrk->event_elts, evt_list, elt);
-
-    /* Postpone tx events if we can't handle them this dispatch cycle */
-    if (n_tx_packets >= VLIB_FRAME_SIZE
-       && (et == SESSION_IO_EVT_TX || et == SESSION_IO_EVT_TX_FLUSH))
-      {
-       clib_llist_add (wrk->event_elts, evt_list, elt, new_he);
-       continue;
-      }
-
-    session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
-  }));
-  /* *INDENT-ON* */
+  ei = clib_llist_next_index (new_he, evt_list);
+  while (ei != wrk->new_head && n_tx_packets < VLIB_FRAME_SIZE)
+    {
+      elt = pool_elt_at_index (wrk->event_elts, ei);
+      ei = clib_llist_next_index (elt, evt_list);
+      clib_llist_remove (wrk->event_elts, evt_list, elt);
+      session_event_dispatch_io (wrk, node, elt, thread_index, &n_tx_packets);
+    }
 
   /*
    * Handle the old io events, if we had any prior to processing the new ones
@@ -1322,18 +1374,21 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   if (old_ti != wrk->old_head)
     {
       old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
+      ei = clib_llist_next_index (old_he, evt_list);
+
       while (n_tx_packets < VLIB_FRAME_SIZE)
        {
-         clib_llist_index_t ei;
+         elt = pool_elt_at_index (wrk->event_elts, ei);
+         next_ei = clib_llist_next_index (elt, evt_list);
+         clib_llist_remove (wrk->event_elts, evt_list, elt);
 
-         clib_llist_pop_first (wrk->event_elts, evt_list, elt, old_he);
-         ei = clib_llist_entry_index (wrk->event_elts, elt);
          session_event_dispatch_io (wrk, node, elt, thread_index,
                                     &n_tx_packets);
 
-         old_he = pool_elt_at_index (wrk->event_elts, wrk->old_head);
          if (ei == old_ti)
            break;
+
+         ei = next_ei;
        };
     }
 
@@ -1536,33 +1591,40 @@ session_queue_exit (vlib_main_t * vm)
 
 VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit);
 
+static uword
+session_queue_run_on_main (vlib_main_t * vm)
+{
+  vlib_node_runtime_t *node;
+
+  node = vlib_node_get_runtime (vm, session_queue_node.index);
+  return session_queue_node_fn (vm, node, 0);
+}
+
 static uword
 session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                        vlib_frame_t * f)
 {
-  f64 now, timeout = 1.0;
   uword *event_data = 0;
+  f64 timeout = 1.0;
   uword event_type;
 
   while (1)
     {
       vlib_process_wait_for_event_or_clock (vm, timeout);
-      now = vlib_time_now (vm);
       event_type = vlib_process_get_events (vm, (uword **) & event_data);
 
       switch (event_type)
        {
-       case SESSION_Q_PROCESS_FLUSH_FRAMES:
-         /* Flush the frames by updating all transports times */
-         transport_update_time (now, 0);
+       case SESSION_Q_PROCESS_RUN_ON_MAIN:
+         /* Run session queue node on main thread */
+         session_queue_run_on_main (vm);
          break;
       case SESSION_Q_PROCESS_STOP:
         timeout = 100000.0;
         break;
       case ~0:
-         /* Timed out. Update time for all transports to trigger all
-          * outstanding retransmits. */
-         transport_update_time (now, 0);
+         /* Timed out. Run on main to ensure all events are handled */
+         session_queue_run_on_main (vm);
         break;
       }
       vec_reset_length (event_data);
@@ -1587,6 +1649,7 @@ session_queue_pre_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   session_main_t *sm = &session_main;
   if (!sm->wrk[0].vpp_event_queue)
     return 0;
+  node = vlib_node_get_runtime (vm, session_queue_node.index);
   return session_queue_node_fn (vm, node, frame);
 }
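Note on the fifo event protocol used above by session_tx_maybe_reschedule () and
session_tx_fifo_dequeue_internal (): the dispatcher first clears the fifo's
io-event flag and only then re-checks occupancy, re-arming the flag and requeuing
the flow if data raced in. The standalone sketch below models that
unset/re-check/set sequence with a toy fifo; the toy_* names and the C11 atomics
are illustrative stand-ins for this note only, not VPP's svm_fifo API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
  atomic_uint bytes;     /* bytes enqueued and not yet dequeued */
  atomic_bool has_event; /* an io event for this fifo is in flight */
} toy_fifo_t;

/* Returns true if the caller claimed the right to post an event, mirroring
 * the svm_fifo_set_event () contract of at most one event in flight. */
static bool
toy_fifo_set_event (toy_fifo_t * f)
{
  return !atomic_exchange (&f->has_event, true);
}

static void
toy_fifo_unset_event (toy_fifo_t * f)
{
  atomic_store (&f->has_event, false);
}

/* Producer (application) side: enqueue, then post an event only if none
 * is outstanding. */
static void
toy_enqueue (toy_fifo_t * f, unsigned n)
{
  atomic_fetch_add (&f->bytes, n);
  if (toy_fifo_set_event (f))
    printf ("producer: post tx event to worker mq\n");
}

/* Dispatcher side, after a tx burst for this flow completes. */
static void
toy_maybe_reschedule (toy_fifo_t * f)
{
  /* Allow the producer to post a new event again ... */
  toy_fifo_unset_event (f);
  /* ... then re-check: data may have been enqueued between the last
   * dequeue and unset_event. Without this re-check, that data would sit
   * in the fifo with no event pending, i.e., a lost wakeup. */
  if (atomic_load (&f->bytes))
    if (toy_fifo_set_event (f))
      printf ("dispatcher: requeue flow at head of old list\n");
}

int
main (void)
{
  toy_fifo_t f = { 0 };

  toy_enqueue (&f, 100);            /* posts an event */
  toy_enqueue (&f, 50);             /* event already set: no duplicate */
  atomic_fetch_sub (&f.bytes, 150); /* dispatcher drains the fifo */
  toy_maybe_reschedule (&f);        /* empty and quiet: flow goes idle */
  toy_enqueue (&f, 10);             /* producer wakes the flow up again */
  return 0;
}

Claiming the flag with an atomic exchange means exactly one party, producer or
dispatcher, requeues the flow, never both, which is why the patch can drop the
unconditional svm_fifo_unset_event () that used to run before every tx burst.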
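The zero-send-space branch added to session_tx_fifo_read_and_snd_i () encodes
three scheduler outcomes. Below is a compact model of that decision tree with
stubbed-out scheduler hooks; only the TRANSPORT_SND_F_* flag names come from the
patch, the toy_* helpers and flag values are hypothetical.

#include <stdio.h>

#define TRANSPORT_SND_F_DESCHED  (1 << 0)
#define TRANSPORT_SND_F_POSTPONE (1 << 1)

typedef struct
{
  unsigned snd_space; /* bytes the transport allows us to send now */
  unsigned snd_mss;
  unsigned flags;
} toy_send_params_t;

/* Stubs standing in for transport_connection_deschedule (),
 * session_evt_add_old () and session_evt_add_head_old (). */
static void toy_deschedule (void)   { printf ("off scheduler; transport re-adds it\n"); }
static void toy_add_old_tail (void) { printf ("tail of old list; retry after other flows\n"); }
static void toy_add_old_head (void) { printf ("head of old list; re-evaluate first\n"); }

static void
toy_handle_no_snd_space (toy_send_params_t * sp)
{
  if (sp->flags & TRANSPORT_SND_F_DESCHED)
    toy_deschedule ();          /* e.g., tx queue full; polling is useless */
  else if (sp->flags & TRANSPORT_SND_F_POSTPONE)
    toy_add_old_tail ();        /* e.g., zero rcv window and not probing */
  else
    toy_add_old_head ();        /* transient condition; check again soon */
}

int
main (void)
{
  toy_send_params_t sp = { .snd_space = 0, .snd_mss = 1460,
                           .flags = TRANSPORT_SND_F_POSTPONE };
  toy_handle_no_snd_space (&sp);
  return 0;
}

When send space is available and the connection is tx paced, the patch
additionally clamps it to the pacer burst and, via
snd_space - snd_space % snd_mss, rounds it down to a multiple of snd_mss so
bursts go out as fully formed segments.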