X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fsession%2Fsession_node.c;h=4fd8d0e0299ecdbe7de85d034b5d1eecb008f107;hb=b26743d093141a2aef19bdf8a7fe06dcaa81329a;hp=9c5b17d98e6d4eb53e09dbe3ab44c93d9cc06e06;hpb=b2215d6b0d8ef7d425d2b9eea524a1c055a9f3b3;p=vpp.git diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index 9c5b17d98e6..4fd8d0e0299 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -16,11 +16,12 @@ #include #include #include -#include #include +#include +#include #include #include -#include +#include vlib_node_registration_t session_queue_node; @@ -64,336 +65,495 @@ static char *session_queue_error_strings[] = { #undef _ }; -static u32 session_type_to_next[] = { - SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT, - SESSION_QUEUE_NEXT_IP4_LOOKUP, - SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT, - SESSION_QUEUE_NEXT_IP6_LOOKUP, -}; - -always_inline void -session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, - u8 thread_index, svm_fifo_t * fifo, - vlib_buffer_t * b0, u32 bi0, u8 n_bufs_per_seg, - u32 left_from_seg, u32 * left_to_snd0, - u16 * n_bufs, u32 * rx_offset, u16 deq_per_buf, - u8 peek_data) +static void +session_tx_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node, + u32 next_index, u32 * to_next, u16 n_segs, + stream_session_t * s, u32 n_trace) { - vlib_buffer_t *chain_b0, *prev_b0; - u32 chain_bi0, to_deq; - u16 len_to_deq0, n_bytes_read; - u8 *data0, j; + session_queue_trace_t *t; + vlib_buffer_t *b; + int i; - b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; - b0->total_length_not_including_first_buffer = 0; + for (i = 0; i < clib_min (n_trace, n_segs); i++) + { + b = vlib_get_buffer (vm, to_next[i - n_segs]); + vlib_trace_buffer (vm, node, next_index, b, 1 /* follow_chain */ ); + t = vlib_add_trace (vm, node, b, sizeof (*t)); + t->session_index = s->session_index; + t->server_thread_index = s->thread_index; + } + vlib_set_trace_count (vm, node, n_trace - i); +} - chain_bi0 = bi0; - chain_b0 = b0; +always_inline void +session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx, + vlib_buffer_t * b, u16 * n_bufs, u8 peek_data) +{ + session_manager_main_t *smm = &session_manager_main; + vlib_buffer_t *chain_b, *prev_b; + u32 chain_bi0, to_deq, left_from_seg; + u16 len_to_deq, n_bytes_read; + u8 *data, j; + + b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; + b->total_length_not_including_first_buffer = 0; + + chain_b = b; + left_from_seg = clib_min (ctx->snd_mss - b->current_length, + ctx->left_to_snd); to_deq = left_from_seg; - for (j = 1; j < n_bufs_per_seg; j++) + for (j = 1; j < ctx->n_bufs_per_seg; j++) { - prev_b0 = chain_b0; - len_to_deq0 = clib_min (to_deq, deq_per_buf); + prev_b = chain_b; + len_to_deq = clib_min (to_deq, ctx->deq_per_buf); *n_bufs -= 1; - chain_bi0 = smm->tx_buffers[thread_index][*n_bufs]; - _vec_len (smm->tx_buffers[thread_index]) = *n_bufs; + chain_bi0 = smm->tx_buffers[ctx->s->thread_index][*n_bufs]; + _vec_len (smm->tx_buffers[ctx->s->thread_index]) = *n_bufs; - chain_b0 = vlib_get_buffer (vm, chain_bi0); - chain_b0->current_data = 0; - data0 = vlib_buffer_get_current (chain_b0); + chain_b = vlib_get_buffer (vm, chain_bi0); + chain_b->current_data = 0; + data = vlib_buffer_get_current (chain_b); if (peek_data) { - n_bytes_read = svm_fifo_peek (fifo, *rx_offset, len_to_deq0, data0); - *rx_offset += n_bytes_read; + n_bytes_read = svm_fifo_peek (ctx->s->server_tx_fifo, + ctx->tx_offset, len_to_deq, data); + ctx->tx_offset += n_bytes_read; } else { - 
n_bytes_read = svm_fifo_dequeue_nowait (fifo, len_to_deq0, data0); + if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM) + { + svm_fifo_t *f = ctx->s->server_tx_fifo; + session_dgram_hdr_t *hdr = &ctx->hdr; + u16 deq_now; + deq_now = clib_min (hdr->data_length - hdr->data_offset, + len_to_deq); + n_bytes_read = svm_fifo_peek (f, hdr->data_offset, deq_now, + data); + ASSERT (n_bytes_read > 0); + + hdr->data_offset += n_bytes_read; + if (hdr->data_offset == hdr->data_length) + svm_fifo_dequeue_drop (f, hdr->data_length); + } + else + n_bytes_read = svm_fifo_dequeue_nowait (ctx->s->server_tx_fifo, + len_to_deq, data); } - ASSERT (n_bytes_read == len_to_deq0); - chain_b0->current_length = n_bytes_read; - b0->total_length_not_including_first_buffer += chain_b0->current_length; + ASSERT (n_bytes_read == len_to_deq); + chain_b->current_length = n_bytes_read; + b->total_length_not_including_first_buffer += chain_b->current_length; /* update previous buffer */ - prev_b0->next_buffer = chain_bi0; - prev_b0->flags |= VLIB_BUFFER_NEXT_PRESENT; + prev_b->next_buffer = chain_bi0; + prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT; /* update current buffer */ - chain_b0->next_buffer = 0; + chain_b->next_buffer = 0; to_deq -= n_bytes_read; if (to_deq == 0) break; } - ASSERT (to_deq == 0); - *left_to_snd0 -= left_from_seg; + ASSERT (to_deq == 0 + && b->total_length_not_including_first_buffer == left_from_seg); + ctx->left_to_snd -= left_from_seg; } always_inline int -session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, +session_output_try_get_buffers (vlib_main_t * vm, session_manager_main_t * smm, - session_fifo_event_t * e0, - stream_session_t * s0, u32 thread_index, - int *n_tx_packets, u8 peek_data) + u32 thread_index, u16 * n_bufs, u32 wanted) { - u32 n_trace = vlib_get_trace_count (vm, node); - u32 left_to_snd0, max_len_to_snd0, len_to_deq0, snd_space0; - u32 n_bufs_per_evt, n_frames_per_evt; - transport_connection_t *tc0; - transport_proto_vft_t *transport_vft; - u32 next_index, next0, *to_next, n_left_to_next, bi0; - vlib_buffer_t *b0; - u32 rx_offset = 0, max_dequeue0, n_bytes_per_seg; - u16 snd_mss0, n_bufs_per_seg, n_bufs; - u8 *data0; - int i, n_bytes_read; - u32 n_bytes_per_buf, deq_per_buf; - u32 buffers_allocated, buffers_allocated_this_call; + u32 n_alloc; + vec_validate_aligned (smm->tx_buffers[thread_index], wanted - 1, + CLIB_CACHE_LINE_BYTES); + n_alloc = vlib_buffer_alloc (vm, &smm->tx_buffers[thread_index][*n_bufs], + wanted - *n_bufs); + *n_bufs += n_alloc; + _vec_len (smm->tx_buffers[thread_index]) = *n_bufs; + return n_alloc; +} - next_index = next0 = session_type_to_next[s0->session_type]; +always_inline void +session_tx_fill_buffer (vlib_main_t * vm, session_tx_context_t * ctx, + vlib_buffer_t * b, u16 * n_bufs, u8 peek_data) +{ + u32 len_to_deq; + u8 *data0; + int n_bytes_read; - transport_vft = session_get_transport_vft (s0->session_type); - tc0 = transport_vft->get_connection (s0->connection_index, thread_index); + /* + * Start with the first buffer in chain + */ + b->error = 0; + b->flags = VNET_BUFFER_F_LOCALLY_ORIGINATED; + b->current_data = 0; - /* Make sure we have space to send and there's something to dequeue */ - snd_mss0 = transport_vft->send_mss (tc0); - snd_space0 = transport_vft->send_space (tc0); + data0 = vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + len_to_deq = clib_min (ctx->left_to_snd, ctx->deq_per_first_buf); - /* Can't make any progress */ - if (snd_space0 == 0 || snd_mss0 == 0) + if (peek_data) { - vec_add1 
(smm->pending_event_vector[thread_index], *e0); - return 0; + n_bytes_read = svm_fifo_peek (ctx->s->server_tx_fifo, ctx->tx_offset, + len_to_deq, data0); + ASSERT (n_bytes_read > 0); + /* Keep track of progress locally, transport is also supposed to + * increment it independently when pushing the header */ + ctx->tx_offset += n_bytes_read; + } + else + { + if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM) + { + session_dgram_hdr_t *hdr = &ctx->hdr; + svm_fifo_t *f = ctx->s->server_tx_fifo; + u16 deq_now; + u32 offset; + + ASSERT (hdr->data_length > hdr->data_offset); + deq_now = clib_min (hdr->data_length - hdr->data_offset, + len_to_deq); + offset = hdr->data_offset + SESSION_CONN_HDR_LEN; + n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0); + ASSERT (n_bytes_read > 0); + + if (ctx->s->session_state == SESSION_STATE_LISTENING) + { + ip_copy (&ctx->tc->rmt_ip, &hdr->rmt_ip, ctx->tc->is_ip4); + ctx->tc->rmt_port = hdr->rmt_port; + } + hdr->data_offset += n_bytes_read; + if (hdr->data_offset == hdr->data_length) + { + offset = hdr->data_length + SESSION_CONN_HDR_LEN; + svm_fifo_dequeue_drop (f, offset); + } + } + else + { + n_bytes_read = svm_fifo_dequeue_nowait (ctx->s->server_tx_fifo, + len_to_deq, data0); + ASSERT (n_bytes_read > 0); + } } + b->current_length = n_bytes_read; + ctx->left_to_snd -= n_bytes_read; + /* + * Fill in the remaining buffers in the chain, if any + */ + if (PREDICT_FALSE (ctx->n_bufs_per_seg > 1 && ctx->left_to_snd)) + session_tx_fifo_chain_tail (vm, ctx, b, n_bufs, peek_data); + + /* *INDENT-OFF* */ + SESSION_EVT_DBG(SESSION_EVT_DEQ, ctx->s, ({ + ed->data[0] = FIFO_EVENT_APP_TX; + ed->data[1] = ctx->max_dequeue; + ed->data[2] = len_to_deq; + ed->data[3] = ctx->left_to_snd; + })); + /* *INDENT-ON* */ +} + +always_inline u8 +session_tx_not_ready (stream_session_t * s, u8 peek_data) +{ if (peek_data) { - /* Offset in rx fifo from where to peek data */ - rx_offset = transport_vft->tx_fifo_offset (tc0); + /* Can retransmit for closed sessions but can't send new data if + * session is not ready or closed */ + if (s->session_state < SESSION_STATE_READY) + return 1; + if (s->session_state == SESSION_STATE_CLOSED) + return 2; } + return 0; +} - /* Check how much we can pull. 
If buffering, subtract the offset */ - max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo) - rx_offset; +always_inline transport_connection_t * +session_tx_get_transport (session_tx_context_t * ctx, u8 peek_data) +{ + if (peek_data) + { + return ctx->transport_vft->get_connection (ctx->s->connection_index, + ctx->s->thread_index); + } + else + { + if (ctx->s->session_state == SESSION_STATE_LISTENING) + return ctx->transport_vft->get_listener (ctx->s->connection_index); + else + { + return ctx->transport_vft->get_connection (ctx->s->connection_index, + ctx->s->thread_index); + } + } +} - /* Nothing to read return */ - if (max_dequeue0 == 0) +always_inline void +session_tx_set_dequeue_params (vlib_main_t * vm, session_tx_context_t * ctx, + u32 max_segs, u8 peek_data) +{ + u32 n_bytes_per_buf, n_bytes_per_seg; + ctx->max_dequeue = svm_fifo_max_dequeue (ctx->s->server_tx_fifo); + if (peek_data) { - svm_fifo_unset_event (s0->server_tx_fifo); - return 0; + /* Offset in rx fifo from where to peek data */ + ctx->tx_offset = ctx->transport_vft->tx_fifo_offset (ctx->tc); + if (PREDICT_FALSE (ctx->tx_offset >= ctx->max_dequeue)) + { + ctx->max_len_to_snd = 0; + return; + } + ctx->max_dequeue -= ctx->tx_offset; } + else + { + if (ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM) + { + if (ctx->max_dequeue <= sizeof (ctx->hdr)) + { + ctx->max_len_to_snd = 0; + return; + } + svm_fifo_peek (ctx->s->server_tx_fifo, 0, sizeof (ctx->hdr), + (u8 *) & ctx->hdr); + ASSERT (ctx->hdr.data_length > ctx->hdr.data_offset); + ctx->max_dequeue = ctx->hdr.data_length - ctx->hdr.data_offset; + } + } + ASSERT (ctx->max_dequeue > 0); /* Ensure we're not writing more than transport window allows */ - if (max_dequeue0 < snd_space0) + if (ctx->max_dequeue < ctx->snd_space) { /* Constrained by tx queue. Try to send only fully formed segments */ - max_len_to_snd0 = (max_dequeue0 > snd_mss0) ? - max_dequeue0 - max_dequeue0 % snd_mss0 : max_dequeue0; + ctx->max_len_to_snd = + (ctx->max_dequeue > ctx->snd_mss) ? + ctx->max_dequeue - ctx->max_dequeue % ctx->snd_mss : ctx->max_dequeue; /* TODO Nagle ? 
*/ } else { - max_len_to_snd0 = snd_space0; + /* Expectation is that snd_space0 is already a multiple of snd_mss */ + ctx->max_len_to_snd = ctx->snd_space; } - n_bytes_per_buf = vlib_buffer_free_list_buffer_size - (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); - n_bytes_per_seg = MAX_HDRS_LEN + snd_mss0; - n_bufs_per_seg = ceil ((double) n_bytes_per_seg / n_bytes_per_buf); - n_bufs_per_evt = (ceil ((double) max_len_to_snd0 / n_bytes_per_seg)) - * n_bufs_per_seg; - n_frames_per_evt = ceil ((double) n_bufs_per_evt / VLIB_FRAME_SIZE); + /* Check if we're tx constrained by the node */ + ctx->n_segs_per_evt = ceil ((f64) ctx->max_len_to_snd / ctx->snd_mss); + if (ctx->n_segs_per_evt > max_segs) + { + ctx->n_segs_per_evt = max_segs; + ctx->max_len_to_snd = max_segs * ctx->snd_mss; + } - deq_per_buf = clib_min (snd_mss0, n_bytes_per_buf); + n_bytes_per_buf = vlib_buffer_free_list_buffer_size (vm, + VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); + ASSERT (n_bytes_per_buf > MAX_HDRS_LEN); + n_bytes_per_seg = MAX_HDRS_LEN + ctx->snd_mss; + ctx->n_bufs_per_seg = ceil ((f64) n_bytes_per_seg / n_bytes_per_buf); + ctx->deq_per_buf = clib_min (ctx->snd_mss, n_bytes_per_buf); + ctx->deq_per_first_buf = clib_min (ctx->snd_mss, + n_bytes_per_buf - MAX_HDRS_LEN); +} - n_bufs = vec_len (smm->tx_buffers[thread_index]); - left_to_snd0 = max_len_to_snd0; - for (i = 0; i < n_frames_per_evt; i++) +always_inline int +session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, + session_fifo_event_t * e, + stream_session_t * s, int *n_tx_packets, + u8 peek_data) +{ + u32 next_index, next0, next1, *to_next, n_left_to_next; + u32 n_trace = vlib_get_trace_count (vm, node), n_bufs_needed = 0; + u32 thread_index = s->thread_index, n_left, pbi; + session_manager_main_t *smm = &session_manager_main; + session_tx_context_t *ctx = &smm->ctx[thread_index]; + transport_proto_t tp; + vlib_buffer_t *pb; + u16 n_bufs, rv; + + if (PREDICT_FALSE ((rv = session_tx_not_ready (s, peek_data)))) { - /* Make sure we have at least one full frame of buffers ready */ - if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE)) - { - vec_validate (smm->tx_buffers[thread_index], - n_bufs + 2 * VLIB_FRAME_SIZE - 1); + if (rv < 2) + vec_add1 (smm->pending_event_vector[thread_index], *e); + return 0; + } - buffers_allocated = 0; - do - { - buffers_allocated_this_call = - vlib_buffer_alloc - (vm, - &smm->tx_buffers[thread_index][n_bufs + buffers_allocated], - 2 * VLIB_FRAME_SIZE - buffers_allocated); - buffers_allocated += buffers_allocated_this_call; - } - while (buffers_allocated_this_call > 0 - && ((buffers_allocated + n_bufs < VLIB_FRAME_SIZE))); + next_index = smm->session_type_to_next[s->session_type]; + next0 = next1 = next_index; + + tp = session_get_transport_proto (s); + ctx->s = s; + ctx->transport_vft = transport_protocol_get_vft (tp); + ctx->tc = session_tx_get_transport (ctx, peek_data); + ctx->snd_mss = ctx->transport_vft->send_mss (ctx->tc); + ctx->snd_space = ctx->transport_vft->send_space (ctx->tc); + if (ctx->snd_space == 0 || ctx->snd_mss == 0) + { + vec_add1 (smm->pending_event_vector[thread_index], *e); + return 0; + } - n_bufs += buffers_allocated; - _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + /* Allow enqueuing of a new event */ + svm_fifo_unset_event (s->server_tx_fifo); - if (PREDICT_FALSE (n_bufs < VLIB_FRAME_SIZE)) - { - vec_add1 (smm->pending_event_vector[thread_index], *e0); - return -1; - } - } - /* Allow enqueuing of a new event */ - svm_fifo_unset_event (s0->server_tx_fifo); + /* Check how much we can pull. 
*/ + session_tx_set_dequeue_params (vm, ctx, VLIB_FRAME_SIZE - *n_tx_packets, + peek_data); - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - while (left_to_snd0 && n_left_to_next >= n_bufs_per_seg) + if (PREDICT_FALSE (!ctx->max_len_to_snd)) + return 0; + + n_bufs = vec_len (smm->tx_buffers[thread_index]); + n_bufs_needed = ctx->n_segs_per_evt * ctx->n_bufs_per_seg; + + /* + * Make sure we have at least one full frame of buffers ready + */ + if (n_bufs < n_bufs_needed) + { + session_output_try_get_buffers (vm, smm, thread_index, &n_bufs, + ctx->n_bufs_per_seg * VLIB_FRAME_SIZE); + if (PREDICT_FALSE (n_bufs < n_bufs_needed)) { - /* - * Handle first buffer in chain separately - */ - - /* Get free buffer */ - ASSERT (n_bufs >= 1); - bi0 = smm->tx_buffers[thread_index][--n_bufs]; - ASSERT (bi0); - _vec_len (smm->tx_buffers[thread_index]) = n_bufs; - - /* usual speculation, or the enqueue_x1 macro will barf */ - to_next[0] = bi0; - to_next += 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - b0->error = 0; - b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID - | VNET_BUFFER_F_LOCALLY_ORIGINATED; - b0->current_data = 0; - b0->total_length_not_including_first_buffer = 0; - - len_to_deq0 = clib_min (left_to_snd0, deq_per_buf); - - data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN); - if (peek_data) - { - n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, rx_offset, - len_to_deq0, data0); - /* Keep track of progress locally, transport is also supposed to - * increment it independently when pushing the header */ - rx_offset += n_bytes_read; - } - else - { - n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, - len_to_deq0, data0); - } + vec_add1 (smm->pending_event_vector[thread_index], *e); + return -1; + } + } - if (n_bytes_read <= 0) - goto dequeue_fail; + /* + * Write until we fill up a frame + */ + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + if (PREDICT_FALSE (ctx->n_segs_per_evt > n_left_to_next)) + { + ctx->n_segs_per_evt = n_left_to_next; + ctx->max_len_to_snd = ctx->snd_mss * n_left_to_next; + } + ctx->left_to_snd = ctx->max_len_to_snd; + n_left = ctx->n_segs_per_evt; - b0->current_length = n_bytes_read; + while (n_left >= 4) + { + vlib_buffer_t *b0, *b1; + u32 bi0, bi1; - left_to_snd0 -= n_bytes_read; - *n_tx_packets = *n_tx_packets + 1; + pbi = smm->tx_buffers[thread_index][n_bufs - 3]; + pb = vlib_get_buffer (vm, pbi); + vlib_prefetch_buffer_header (pb, STORE); + pbi = smm->tx_buffers[thread_index][n_bufs - 4]; + pb = vlib_get_buffer (vm, pbi); + vlib_prefetch_buffer_header (pb, STORE); - /* - * Fill in the remaining buffers in the chain, if any - */ - if (PREDICT_FALSE (n_bufs_per_seg > 1)) - { - u32 left_for_seg; - left_for_seg = clib_min (snd_mss0 - n_bytes_read, left_to_snd0); - session_tx_fifo_chain_tail (smm, vm, thread_index, - s0->server_tx_fifo, b0, bi0, - n_bufs_per_seg, left_for_seg, - &left_to_snd0, &n_bufs, &rx_offset, - deq_per_buf, peek_data); - } + to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs]; + to_next[1] = bi1 = smm->tx_buffers[thread_index][--n_bufs]; - /* Ask transport to push header after current_length and - * total_length_not_including_first_buffer are updated */ - transport_vft->push_header (tc0, b0); + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); - /* *INDENT-OFF* */ - SESSION_EVT_DBG(SESSION_EVT_DEQ, s0, ({ - ed->data[0] = e0->event_id; - ed->data[1] = max_dequeue0; - ed->data[2] = len_to_deq0; - ed->data[3] = left_to_snd0; - })); - /* *INDENT-ON* */ + 
session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data); + session_tx_fill_buffer (vm, ctx, b1, &n_bufs, peek_data); + ctx->transport_vft->push_header (ctx->tc, b0); + ctx->transport_vft->push_header (ctx->tc, b1); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - if (PREDICT_FALSE (n_trace > 0)) - { - session_queue_trace_t *t0; - vlib_trace_buffer (vm, node, next_index, b0, - 1 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); - t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); - t0->session_index = s0->session_index; - t0->server_thread_index = s0->thread_index; - } + to_next += 2; + n_left_to_next -= 2; + n_left -= 2; - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1); - /* If we couldn't dequeue all bytes mark as partially read */ - if (max_len_to_snd0 < max_dequeue0) + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, next0, + next1); + } + while (n_left) { - /* If we don't already have new event */ - if (svm_fifo_set_event (s0->server_tx_fifo)) - { - vec_add1 (smm->pending_event_vector[thread_index], *e0); - } + vlib_buffer_t *b0; + u32 bi0; + + ASSERT (n_bufs >= 1); + to_next[0] = bi0 = smm->tx_buffers[thread_index][--n_bufs]; + b0 = vlib_get_buffer (vm, bi0); + session_tx_fill_buffer (vm, ctx, b0, &n_bufs, peek_data); + + /* Ask transport to push header after current_length and + * total_length_not_including_first_buffer are updated */ + ctx->transport_vft->push_header (ctx->tc, b0); + + to_next += 1; + n_left_to_next -= 1; + n_left -= 1; + + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); } - return 0; -dequeue_fail: - /* - * Can't read from fifo. 
If we don't already have an event, save as partially - * read, return buff to free list and return - */ - clib_warning ("dequeue fail"); + if (PREDICT_FALSE (n_trace > 0)) + session_tx_trace_frame (vm, node, next_index, to_next, + ctx->n_segs_per_evt, s, n_trace); + + _vec_len (smm->tx_buffers[thread_index]) = n_bufs; + *n_tx_packets += ctx->n_segs_per_evt; + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + + /* If we couldn't dequeue all bytes mark as partially read */ + ASSERT (ctx->left_to_snd == 0); + if (ctx->max_len_to_snd < ctx->max_dequeue) + if (svm_fifo_set_event (s->server_tx_fifo)) + vec_add1 (smm->pending_event_vector[thread_index], *e); - if (svm_fifo_set_event (s0->server_tx_fifo)) + if (!peek_data && ctx->transport_vft->tx_type == TRANSPORT_TX_DGRAM) { - vec_add1 (smm->pending_event_vector[thread_index], *e0); + /* Fix dgram pre header */ + if (ctx->max_len_to_snd < ctx->max_dequeue) + svm_fifo_overwrite_head (s->server_tx_fifo, (u8 *) & ctx->hdr, + sizeof (session_dgram_pre_hdr_t)); + /* More data needs to be read */ + else if (svm_fifo_max_dequeue (s->server_tx_fifo) > 0) + if (svm_fifo_set_event (s->server_tx_fifo)) + vec_add1 (smm->pending_event_vector[thread_index], *e); } - vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); - _vec_len (smm->tx_buffers[thread_index]) += 1; - return 0; } int session_tx_fifo_peek_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, - session_manager_main_t * smm, session_fifo_event_t * e0, - stream_session_t * s0, u32 thread_index, - int *n_tx_pkts) + stream_session_t * s0, int *n_tx_pkts) { - return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index, - n_tx_pkts, 1); + return session_tx_fifo_read_and_snd_i (vm, node, e0, s0, n_tx_pkts, 1); } int session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, - session_manager_main_t * smm, session_fifo_event_t * e0, - stream_session_t * s0, u32 thread_index, - int *n_tx_pkts) + stream_session_t * s0, int *n_tx_pkts) +{ + return session_tx_fifo_read_and_snd_i (vm, node, e0, s0, n_tx_pkts, 0); +} + +int +session_tx_fifo_dequeue_internal (vlib_main_t * vm, + vlib_node_runtime_t * node, + session_fifo_event_t * e0, + stream_session_t * s0, int *n_tx_pkts) { - return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index, - n_tx_pkts, 0); + application_t *app; + app = application_get (s0->opaque); + svm_fifo_unset_event (s0->server_tx_fifo); + return app->cb_fns.builtin_app_tx_callback (s0); } always_inline stream_session_t * session_event_get_session (session_fifo_event_t * e, u8 thread_index) { - ASSERT (e->fifo->master_thread_index == thread_index); - return stream_session_get_if_valid (e->fifo->master_session_index, - thread_index); + return session_get_if_valid (e->fifo->master_session_index, thread_index); } void @@ -407,7 +567,7 @@ dump_thread_0_event_queue (void) int i, index; i8 *headp; - unix_shared_memory_queue_t *q; + svm_queue_t *q; q = smm->vpp_event_queues[my_thread_index]; index = q->head; @@ -425,7 +585,7 @@ dump_thread_0_event_queue (void) break; case FIFO_EVENT_DISCONNECT: - s0 = stream_session_get_from_handle (e->session_handle); + s0 = session_get_from_handle (e->session_handle); fformat (stdout, "[%04d] disconnect session %d\n", i, s0->session_index); break; @@ -468,7 +628,7 @@ session_node_cmp_event (session_fifo_event_t * e, svm_fifo_t * f) case FIFO_EVENT_DISCONNECT: break; case FIFO_EVENT_RPC: - s = stream_session_get_from_handle (e->session_handle); + s = session_get_from_handle 
(e->session_handle); if (!s) { clib_warning ("session has event but doesn't exist!"); @@ -487,7 +647,7 @@ u8 session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e) { session_manager_main_t *smm = vnet_get_session_manager_main (); - unix_shared_memory_queue_t *q; + svm_queue_t *q; session_fifo_event_t *pending_event_vector, *evt; int i, index, found = 0; i8 *headp; @@ -506,7 +666,7 @@ session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e) clib_memcpy (e, headp, q->elsize); found = session_node_cmp_event (e, f); if (found) - break; + return 1; if (++index == q->maxsize) index = 0; } @@ -534,35 +694,36 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, session_fifo_event_t *my_pending_event_vector, *e; session_fifo_event_t *my_fifo_events; u32 n_to_dequeue, n_events; - unix_shared_memory_queue_t *q; + svm_queue_t *q; application_t *app; int n_tx_packets = 0; - u32 my_thread_index = vm->thread_index; + u32 thread_index = vm->thread_index; int i, rv; f64 now = vlib_time_now (vm); void (*fp) (void *); - SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, my_thread_index); + SESSION_EVT_DBG (SESSION_EVT_POLL_GAP_TRACK, smm, thread_index); /* - * Update TCP time + * Update transport time */ - tcp_update_time (now, my_thread_index); + transport_update_time (now, thread_index); /* * Get vpp queue events */ - q = smm->vpp_event_queues[my_thread_index]; + q = smm->vpp_event_queues[thread_index]; if (PREDICT_FALSE (q == 0)) return 0; - my_fifo_events = smm->free_event_vector[my_thread_index]; + my_fifo_events = smm->free_event_vector[thread_index]; /* min number of events we can dequeue without blocking */ n_to_dequeue = q->cursize; - my_pending_event_vector = smm->pending_event_vector[my_thread_index]; + my_pending_event_vector = smm->pending_event_vector[thread_index]; - if (n_to_dequeue == 0 && vec_len (my_pending_event_vector) == 0) + if (!n_to_dequeue && !vec_len (my_pending_event_vector) + && !vec_len (smm->pending_disconnects[thread_index])) return 0; SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0); @@ -585,7 +746,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, for (i = 0; i < n_to_dequeue; i++) { vec_add2 (my_fifo_events, e, 1); - unix_shared_memory_queue_sub_raw (q, (u8 *) e); + svm_queue_sub_raw (q, (u8 *) e); } /* The other side of the connection is not polling */ @@ -594,9 +755,11 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, pthread_mutex_unlock (&q->mutex); vec_append (my_fifo_events, my_pending_event_vector); + vec_append (my_fifo_events, smm->pending_disconnects[thread_index]); _vec_len (my_pending_event_vector) = 0; - smm->pending_event_vector[my_thread_index] = my_pending_event_vector; + smm->pending_event_vector[thread_index] = my_pending_event_vector; + _vec_len (smm->pending_disconnects[thread_index]) = 0; skip_dequeue: n_events = vec_len (my_fifo_events); @@ -606,25 +769,25 @@ skip_dequeue: session_fifo_event_t *e0; e0 = &my_fifo_events[i]; - switch (e0->event_type) { case FIFO_EVENT_APP_TX: - s0 = session_event_get_session (e0, my_thread_index); + if (n_tx_packets == VLIB_FRAME_SIZE) + { + vec_add1 (smm->pending_event_vector[thread_index], *e0); + break; + } - if (CLIB_DEBUG && !s0) + s0 = session_event_get_session (e0, thread_index); + if (PREDICT_FALSE (!s0)) { clib_warning ("It's dead, Jim!"); continue; } - /* Can retransmit for closed sessions but can't do anything if - * session is not ready or closed */ - if (PREDICT_FALSE (s0->session_state < SESSION_STATE_READY)) - 
continue; + /* Spray packets in per session type frames, since they go to * different nodes */ - rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0, - my_thread_index, + rv = (smm->session_tx_fns[s0->session_type]) (vm, node, e0, s0, &n_tx_packets); /* Out of buffers */ if (PREDICT_FALSE (rv < 0)) @@ -635,14 +798,31 @@ skip_dequeue: } break; case FIFO_EVENT_DISCONNECT: - s0 = stream_session_get_from_handle (e0->session_handle); - stream_session_disconnect (s0); + /* Make sure stream disconnects run after the pending list is + * drained */ + s0 = session_get_from_handle (e0->session_handle); + if (!e0->postponed) + { + e0->postponed = 1; + vec_add1 (smm->pending_disconnects[thread_index], *e0); + continue; + } + /* If tx queue is still not empty, wait */ + if (svm_fifo_max_dequeue (s0->server_tx_fifo)) + { + vec_add1 (smm->pending_disconnects[thread_index], *e0); + continue; + } + + stream_session_disconnect_transport (s0); break; case FIFO_EVENT_BUILTIN_RX: - s0 = session_event_get_session (e0, my_thread_index); + s0 = session_event_get_session (e0, thread_index); + if (PREDICT_FALSE (!s0)) + continue; svm_fifo_unset_event (s0->server_rx_fifo); app = application_get (s0->app_index); - app->cb_fns.builtin_server_rx_callback (s0); + app->cb_fns.builtin_app_rx_callback (s0); break; case FIFO_EVENT_RPC: fp = e0->rpc_args.fp; @@ -655,12 +835,12 @@ skip_dequeue: } _vec_len (my_fifo_events) = 0; - smm->free_event_vector[my_thread_index] = my_fifo_events; + smm->free_event_vector[thread_index] = my_fifo_events; vlib_node_increment_counter (vm, session_queue_node.index, SESSION_QUEUE_ERROR_TX, n_tx_packets); - SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 1); + SESSION_EVT_DBG (SESSION_EVT_DISPATCH_END, smm, thread_index); return n_tx_packets; } @@ -674,19 +854,74 @@ VLIB_REGISTER_NODE (session_queue_node) = .type = VLIB_NODE_TYPE_INPUT, .n_errors = ARRAY_LEN (session_queue_error_strings), .error_strings = session_queue_error_strings, - .n_next_nodes = SESSION_QUEUE_N_NEXT, .state = VLIB_NODE_STATE_DISABLED, - .next_nodes = - { - [SESSION_QUEUE_NEXT_DROP] = "error-drop", - [SESSION_QUEUE_NEXT_IP4_LOOKUP] = "ip4-lookup", - [SESSION_QUEUE_NEXT_IP6_LOOKUP] = "ip6-lookup", - [SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT] = "tcp4-output", - [SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT] = "tcp6-output", - }, }; /* *INDENT-ON* */ +static clib_error_t * +session_queue_exit (vlib_main_t * vm) +{ + if (vec_len (vlib_mains) < 2) + return 0; + + /* + * Shut off (especially) worker-thread session nodes. + * Otherwise, vpp can crash as the main thread unmaps the + * API segment. + */ + vlib_worker_thread_barrier_sync (vm); + session_node_enable_disable (0 /* is_enable */ ); + vlib_worker_thread_barrier_release (vm); + return 0; +} + +VLIB_MAIN_LOOP_EXIT_FUNCTION (session_queue_exit); + +static uword +session_queue_process (vlib_main_t * vm, vlib_node_runtime_t * rt, + vlib_frame_t * f) +{ + f64 now, timeout = 1.0; + uword *event_data = 0; + uword event_type; + + while (1) + { + vlib_process_wait_for_event_or_clock (vm, timeout); + now = vlib_time_now (vm); + event_type = vlib_process_get_events (vm, (uword **) & event_data); + + switch (event_type) + { + case SESSION_Q_PROCESS_FLUSH_FRAMES: + /* Flush the frames by updating all transports times */ + transport_update_time (now, 0); + break; + case SESSION_Q_PROCESS_STOP: + timeout = 100000.0; + break; + case ~0: + /* Timed out. Update time for all transports to trigger all + * outstanding retransmits. 
*/ + transport_update_time (now, 0); + break; + } + vec_reset_length (event_data); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (session_queue_process_node) = +{ + .function = session_queue_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "session-queue-process", + .state = VLIB_NODE_STATE_DISABLED, +}; +/* *INDENT-ON* */ + + /* * fd.io coding-style-patch-verification: ON *
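
For reference, the new session_tx_set_dequeue_params() in this patch clamps how many bytes one tx event may dequeue using three limits: the tx fifo occupancy (max_dequeue), the transport's send space and MSS, and the remaining VLIB frame budget. The following is a minimal standalone sketch of just that arithmetic, using plain integers instead of session_tx_context_t; the function name, parameters, and the main() demo are illustrative only and are not part of the patch.

#include <stdio.h>

/* Sketch of the per-event dequeue sizing done by session_tx_set_dequeue_params.
 * Assumes snd_mss > 0 (the caller in the patch bails out earlier otherwise). */
static unsigned int
sketch_max_len_to_snd (unsigned int max_dequeue, unsigned int snd_space,
                       unsigned int snd_mss, unsigned int max_segs,
                       unsigned int *n_segs_per_evt)
{
  unsigned int max_len, n_segs;

  if (max_dequeue < snd_space)
    /* Constrained by the tx fifo: send only fully formed segments */
    max_len = max_dequeue > snd_mss ?
      max_dequeue - max_dequeue % snd_mss : max_dequeue;
  else
    /* Constrained by the transport window, expected to be a multiple of snd_mss */
    max_len = snd_space;

  /* Constrained by the node: cap the number of segments per event */
  n_segs = (max_len + snd_mss - 1) / snd_mss;
  if (n_segs > max_segs)
    {
      n_segs = max_segs;
      max_len = max_segs * snd_mss;
    }
  *n_segs_per_evt = n_segs;
  return max_len;
}

int
main (void)
{
  unsigned int n_segs;
  unsigned int len = sketch_max_len_to_snd (10000 /* fifo bytes */,
                                            65536 /* snd_space */,
                                            1448 /* snd_mss */,
                                            256 /* frame budget */, &n_segs);
  printf ("send %u bytes in %u segments\n", len, n_segs);
  return 0;
}

With these example numbers the fifo is the binding limit, so the sketch trims 10000 bytes down to 8688 (six full 1448-byte segments). Capping both max_len_to_snd and n_segs_per_evt this way is what lets the dispatch loop in session_queue_node_fn requeue an event once n_tx_packets reaches VLIB_FRAME_SIZE instead of letting a single busy session monopolize a frame.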