*data++ = TCP_OPTION_MSS;
*data++ = TCP_OPTION_LEN_MSS;
buf = clib_host_to_net_u16 (opts->mss);
- clib_memcpy (data, &buf, sizeof (opts->mss));
+ clib_memcpy_fast (data, &buf, sizeof (opts->mss));
data += sizeof (opts->mss);
opts_len += TCP_OPTION_LEN_MSS;
}
*data++ = TCP_OPTION_TIMESTAMP;
*data++ = TCP_OPTION_LEN_TIMESTAMP;
buf = clib_host_to_net_u32 (opts->tsval);
- clib_memcpy (data, &buf, sizeof (opts->tsval));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsval));
data += sizeof (opts->tsval);
buf = clib_host_to_net_u32 (opts->tsecr);
- clib_memcpy (data, &buf, sizeof (opts->tsecr));
+ clib_memcpy_fast (data, &buf, sizeof (opts->tsecr));
data += sizeof (opts->tsecr);
opts_len += TCP_OPTION_LEN_TIMESTAMP;
}
for (i = 0; i < n_sack_blocks; i++)
{
buf = clib_host_to_net_u32 (opts->sacks[i].start);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
buf = clib_host_to_net_u32 (opts->sacks[i].end);
- clib_memcpy (data, &buf, seq_len);
+ clib_memcpy_fast (data, &buf, seq_len);
data += seq_len;
}
opts_len += 2 + n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
tcp_reuse_buffer (vm, b);
tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc);
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
tc->rcv_las = tc->rcv_nxt;
}
tcp_options_write ((u8 *) (th + 1), snd_opts);
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
- vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK;
/* Init retransmit timer. Use update instead of set because of
* retransmissions */
{
ih6 = vlib_buffer_get_current (b0);
ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
- clib_memcpy (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
- clib_memcpy (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
+ clib_memcpy_fast (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
}
src_port = th0->src_port;
tcp_make_syn (tc, b);
/* Measure RTT with this */
- tc->rtt_ts = tcp_time_now ();
+ tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
tc->rtt_seq = tc->snd_nxt;
tc->rto_boff = 0;
TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
}
+/**
+ * Build and send a SYN-ACK for connection @a tc.
+ *
+ * Allocates a buffer from the connection's worker context, fills in the
+ * SYN-ACK and enqueues it to the tcp output path. If no buffer is
+ * available the send is silently dropped; presumably the retransmit
+ * timer recovers — TODO confirm against caller.
+ */
+void
+tcp_send_synack (tcp_connection_t * tc)
+{
+  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+  vlib_main_t *vm = wrk->vm;
+  vlib_buffer_t *b;
+  u32 bi;
+
+  /* Get buffer */
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (wrk, &bi)))
+    return;
+
+  b = vlib_get_buffer (vm, bi);
+  tcp_make_synack (tc, b);
+  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+}
+
/**
* Flush tx frame populated by retransmits and timer pops
*/
if (maybe_burst)
{
- clib_memcpy ((u8 *) (th + 1),
- tm->wrk_ctx[tc->c_thread_index].cached_opts,
- tc->snd_opts_len);
+ clib_memcpy_fast ((u8 *) (th + 1),
+ tm->wrk_ctx[tc->c_thread_index].cached_opts,
+ tc->snd_opts_len);
}
else
{
tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, /* compute opts */ 0,
/* burst */ 1);
tc->snd_una_max = tc->snd_nxt;
- ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd
- + tcp_fastrecovery_sent_1_smss (tc) * tc->snd_mss));
+ ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd));
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
/* If not tracking an ACK, start tracking */
if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
+/**
+ * Schedule an ack for @a tc, to be sent when tcp_send_acks runs.
+ *
+ * Adds the connection to the worker's pending-acks list at most once:
+ * TCP_CONN_SNDACK marks list membership and is cleared when the list
+ * is drained.
+ */
+void
+tcp_program_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+  if (!(tc->flags & TCP_CONN_SNDACK))
+    {
+      vec_add1 (wrk->pending_acks, tc->c_c_index);
+      tc->flags |= TCP_CONN_SNDACK;
+    }
+}
+
+/**
+ * Schedule a duplicate ack for @a tc.
+ *
+ * Puts the connection on the worker's pending-acks list (exactly the
+ * same scheduling as tcp_program_ack) and bumps the dupack counter,
+ * saturating at 255 so the counter cannot wrap.
+ */
+void
+tcp_program_dupack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+  /* Reuse the plain-ack scheduling instead of duplicating the
+   * list/flag logic */
+  tcp_program_ack (wrk, tc);
+  if (tc->pending_dupacks < 255)
+    tc->pending_dupacks += 1;
+}
+
+/**
+ * Drain the worker's pending-acks list and send the acks.
+ *
+ * For each programmed connection: clear the SNDACK membership flag and
+ * send one ack, or pending_dupacks acks when dupacks were requested.
+ * If dupacks were requested but there are no sack blocks left to
+ * advertise, a single ack carries all the information, so only one is
+ * sent.
+ */
+void
+tcp_send_acks (tcp_worker_ctx_t * wrk)
+{
+  u32 thread_index, *pending_acks;
+  tcp_connection_t *tc;
+  int i, j, n_acks;
+
+  if (!vec_len (wrk->pending_acks))
+    return;
+
+  thread_index = wrk->vm->thread_index;
+  pending_acks = wrk->pending_acks;
+  for (i = 0; i < vec_len (pending_acks); i++)
+    {
+      tc = tcp_connection_get (pending_acks[i], thread_index);
+      /* Connection may have been closed/freed after the ack was
+       * programmed; skip stale indices instead of dereferencing */
+      if (PREDICT_FALSE (!tc))
+	continue;
+      tc->flags &= ~TCP_CONN_SNDACK;
+      n_acks = clib_max (1, tc->pending_dupacks);
+      /* If we're supposed to send dupacks but have no ooo data
+       * send only one ack */
+      if (tc->pending_dupacks && !vec_len (tc->snd_sacks))
+	n_acks = 1;
+      for (j = 0; j < n_acks; j++)
+	tcp_send_ack (tc);
+      tc->pending_dupacks = 0;
+    }
+  _vec_len (wrk->pending_acks) = 0;
+}
+
/**
* Delayed ack timer handler
*
}
}
- tcp_get_free_buffer_index (wrk, &bi);
+ (void) tcp_get_free_buffer_index (wrk, &bi);
ASSERT (bi != (u32) ~ 0);
*b = vlib_get_buffer (vm, bi);
data = tcp_init_buffer (vm, *b);
/* Problem already solved or worse */
if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED
- || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc))
+ || tc->snd_wnd > tc->snd_mss)
return;
available_bytes = session_tx_fifo_max_dequeue (&tc->connection);
/* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
snd_space = tcp_available_cc_snd_space (tc);
- if (snd_space < tc->snd_mss)
+ if (snd_space < tc->snd_mss || tc->snd_mss == 0)
goto done;
max_deq = session_tx_fifo_max_dequeue (&tc->connection);
return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
}
-static u32
-tcp_session_has_ooo_data (tcp_connection_t * tc)
-{
- stream_session_t *s = session_get (tc->c_s_index, tc->c_thread_index);
- return svm_fifo_has_ooo_data (s->server_rx_fifo);
-}
-
static void
tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
u16 * next0, u32 * error0)
tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
vm->thread_index);
t = vlib_add_trace (vm, node, b, sizeof (*t));
- clib_memcpy (&t->tcp_header, th, sizeof (t->tcp_header));
- clib_memcpy (&t->tcp_connection, tc, sizeof (t->tcp_connection));
+ clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
+ clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
}
}
tcp_output_handle_link_local (tc0, b0, next0, error0);
}
- /* Filter out DUPACKs if there are no OOO segments left */
- if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK))
- {
- /* N.B. Should not filter burst of dupacks. Two issues:
- * 1) dupacks open cwnd on remote peer when congested
- * 2) acks leaving should have the latest rcv_wnd since the
- * burst may have eaten up all of it, so only the old ones
- * could be filtered.
- */
- if (!tcp_session_has_ooo_data (tc0))
- {
- *error0 = TCP_ERROR_FILTERED_DUPACKS;
- *next0 = TCP_OUTPUT_NEXT_DROP;
- return;
- }
- }
-
- /* Stop DELACK timer and fix flags */
- tc0->flags &= ~(TCP_CONN_SNDACK);
if (!TCP_ALWAYS_ACK)
tcp_timer_reset (tc0, TCP_TIMER_DELACK);
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_output_node) =
{
-  .function = tcp4_output,.name = "tcp4-output",
-  /* Takes a vector of packets. */
-  .vector_size = sizeof (u32),
-  .n_errors = TCP_N_ERROR,
-  .error_strings = tcp_error_strings,
-  .n_next_nodes = TCP_OUTPUT_N_NEXT,
-  .next_nodes = {
+  .function = tcp4_output,
+  .name = "tcp4-output",
+  /* Takes a vector of packets. */
+  .vector_size = sizeof (u32),
+  .n_errors = TCP_N_ERROR,
+  /* NOTE(review): hints to vlib that frames from this node carry TCP;
+   * confirm intended effect against the VLIB_NODE_PROTO_HINT docs */
+  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
+  .error_strings = tcp_error_strings,
+  .n_next_nodes = TCP_OUTPUT_N_NEXT,
+  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp4_output_next
#undef _
-  },
-  .format_buffer = format_tcp_header,
-  .format_trace = format_tcp_tx_trace,
+  },
+  .format_buffer = format_tcp_header,
+  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
.n_errors = TCP_N_ERROR,
+ .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
.error_strings = tcp_error_strings,
.n_next_nodes = TCP_OUTPUT_N_NEXT,
.next_nodes = {
else
th0 = ip6_next_header ((ip6_header_t *) th0);
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_header, th0,
+ sizeof (t0->tcp_header));
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,