/* Zero out all flags but those set in SYN */
to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
- | TCP_OPTS_FLAG_TSTAMP | TCP_OPTION_MSS);
+ | TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);
for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
{
/**
 * Check whether a connection that is in congestion recovery has no send
 * space left for (re)transmissions.
 *
 * NOTE(review): this block contained unresolved diff residue (old and new
 * bodies interleaved); resolved in favor of the updated implementation.
 *
 * @param tc connection to check; must be in congestion recovery (asserted)
 * @return nonzero if the applicable send-space estimate is below one
 *         snd_mss plus burst_acked, i.e., effectively no room to send
 */
always_inline u8
tcp_recovery_no_snd_space (tcp_connection_t * tc)
{
  u32 space;

  /* Callers must only ask this while in fast recovery or timer recovery */
  ASSERT (tcp_in_cong_recovery (tc));

  if (tcp_in_recovery (tc))
    /* Timer-based recovery: use the generic output send-space estimate */
    space = tcp_available_output_snd_space (tc);
  else
    /* Fast recovery: use the PRR-computed send quota */
    space = tcp_fastrecovery_prr_snd_space (tc);

  /* burst_acked is added to the threshold — presumably to account for
   * bytes acked during the current dispatch burst that are not yet
   * reflected in the space estimates; TODO(review) confirm intent */
  return (space < tc->snd_mss + tc->burst_acked);
}
/**
{
/* Dequeue the newly ACKed bytes */
session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
- tc->burst_acked = 0;
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
/* Reset the pacer if we've been idle, i.e., no data sent or if
* we're in recovery and snd space constrained */
if (tc->data_segs_out == tc->prev_dsegs_out
- || tcp_recovery_no_snd_space (tc))
- transport_connection_tx_pacer_reset_bucket (&tc->connection,
- wrk->vm->clib_time.
- last_cpu_time);
+ || (tcp_in_cong_recovery (tc) && tcp_recovery_no_snd_space (tc)))
+ transport_connection_tx_pacer_reset_bucket (&tc->connection);
+
tc->prev_dsegs_out = tc->data_segs_out;
+ tc->burst_acked = 0;
}
_vec_len (wrk->pending_deq_acked) = 0;
}
/* Rule (3): if hole not lost */
else if (seq_lt (hole->start, sb->high_sacked))
{
+ /* And we didn't already retransmit it */
+ if (seq_leq (hole->end, sb->high_rxt))
+ {
+ sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+ return 0;
+ }
*snd_limited = 0;
sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
}
sb->sacked_bytes = 0;
sb->last_sacked_bytes = 0;
sb->last_bytes_delivered = 0;
- sb->high_sacked = 0;
- sb->high_rxt = 0;
sb->lost_bytes = 0;
sb->last_lost_bytes = 0;
sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
is_spurious = 1;
}
- tc->rcv_dupacks = 0;
- tc->prr_delivered = 0;
- tc->rxt_delivered = 0;
- tc->snd_rxt_bytes = 0;
- tc->snd_rxt_ts = 0;
- tc->rtt_ts = 0;
- tc->flags &= ~TCP_CONN_RXT_PENDING;
-
tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
+ tc->rcv_dupacks = 0;
/* Previous recovery left us congested. Continue sending as part
* of the current recovery event with an updated snd_congestion */
if (tc->sack_sb.sacked_bytes)
{
tc->snd_congestion = tc->snd_nxt;
- tc->snd_rxt_ts = tcp_tstamp (tc);
- tc->prr_start = tc->snd_una;
- scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
tcp_program_retransmit (tc);
return is_spurious;
}
+ tc->rxt_delivered = 0;
+ tc->snd_rxt_bytes = 0;
+ tc->snd_rxt_ts = 0;
+ tc->prr_delivered = 0;
+ tc->rtt_ts = 0;
+ tc->flags &= ~TCP_CONN_RXT_PENDING;
+
hole = scoreboard_first_hole (&tc->sack_sb);
if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
scoreboard_clear (&tc->sack_sb);
}
/*
- * Already in recovery. See if we can exit and stop retransmitting
+ * Already in recovery
*/
- if (seq_geq (tc->snd_una, tc->snd_congestion))
- {
- /* If spurious return, we've already updated everything */
- if (tcp_cc_recover (tc))
- {
- tc->tsecr_last_ack = tc->rcv_opts.tsecr;
- return;
- }
-
- /* Treat as congestion avoidance ack */
- tcp_cc_rcv_ack (tc, rs);
- return;
- }
-
/*
* Process (re)transmit feedback. Output path uses this to decide how much
* more data to release into the network
*/
if (has_sack)
{
+ if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
+ tcp_fastrecovery_first_on (tc);
+
tc->rxt_delivered += tc->sack_sb.rxt_sacked;
tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
- tc->sack_sb.last_bytes_delivered;
tc->rxt_delivered = clib_max (tc->rxt_delivered + tc->bytes_acked,
tc->snd_rxt_bytes);
if (is_dack)
- tc->prr_delivered += 1;
+ tc->prr_delivered += clib_min (tc->snd_mss,
+ tc->snd_nxt - tc->snd_una);
else
- tc->prr_delivered += tc->bytes_acked - tc->snd_mss * tc->rcv_dupacks;
+ tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
+ tc->snd_mss *
+ tc->rcv_dupacks);
/* If partial ack, assume that the first un-acked segment was lost */
if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
tcp_program_retransmit (tc);
}
+ /*
+ * See if we can exit and stop retransmitting
+ */
+ if (seq_geq (tc->snd_una, tc->snd_congestion))
+ {
+ /* If spurious return, we've already updated everything */
+ if (tcp_cc_recover (tc))
+ {
+ tc->tsecr_last_ack = tc->rcv_opts.tsecr;
+ return;
+ }
+
+ /* Treat as congestion avoidance ack */
+ tcp_cc_rcv_ack (tc, rs);
+ return;
+ }
+
/*
* Notify cc of the event
*/
/* Account for the FIN and send ack */
tc->rcv_nxt += 1;
+ tc->flags |= TCP_CONN_FINRCVD;
tcp_program_ack (tc);
/* Enter CLOSE-WAIT and notify session. To avoid lingering
* in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
static u8
-tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
+tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
+ tcp_header_t * hdr)
{
transport_connection_t *tmp = 0;
u64 handle;
if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
return 1;
+ u8 is_ip_valid = 0, val_l, val_r;
+
+ if (tc->connection.is_ip4)
+ {
+ ip4_header_t *ip4_hdr = (ip4_header_t *) vlib_buffer_get_current (b);
+
+ val_l = !ip4_address_compare (&ip4_hdr->dst_address,
+ &tc->connection.lcl_ip.ip4);
+ val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
+ val_r = !ip4_address_compare (&ip4_hdr->src_address,
+ &tc->connection.rmt_ip.ip4);
+ val_r = val_r || tc->state == TCP_STATE_LISTEN;
+ is_ip_valid = val_l && val_r;
+ }
+ else
+ {
+ ip6_header_t *ip6_hdr = (ip6_header_t *) vlib_buffer_get_current (b);
+
+ val_l = !ip6_address_compare (&ip6_hdr->dst_address,
+ &tc->connection.lcl_ip.ip6);
+ val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
+ val_r = !ip6_address_compare (&ip6_hdr->src_address,
+ &tc->connection.rmt_ip.ip6);
+ val_r = val_r || tc->state == TCP_STATE_LISTEN;
+ is_ip_valid = val_l && val_r;
+ }
+
u8 is_valid = (tc->c_lcl_port == hdr->dst_port
&& (tc->state == TCP_STATE_LISTEN
- || tc->c_rmt_port == hdr->src_port));
+ || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
if (!is_valid)
{
&& tmp->rmt_port == hdr->src_port)
{
TCP_DBG ("half-open is valid!");
+ is_valid = 1;
}
}
}
TRANSPORT_PROTO_TCP,
thread_index, &is_filtered);
tc = tcp_get_connection_from_transport (tconn);
- ASSERT (tcp_lookup_is_valid (tc, tcp));
+ ASSERT (tcp_lookup_is_valid (tc, b, tcp));
}
else
{
TRANSPORT_PROTO_TCP,
thread_index, &is_filtered);
tc = tcp_get_connection_from_transport (tconn);
- ASSERT (tcp_lookup_is_valid (tc, tcp));
+ ASSERT (tcp_lookup_is_valid (tc, b, tcp));
}
return tc;
}
}
else
{
- tcp_program_ack (new_tc0);
+ /* Send ack now instead of programming it because connection was
+ * just established and it's not optional. */
+ tcp_send_ack (new_tc0);
}
drop:
if (CLIB_DEBUG)
{
- tcp_connection_t *tmp;
- tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
- is_ip4);
- if (tmp->state != tc0->state)
+ if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
{
- if (tc0->state != TCP_STATE_CLOSED)
- clib_warning ("state changed");
- goto drop;
+ tcp_connection_t *tmp;
+ tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
+ is_ip4);
+ if (tmp->state != tc0->state)
+ {
+ if (tc0->state != TCP_STATE_CLOSED)
+ clib_warning ("state changed");
+ goto drop;
+ }
}
}
if (PREDICT_TRUE (!tc0 + !tc1 == 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
- ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
{
if (PREDICT_TRUE (tc0 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
}
if (PREDICT_TRUE (tc1 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
+ ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
}
is_nolookup);
if (PREDICT_TRUE (tc0 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
}