*/
#include <vppinfra/sparse_vec.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
/* Zero out all flags but those set in SYN */
to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
- | TCP_OPTS_FLAG_TSTAMP | TCP_OPTION_MSS);
+ | TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);
for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
{
}
}
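+/**
+ * Notify session layer of a reset, based on the state the connection
+ * was in when the reset was received (saved in rst_state)
+ */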
+static void
+tcp_handle_rst (tcp_connection_t * tc)
+{
+ switch (tc->rst_state)
+ {
+ case TCP_STATE_SYN_RCVD:
+ /* Cleanup everything. App wasn't notified yet */
+ session_transport_delete_notify (&tc->connection);
+ tcp_connection_cleanup (tc);
+ break;
+ case TCP_STATE_SYN_SENT:
+ session_stream_connect_notify (&tc->connection, 1 /* fail */ );
+ tcp_connection_cleanup (tc);
+ break;
+ case TCP_STATE_ESTABLISHED:
+ session_transport_reset_notify (&tc->connection);
+ session_transport_closed_notify (&tc->connection);
+ break;
+ case TCP_STATE_CLOSE_WAIT:
+ case TCP_STATE_FIN_WAIT_1:
+ case TCP_STATE_FIN_WAIT_2:
+ case TCP_STATE_CLOSING:
+ case TCP_STATE_LAST_ACK:
+ session_transport_closed_notify (&tc->connection);
+ break;
+ case TCP_STATE_CLOSED:
+ case TCP_STATE_TIME_WAIT:
+ break;
+ default:
+ TCP_DBG ("reset state: %u", tc->state);
+ }
+}
+
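+/**
+ * Program a reset notification to be delivered later by
+ * @ref tcp_handle_disconnects. The current state is saved in rst_state
+ * so the right notification can be generated once the frame is done
+ */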
+static void
+tcp_program_reset_ntf (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ if (!tcp_disconnect_pending (tc))
+ {
+ tc->rst_state = tc->state;
+ vec_add1 (wrk->pending_resets, tc->c_c_index);
+ tcp_disconnect_pending_on (tc);
+ }
+}
+
+/**
+ * Handle reset packet
+ *
+ * Programs a disconnect/reset notification that is sent later by
+ * @ref tcp_handle_disconnects
+ */
+static void
+tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ TCP_EVT (TCP_EVT_RST_RCVD, tc);
+ switch (tc->state)
+ {
+ case TCP_STATE_SYN_RCVD:
+ tcp_program_reset_ntf (wrk, tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ break;
+    case TCP_STATE_SYN_SENT:
+      /* Do not program ntf because the connection is half-open */
+      tc->rst_state = tc->state;
+      tcp_handle_rst (tc);
+ break;
+ case TCP_STATE_ESTABLISHED:
+ tcp_connection_timers_reset (tc);
+ tcp_cong_recovery_off (tc);
+ tcp_program_reset_ntf (wrk, tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ tcp_program_cleanup (wrk, tc);
+ break;
+ case TCP_STATE_CLOSE_WAIT:
+ case TCP_STATE_FIN_WAIT_1:
+ case TCP_STATE_FIN_WAIT_2:
+ case TCP_STATE_CLOSING:
+ case TCP_STATE_LAST_ACK:
+ tcp_connection_timers_reset (tc);
+ tcp_cong_recovery_off (tc);
+ tcp_program_reset_ntf (wrk, tc);
+      /* Make sure we mark the session as closed. In some states we may
+       * still be trying to send data */
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ tcp_program_cleanup (wrk, tc);
+ break;
+ case TCP_STATE_CLOSED:
+ case TCP_STATE_TIME_WAIT:
+ break;
+ default:
+ TCP_DBG ("reset state: %u", tc->state);
+ }
+}
+
/**
* Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
*
if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
{
*error0 = TCP_ERROR_PAWS;
- TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
- vnet_buffer (b0)->tcp.seq_end);
+ TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
+ vnet_buffer (b0)->tcp.seq_end);
/* If it just so happens that a segment updates tsval_recent for a
* segment over 24 days old, invalidate tsval_recent. */
else if (!tcp_rst (th0))
{
tcp_program_ack (tc0);
- TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
+ TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
goto error;
}
}
if (tc0->state == TCP_STATE_SYN_RCVD)
{
tcp_send_synack (tc0);
- TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
+ TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
*error0 = TCP_ERROR_SYNS_RCVD;
}
else
{
tcp_program_ack (tc0);
- TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
+ TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
*error0 = TCP_ERROR_SYN_ACKS_RCVD;
}
goto error;
*error0 = TCP_ERROR_RCV_WND;
+ /* If we advertised a zero rcv_wnd and the segment is in the past or the
+ * next one that we expect, it is probably a window probe */
+ if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
+ && seq_lt (vnet_buffer (b0)->tcp.seq_end,
+ tc0->rcv_las + tc0->rcv_opts.mss))
+ *error0 = TCP_ERROR_ZERO_RWND;
+
tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
tc0->rcv_las);
if (!tcp_rst (th0))
{
tcp_program_dupack (tc0);
- TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
+ TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
}
goto error;
/* 2nd: check the RST bit */
if (PREDICT_FALSE (tcp_rst (th0)))
{
- tcp_connection_reset (tc0);
+ tcp_rcv_rst (wrk, tc0);
*error0 = TCP_ERROR_RST_RCVD;
goto error;
}
* return 1 if valid rtt 0 otherwise
*/
static int
-tcp_update_rtt (tcp_connection_t * tc, u32 ack)
+tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
{
u32 mrtt = 0;
/* Karn's rule, part 1. Don't use retransmitted segments to estimate
* RTT because they're ambiguous. */
- if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
+ if (tcp_in_cong_recovery (tc))
{
- if (tcp_in_recovery (tc))
- return 0;
+ /* Accept rtt estimates for samples that have not been retransmitted */
+ if ((tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ && !(rs->flags & TCP_BTS_IS_RXT))
+ {
+ mrtt = rs->rtt_time * THZ;
+ goto estimate_rtt;
+ }
goto done;
}
mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
}
+estimate_rtt:
+
/* Ignore dubious measurements */
if (mrtt == 0 || mrtt > TCP_RTT_MAX)
goto done;
/* Dequeue the newly ACKed bytes */
session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
- tc->burst_acked = 0;
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
tc->flags &= ~TCP_CONN_PSH_PENDING;
}
+ if (tcp_is_descheduled (tc))
+ tcp_reschedule (tc);
+
/* If everything has been acked, stop retransmit timer
* otherwise update. */
tcp_retransmit_timer_update (tc);
- /* If not congested, update pacer based on our new
- * cwnd estimate */
- if (!tcp_in_fastrecovery (tc))
- tcp_connection_tx_pacer_update (tc);
+ /* Update pacer based on our new cwnd estimate */
+ tcp_connection_tx_pacer_update (tc);
+
+ tc->burst_acked = 0;
}
_vec_len (wrk->pending_deq_acked) = 0;
}
vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
tc->flags |= TCP_CONN_DEQ_PENDING;
}
- tc->burst_acked += tc->bytes_acked + tc->sack_sb.snd_una_adv;
-}
-
-/**
- * Check if duplicate ack as per RFC5681 Sec. 2
- */
-static u8
-tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
- u32 prev_snd_una)
-{
- return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
- && seq_gt (tc->snd_nxt, tc->snd_una)
- && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
- && (prev_snd_wnd == tc->snd_wnd));
-}
-
-/**
- * Checks if ack is a congestion control event.
- */
-static u8
-tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
- u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
-{
- /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
- * defined to be 'duplicate' */
- *is_dack = tc->sack_sb.last_sacked_bytes
- || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
-
- return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
+ tc->burst_acked += tc->bytes_acked;
}
#ifndef CLIB_MARCH_VARIANT
return hole;
}
-#endif /* CLIB_MARCH_VARIANT */
-#ifndef CLIB_MARCH_VARIANT
-static void
-scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
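+/**
+ * Account for retransmitted bytes covered by the newly sacked range
+ * [start, end). Only bytes below high_rxt can have been retransmitted
+ */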
+always_inline void
+scoreboard_update_sacked_rxt (sack_scoreboard_t * sb, u32 start, u32 end,
+ u8 has_rxt)
+{
+ if (!has_rxt || seq_geq (start, sb->high_rxt))
+ return;
+
+ sb->rxt_sacked +=
+ seq_lt (end, sb->high_rxt) ? (end - start) : (sb->high_rxt - start);
+}
+
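+/**
+ * Recompute sacked and lost byte counts by walking the holes from
+ * highest to lowest. Once (TCP_DUPACK_THRESHOLD - 1) * snd_mss bytes
+ * or TCP_DUPACK_THRESHOLD blocks have been sacked above a hole, that
+ * hole and all holes below it are marked lost (RFC6675 IsLost)
+ */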
+always_inline void
+scoreboard_update_bytes (sack_scoreboard_t * sb, u32 ack, u32 snd_mss)
{
sack_scoreboard_hole_t *left, *right;
- u32 bytes = 0, blks = 0;
+ u32 sacked = 0, blks = 0, old_sacked;
+
+ old_sacked = sb->sacked_bytes;
sb->last_lost_bytes = 0;
sb->lost_bytes = 0;
sb->sacked_bytes = 0;
- left = scoreboard_last_hole (sb);
- if (!left)
- return;
- if (seq_gt (sb->high_sacked, left->end))
+ right = scoreboard_last_hole (sb);
+ if (!right)
{
- bytes = sb->high_sacked - left->end;
+ sb->sacked_bytes = sb->high_sacked - ack;
+ sb->last_sacked_bytes = sb->sacked_bytes
+ - (old_sacked - sb->last_bytes_delivered);
+ return;
+ }
+
+ if (seq_gt (sb->high_sacked, right->end))
+ {
+ sacked = sb->high_sacked - right->end;
blks = 1;
}
- while ((right = left)
- && bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
- && blks < TCP_DUPACK_THRESHOLD
- /* left not updated if above conditions fail */
- && (left = scoreboard_prev_hole (sb, right)))
+ while (sacked < (TCP_DUPACK_THRESHOLD - 1) * snd_mss
+ && blks < TCP_DUPACK_THRESHOLD)
{
- bytes += right->start - left->end;
+ if (right->is_lost)
+ sb->lost_bytes += scoreboard_hole_bytes (right);
+
+ left = scoreboard_prev_hole (sb, right);
+ if (!left)
+ {
+ ASSERT (right->start == ack || sb->is_reneging);
+ sacked += right->start - ack;
+ right = 0;
+ break;
+ }
+
+ sacked += right->start - left->end;
blks++;
+ right = left;
}
- /* left is first lost */
- if (left)
+ /* right is first lost */
+ while (right)
{
- do
+ sb->lost_bytes += scoreboard_hole_bytes (right);
+ sb->last_lost_bytes += right->is_lost ? 0 : (right->end - right->start);
+ right->is_lost = 1;
+ left = scoreboard_prev_hole (sb, right);
+ if (!left)
{
- sb->lost_bytes += scoreboard_hole_bytes (right);
- sb->last_lost_bytes += left->is_lost ? 0 : left->end - left->start;
- left->is_lost = 1;
- left = scoreboard_prev_hole (sb, right);
- if (left)
- bytes += right->start - left->end;
+ ASSERT (right->start == ack || sb->is_reneging);
+ sacked += right->start - ack;
+ break;
}
- while ((right = left));
+ sacked += right->start - left->end;
+ right = left;
}
- sb->sacked_bytes = bytes;
+ sb->sacked_bytes = sacked;
+ sb->last_sacked_bytes = sacked - (old_sacked - sb->last_bytes_delivered);
}
/**
/* Rule (3): if hole not lost */
else if (seq_lt (hole->start, sb->high_sacked))
{
+ /* And we didn't already retransmit it */
+ if (seq_leq (hole->end, sb->high_rxt))
+ {
+ sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+ return 0;
+ }
*snd_limited = 0;
sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
}
return hole;
}
-#endif /* CLIB_MARCH_VARIANT */
-static void
-scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 snd_una)
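+/**
+ * (Re)initialize the scoreboard's retransmit state at the start of a
+ * recovery episode
+ */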
+void
+scoreboard_init_rxt (sack_scoreboard_t * sb, u32 snd_una)
{
sack_scoreboard_hole_t *hole;
hole = scoreboard_first_hole (sb);
sb->rescue_rxt = snd_una - 1;
}
-#ifndef CLIB_MARCH_VARIANT
void
scoreboard_init (sack_scoreboard_t * sb)
{
sb->sacked_bytes = 0;
sb->last_sacked_bytes = 0;
sb->last_bytes_delivered = 0;
- sb->snd_una_adv = 0;
- sb->high_sacked = 0;
- sb->high_rxt = 0;
sb->lost_bytes = 0;
sb->last_lost_bytes = 0;
sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
+ sb->is_reneging = 0;
+}
+
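+/**
+ * Receiver is reneging on previously sacked data. Reset the scoreboard
+ * to a single lost hole covering [start, end)
+ */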
+void
+scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end)
+{
+ sack_scoreboard_hole_t *last_hole;
+
+ clib_warning ("sack reneging");
+
+ scoreboard_clear (sb);
+ last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
+ start, end);
+ last_hole->is_lost = 1;
+ sb->tail = scoreboard_hole_index (sb, last_hole);
+ sb->high_sacked = start;
+ scoreboard_init_rxt (sb, start);
}
+
#endif /* CLIB_MARCH_VARIANT */
/**
void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
- sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
- u32 blk_index = 0, old_sacked_bytes, hole_index;
+ sack_scoreboard_hole_t *hole, *next_hole;
sack_scoreboard_t *sb = &tc->sack_sb;
- sack_block_t *blk, tmp;
- int i, j;
+ sack_block_t *blk, *rcv_sacks;
+ u32 blk_index = 0, i, j;
+ u8 has_rxt;
sb->last_sacked_bytes = 0;
sb->last_bytes_delivered = 0;
- sb->snd_una_adv = 0;
+ sb->rxt_sacked = 0;
- if (!tcp_opts_sack (&tc->rcv_opts)
+ if (!tcp_opts_sack (&tc->rcv_opts) && !sb->sacked_bytes
&& sb->head == TCP_INVALID_SACK_HOLE_INDEX)
return;
- old_sacked_bytes = sb->sacked_bytes;
+ has_rxt = tcp_in_cong_recovery (tc);
/* Remove invalid blocks */
blk = tc->rcv_opts.sacks;
{
if (seq_lt (blk->start, blk->end)
&& seq_gt (blk->start, tc->snd_una)
- && seq_gt (blk->start, ack)
- && seq_lt (blk->start, tc->snd_nxt)
- && seq_leq (blk->end, tc->snd_nxt))
+ && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt))
{
blk++;
continue;
/* Add block for cumulative ack */
if (seq_gt (ack, tc->snd_una))
{
- tmp.start = tc->snd_una;
- tmp.end = ack;
- vec_add1 (tc->rcv_opts.sacks, tmp);
+ vec_add2 (tc->rcv_opts.sacks, blk, 1);
+ blk->start = tc->snd_una;
+ blk->end = ack;
}
if (vec_len (tc->rcv_opts.sacks) == 0)
tcp_scoreboard_trace_add (tc, ack);
/* Make sure blocks are ordered */
- for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
- for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
- if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
+ rcv_sacks = tc->rcv_opts.sacks;
+ for (i = 0; i < vec_len (rcv_sacks); i++)
+ for (j = i + 1; j < vec_len (rcv_sacks); j++)
+ if (seq_lt (rcv_sacks[j].start, rcv_sacks[i].start))
{
- tmp = tc->rcv_opts.sacks[i];
- tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
- tc->rcv_opts.sacks[j] = tmp;
+ sack_block_t tmp = rcv_sacks[i];
+ rcv_sacks[i] = rcv_sacks[j];
+ rcv_sacks[j] = tmp;
}
if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
{
- /* If no holes, insert the first that covers all outstanding bytes */
- last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
- tc->snd_una, tc->snd_nxt);
- sb->tail = scoreboard_hole_index (sb, last_hole);
- tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
- sb->high_sacked = tmp.end;
+ /* Handle reneging as a special case */
+ if (PREDICT_FALSE (sb->is_reneging))
+ {
+ /* No holes, only sacked bytes */
+ if (seq_leq (tc->snd_nxt, sb->high_sacked))
+ {
+ /* No progress made so return */
+ if (seq_leq (ack, tc->snd_una))
+ return;
+
+ /* Update sacked bytes delivered and return */
+ sb->last_bytes_delivered = ack - tc->snd_una;
+ sb->sacked_bytes -= sb->last_bytes_delivered;
+ sb->is_reneging = seq_lt (ack, sb->high_sacked);
+ return;
+ }
+
+ /* New hole above high sacked. Add it and process normally */
+ hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
+ sb->high_sacked, tc->snd_nxt);
+ sb->tail = scoreboard_hole_index (sb, hole);
+ }
+      /* Not reneging and no holes. Insert the first hole, covering all
+       * outstanding bytes */
+ else
+ {
+ hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
+ tc->snd_una, tc->snd_nxt);
+ sb->tail = scoreboard_hole_index (sb, hole);
+ }
+ sb->high_sacked = rcv_sacks[vec_len (rcv_sacks) - 1].end;
}
else
{
- /* If we have holes but snd_una_max is beyond the last hole, update
- * last hole end */
- tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
- last_hole = scoreboard_last_hole (sb);
- if (seq_gt (tc->snd_nxt, last_hole->end))
+ /* If we have holes but snd_nxt is beyond the last hole, update
+ * last hole end or add new hole after high sacked */
+ hole = scoreboard_last_hole (sb);
+ if (seq_gt (tc->snd_nxt, hole->end))
{
- if (seq_geq (last_hole->start, sb->high_sacked))
+ if (seq_geq (hole->start, sb->high_sacked))
{
- last_hole->end = tc->snd_nxt;
+ hole->end = tc->snd_nxt;
}
/* New hole after high sacked block */
else if (seq_lt (sb->high_sacked, tc->snd_nxt))
tc->snd_nxt);
}
}
+
/* Keep track of max byte sacked for when the last hole
* is acked */
- if (seq_gt (tmp.end, sb->high_sacked))
- sb->high_sacked = tmp.end;
+ sb->high_sacked = seq_max (rcv_sacks[vec_len (rcv_sacks) - 1].end,
+ sb->high_sacked);
}
/* Walk the holes with the SACK blocks */
hole = pool_elt_at_index (sb->holes, sb->head);
- while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
+
+ if (PREDICT_FALSE (sb->is_reneging))
+ {
+ sb->last_bytes_delivered += clib_min (hole->start - tc->snd_una,
+ ack - tc->snd_una);
+ sb->is_reneging = seq_lt (ack, hole->start);
+ }
+
+ while (hole && blk_index < vec_len (rcv_sacks))
{
- blk = &tc->rcv_opts.sacks[blk_index];
+ blk = &rcv_sacks[blk_index];
if (seq_leq (blk->start, hole->start))
{
/* Block covers hole. Remove hole */
{
next_hole = scoreboard_next_hole (sb, hole);
- /* Byte accounting: snd_una needs to be advanced */
+ /* If covered by ack, compute delivered bytes */
if (blk->end == ack)
{
- if (next_hole)
+ u32 sacked = next_hole ? next_hole->start : sb->high_sacked;
+ if (PREDICT_FALSE (seq_lt (ack, sacked)))
{
- if (seq_lt (ack, next_hole->start))
- sb->snd_una_adv = next_hole->start - ack;
- sb->last_bytes_delivered +=
- next_hole->start - hole->end;
+ sb->last_bytes_delivered += ack - hole->end;
+ sb->is_reneging = 1;
}
else
{
- ASSERT (seq_geq (sb->high_sacked, ack));
- sb->snd_una_adv = sb->high_sacked - ack;
- sb->last_bytes_delivered += sb->high_sacked - hole->end;
+ sb->last_bytes_delivered += sacked - hole->end;
+ sb->is_reneging = 0;
}
}
+ scoreboard_update_sacked_rxt (sb, hole->start, hole->end,
+ has_rxt);
scoreboard_remove_hole (sb, hole);
hole = next_hole;
}
{
if (seq_gt (blk->end, hole->start))
{
+ scoreboard_update_sacked_rxt (sb, hole->start, blk->end,
+ has_rxt);
hole->start = blk->end;
}
blk_index++;
/* Hole must be split */
if (seq_lt (blk->end, hole->end))
{
- hole_index = scoreboard_hole_index (sb, hole);
+ u32 hole_index = scoreboard_hole_index (sb, hole);
next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
hole->end);
-
/* Pool might've moved */
hole = scoreboard_get_hole (sb, hole_index);
hole->end = blk->start;
+
+ scoreboard_update_sacked_rxt (sb, blk->start, blk->end,
+ has_rxt);
+
blk_index++;
ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
}
else if (seq_lt (blk->start, hole->end))
{
+ scoreboard_update_sacked_rxt (sb, blk->start, hole->end,
+ has_rxt);
hole->end = blk->start;
}
hole = scoreboard_next_hole (sb, hole);
}
}
- if (pool_elts (sb->holes) == 1)
- {
- hole = scoreboard_first_hole (sb);
- if (hole->start == ack + sb->snd_una_adv && hole->end == tc->snd_nxt)
- scoreboard_remove_hole (sb, hole);
- }
-
- scoreboard_update_bytes (tc, sb);
- sb->last_sacked_bytes = sb->sacked_bytes
- - (old_sacked_bytes - sb->last_bytes_delivered);
+ scoreboard_update_bytes (sb, ack, tc->snd_mss);
ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
- || sb->sacked_bytes < tc->snd_nxt - seq_max (tc->snd_una, ack));
+ || sb->sacked_bytes <= tc->snd_nxt - seq_max (tc->snd_una, ack));
ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
- seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
- || sb->holes[sb->head].start == ack + sb->snd_una_adv);
+ || sb->is_reneging || sb->holes[sb->head].start == ack);
ASSERT (sb->last_lost_bytes <= sb->lost_bytes);
+ ASSERT ((ack - tc->snd_una) + sb->last_sacked_bytes
+ - sb->last_bytes_delivered >= sb->rxt_sacked);
+ ASSERT ((ack - tc->snd_una) >= tc->sack_sb.last_bytes_delivered
+ || (tc->flags & TCP_CONN_FINSNT));
- TCP_EVT_DBG (TCP_EVT_CC_SCOREBOARD, tc);
+ TCP_EVT (TCP_EVT_CC_SCOREBOARD, tc);
}
#endif /* CLIB_MARCH_VARIANT */
tc->snd_wnd = snd_wnd;
tc->snd_wl1 = seq;
tc->snd_wl2 = ack;
- TCP_EVT_DBG (TCP_EVT_SND_WND, tc);
+ TCP_EVT (TCP_EVT_SND_WND, tc);
if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
{
}
else
{
- tcp_persist_timer_reset (tc);
+ if (PREDICT_FALSE (tcp_timer_is_active (tc, TCP_TIMER_PERSIST)))
+ tcp_persist_timer_reset (tc);
+
+ if (PREDICT_FALSE (tcp_is_descheduled (tc)))
+ tcp_reschedule (tc);
+
if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
{
tc->rto_boff = 0;
}
}
-#ifndef CLIB_MARCH_VARIANT
/**
* Init loss recovery/fast recovery.
*
* Triggered by dup acks as opposed to timer timeout. Note that cwnd is
* updated in @ref tcp_cc_handle_event after fast retransmit
*/
-void
+static void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
tcp_fastrecovery_on (tc);
tc->snd_congestion = tc->snd_nxt;
tc->cwnd_acc_bytes = 0;
tc->snd_rxt_bytes = 0;
+ tc->rxt_delivered = 0;
+ tc->prr_delivered = 0;
+ tc->prr_start = tc->snd_una;
tc->prev_ssthresh = tc->ssthresh;
tc->prev_cwnd = tc->cwnd;
- tc->cc_algo->congestion (tc);
- tc->fr_occurences += 1;
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
-}
-#endif /* CLIB_MARCH_VARIANT */
-static void
-tcp_cc_recovery_exit (tcp_connection_t * tc)
-{
- tc->rto_boff = 0;
- tcp_update_rto (tc);
- tc->snd_rxt_ts = 0;
- tc->rtt_ts = 0;
- tcp_recovery_off (tc);
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
-}
-
-#ifndef CLIB_MARCH_VARIANT
-void
-tcp_cc_fastrecovery_clear (tcp_connection_t * tc)
-{
- tc->snd_rxt_bytes = 0;
- tc->rcv_dupacks = 0;
- tc->rtt_ts = 0;
+ tc->snd_rxt_ts = tcp_tstamp (tc);
+ tcp_cc_congestion (tc);
- tcp_fastrecovery_off (tc);
- tcp_fastrecovery_first_off (tc);
- tc->flags &= ~TCP_CONN_FRXT_PENDING;
+ /* Post retransmit update cwnd to ssthresh and account for the
+ * three segments that have left the network and should've been
+ * buffered at the receiver XXX */
+ if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+ tc->cwnd += 3 * tc->snd_mss;
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
+ tc->fr_occurences += 1;
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
}
-#endif /* CLIB_MARCH_VARIANT */
static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
tc->cwnd = tc->prev_cwnd;
tc->ssthresh = tc->prev_ssthresh;
- tc->rcv_dupacks = 0;
- if (tcp_in_recovery (tc))
- {
- tcp_cc_recovery_exit (tc);
- tc->snd_nxt = seq_max (tc->snd_nxt, tc->snd_congestion);
- }
- else if (tcp_in_fastrecovery (tc))
- {
- tcp_cc_fastrecovery_clear (tc);
- }
tcp_cc_undo_recovery (tc);
ASSERT (tc->rto_boff == 0);
- TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
}
static inline u8
}
static inline u8
-tcp_cc_is_spurious_fast_rxt (tcp_connection_t * tc)
+tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
- return (tcp_in_fastrecovery (tc)
- && tc->cwnd > tc->ssthresh + 3 * tc->snd_mss);
+ return (tcp_cc_is_spurious_timeout_rxt (tc));
}
-static u8
-tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
+static inline u8
+tcp_should_fastrecover_sack (tcp_connection_t * tc)
+{
+ return (tc->sack_sb.lost_bytes
+ || ((TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
+ < tc->sack_sb.sacked_bytes));
+}
+
+static inline u8
+tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
{
- return (tcp_cc_is_spurious_timeout_rxt (tc)
- || tcp_cc_is_spurious_fast_rxt (tc));
+ if (!has_sack)
+ {
+      /* Reset dupacks because we're probably after a timeout (RFC6582
+       * heuristics). That is, the cumulative ack does not cover more
+       * than the congestion threshold and either:
+ * 1) The following doesn't hold: The congestion window is greater
+ * than SMSS bytes and the difference between highest_ack
+ * and prev_highest_ack is at most 4*SMSS bytes
+ * 2) Echoed timestamp in the last non-dup ack does not equal the
+ * stored timestamp
+ */
+ if (seq_leq (tc->snd_una, tc->snd_congestion)
+ && ((!(tc->cwnd > tc->snd_mss
+ && tc->bytes_acked <= 4 * tc->snd_mss))
+ || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
+ {
+ tc->rcv_dupacks = 0;
+ return 0;
+ }
+ }
+ return ((tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
+ || tcp_should_fastrecover_sack (tc));
}
static int
tcp_cc_recover (tcp_connection_t * tc)
{
+ sack_scoreboard_hole_t *hole;
+ u8 is_spurious = 0;
+
ASSERT (tcp_in_cong_recovery (tc));
+
if (tcp_cc_is_spurious_retransmit (tc))
{
tcp_cc_congestion_undo (tc);
- return 1;
+ is_spurious = 1;
}
- if (tcp_in_recovery (tc))
- tcp_cc_recovery_exit (tc);
- else if (tcp_in_fastrecovery (tc))
+ tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
+ tc->rcv_dupacks = 0;
+
+ /* Previous recovery left us congested. Continue sending as part
+ * of the current recovery event with an updated snd_congestion */
+ if (tc->sack_sb.sacked_bytes)
{
- tcp_cc_recovered (tc);
- tcp_cc_fastrecovery_clear (tc);
+ tc->snd_congestion = tc->snd_nxt;
+ tcp_program_retransmit (tc);
+ return is_spurious;
}
+ tc->rxt_delivered = 0;
+ tc->snd_rxt_bytes = 0;
+ tc->snd_rxt_ts = 0;
+ tc->prr_delivered = 0;
+ tc->rtt_ts = 0;
+ tc->flags &= ~TCP_CONN_RXT_PENDING;
+
+ hole = scoreboard_first_hole (&tc->sack_sb);
+ if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
+ scoreboard_clear (&tc->sack_sb);
+
+ if (!tcp_in_recovery (tc) && !is_spurious)
+ tcp_cc_recovered (tc);
+
+ tcp_fastrecovery_off (tc);
+ tcp_fastrecovery_first_off (tc);
+ tcp_recovery_off (tc);
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
+
ASSERT (tc->rto_boff == 0);
ASSERT (!tcp_in_cong_recovery (tc));
ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
- return 0;
+
+ return is_spurious;
}
static void
tc->snd_congestion = tc->snd_una - 1;
}
-static u8
-tcp_should_fastrecover_sack (tcp_connection_t * tc)
-{
- return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
-}
-
-static u8
-tcp_should_fastrecover (tcp_connection_t * tc)
-{
- return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
- || tcp_should_fastrecover_sack (tc));
-}
-
/**
* One function to rule them all ... and in the darkness bind them
*/
tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
u32 is_dack)
{
- u32 rxt_delivered;
+ u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);
+
+ /* If reneging, wait for timer based retransmits */
+ if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
+ return;
- if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
- {
- if (tc->bytes_acked)
- goto partial_ack;
- tcp_program_fastretransmit (tc);
- return;
- }
/*
- * Duplicate ACK. Check if we should enter fast recovery, or if already in
- * it account for the bytes that left the network.
+ * If not in recovery, figure out if we should enter
*/
- else if (is_dack && !tcp_in_recovery (tc))
+ if (!tcp_in_cong_recovery (tc))
{
- TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
- ASSERT (tc->snd_una != tc->snd_nxt || tc->sack_sb.last_sacked_bytes);
+ ASSERT (is_dack);
tc->rcv_dupacks++;
+ TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
+ tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
- /* Pure duplicate ack. If some data got acked, it's handled lower */
- if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
- {
- ASSERT (tcp_in_fastrecovery (tc));
- tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
- return;
- }
- else if (tcp_should_fastrecover (tc))
+ if (tcp_should_fastrecover (tc, has_sack))
{
- u32 pacer_wnd;
+ tcp_cc_init_congestion (tc);
- ASSERT (!tcp_in_fastrecovery (tc));
+ if (has_sack)
+ scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
- /* Heuristic to catch potential late dupacks
- * after fast retransmit exits */
- if (is_dack && tc->snd_una == tc->snd_congestion
- && timestamp_leq (tc->rcv_opts.tsecr, tc->tsecr_last_ack))
- {
- tc->rcv_dupacks = 0;
- return;
- }
+ tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
+ tcp_program_retransmit (tc);
+ }
- tcp_cc_init_congestion (tc);
- tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
+ return;
+ }
- if (tcp_opts_sack_permitted (&tc->rcv_opts))
- {
- tc->cwnd = tc->ssthresh;
- scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una);
- }
- else
- {
- /* Post retransmit update cwnd to ssthresh and account for the
- * three segments that have left the network and should've been
- * buffered at the receiver XXX */
- tc->cwnd = tc->ssthresh + 3 * tc->snd_mss;
- }
+ /*
+ * Already in recovery
+ */
- /* Constrain rate until we get a partial ack */
- pacer_wnd = clib_max (0.1 * tc->cwnd, 2 * tc->snd_mss);
- tcp_connection_tx_pacer_reset (tc, pacer_wnd,
- 0 /* start bucket */ );
- tcp_program_fastretransmit (tc);
- return;
- }
- else if (!tc->bytes_acked
- || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
- {
- tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
- return;
- }
- else
- goto partial_ack;
+ /*
+ * Process (re)transmit feedback. Output path uses this to decide how much
+ * more data to release into the network
+ */
+ if (has_sack)
+ {
+ if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
+ tcp_fastrecovery_first_on (tc);
+
+ tc->rxt_delivered += tc->sack_sb.rxt_sacked;
+ tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
+ - tc->sack_sb.last_bytes_delivered;
}
- /* Don't allow entry in fast recovery if still in recovery, for now */
- else if (0 && is_dack && tcp_in_recovery (tc))
+ else
{
- /* If of of the two conditions lower hold, reset dupacks because
- * we're probably after timeout (RFC6582 heuristics).
- * If Cumulative ack does not cover more than congestion threshold,
- * and:
- * 1) The following doesn't hold: The congestion window is greater
- * than SMSS bytes and the difference between highest_ack
- * and prev_highest_ack is at most 4*SMSS bytes
- * 2) Echoed timestamp in the last non-dup ack does not equal the
- * stored timestamp
- */
- if (seq_leq (tc->snd_una, tc->snd_congestion)
- && ((!(tc->cwnd > tc->snd_mss
- && tc->bytes_acked <= 4 * tc->snd_mss))
- || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
+ if (is_dack)
{
- tc->rcv_dupacks = 0;
- return;
+ tc->rcv_dupacks += 1;
+ TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
}
- }
-
- if (!tc->bytes_acked)
- return;
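+      /* Without SACK, approximate delivery feedback: each dupack stands
+       * for one delivered segment and a partial ack delivers whatever
+       * the dupacks did not already account for */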
+ tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
+ tc->snd_rxt_bytes);
+ if (is_dack)
+ tc->prr_delivered += clib_min (tc->snd_mss,
+ tc->snd_nxt - tc->snd_una);
+ else
+ tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
+ tc->snd_mss *
+ tc->rcv_dupacks);
-partial_ack:
- TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);
+ /* If partial ack, assume that the first un-acked segment was lost */
+ if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
+ tcp_fastrecovery_first_on (tc);
+ }
/*
- * Legitimate ACK. 1) See if we can exit recovery
+ * See if we can exit and stop retransmitting
*/
-
- /* Update the pacing rate. For the first partial ack we move from
- * the artificially constrained rate to the one after congestion */
- tcp_connection_tx_pacer_update (tc);
-
if (seq_geq (tc->snd_una, tc->snd_congestion))
{
- tcp_retransmit_timer_update (tc);
-
/* If spurious return, we've already updated everything */
if (tcp_cc_recover (tc))
{
return;
}
+ tcp_program_retransmit (tc);
+
/*
- * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
+ * Notify cc of the event
*/
- /* XXX limit this only to first partial ack? */
- tcp_retransmit_timer_update (tc);
+ if (!tc->bytes_acked)
+ {
+ tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
+ return;
+ }
/* RFC6675: If the incoming ACK is a cumulative acknowledgment,
* reset dupacks to 0. Also needed if in congestion recovery */
tc->rcv_dupacks = 0;
- /* Post RTO timeout don't try anything fancy */
if (tcp_in_recovery (tc))
- {
- tcp_cc_rcv_ack (tc, rs);
- transport_add_tx_event (&tc->connection);
- return;
- }
+ tcp_cc_rcv_ack (tc, rs);
+ else
+ tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
+}
+
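+/**
+ * Process an old/duplicate ack while in congestion recovery. The
+ * cumulative ack acks nothing new, but its SACK blocks may still carry
+ * useful (re)transmit feedback
+ */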
+static void
+tcp_handle_old_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
+{
+ if (!tcp_in_cong_recovery (tc))
+ return;
- /* Remove retransmitted bytes that have been delivered */
if (tcp_opts_sack_permitted (&tc->rcv_opts))
- {
- ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
- >= tc->sack_sb.last_bytes_delivered
- || (tc->flags & TCP_CONN_FINSNT));
+ tcp_rcv_sacks (tc, tc->snd_una);
- /* If we have sacks and we haven't gotten an ack beyond high_rxt,
- * remove sacked bytes delivered */
- if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
- {
- rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
- - tc->sack_sb.last_bytes_delivered;
- ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
- tc->snd_rxt_bytes -= rxt_delivered;
- }
- else
- {
- /* Apparently all retransmitted holes have been acked */
- tc->snd_rxt_bytes = 0;
- tc->sack_sb.high_rxt = tc->snd_una;
- }
- }
- else
- {
- tcp_fastrecovery_first_on (tc);
- if (tc->snd_rxt_bytes > tc->bytes_acked)
- tc->snd_rxt_bytes -= tc->bytes_acked;
- else
- tc->snd_rxt_bytes = 0;
- }
+ tc->bytes_acked = 0;
- tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_sample_delivery_rate (tc, rs);
- /*
- * Since this was a partial ack, try to retransmit some more data
- */
- tcp_program_fastretransmit (tc);
+ tcp_cc_handle_event (tc, rs, 1);
+}
+
+/**
+ * Check if duplicate ack as per RFC5681 Sec. 2
+ */
+always_inline u8
+tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
+ u32 prev_snd_una)
+{
+ return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
+ && seq_gt (tc->snd_nxt, tc->snd_una)
+ && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
+ && (prev_snd_wnd == tc->snd_wnd));
+}
+
+/**
+ * Checks if ack is a congestion control event.
+ */
+static u8
+tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
+ u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
+{
+ /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
+ * defined to be 'duplicate' as well */
+ *is_dack = tc->sack_sb.last_sacked_bytes
+ || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
+
+ return (*is_dack || tcp_in_cong_recovery (tc));
}
/**
tcp_rate_sample_t rs = { 0 };
u8 is_dack;
- TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);
+ TCP_EVT (TCP_EVT_CC_STAT, tc);
/* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
tc->errors.above_ack_wnd += 1;
*error = TCP_ERROR_ACK_FUTURE;
- TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
- vnet_buffer (b)->tcp.ack_number);
+ TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
return -1;
}
{
tc->errors.below_ack_wnd += 1;
*error = TCP_ERROR_ACK_OLD;
- TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
- vnet_buffer (b)->tcp.ack_number);
- if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
- tcp_cc_handle_event (tc, 0, 1);
+ TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
+
+ if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una - tc->rcv_wnd))
+ return -1;
+
+ tcp_handle_old_ack (tc, &rs);
+
/* Don't drop yet */
return 0;
}
vnet_buffer (b)->tcp.ack_number,
clib_net_to_host_u16 (th->window) << tc->snd_wscale);
tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
- tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
+ tc->snd_una = vnet_buffer (b)->tcp.ack_number;
tcp_validate_txf_size (tc, tc->bytes_acked);
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_sample_delivery_rate (tc, &rs);
+
if (tc->bytes_acked)
{
tcp_program_dequeue (wrk, tc);
- tcp_update_rtt (tc, vnet_buffer (b)->tcp.ack_number);
+ tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
}
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
- tcp_bt_sample_delivery_rate (tc, &rs);
-
- TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);
+ TCP_EVT (TCP_EVT_ACK_RCVD, tc);
/*
* Check if we have congestion event
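+/**
+ * Flush the per-worker lists of pending disconnect and reset
+ * notifications accumulated while processing the frame
+ */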
static void
tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
{
- u32 thread_index, *pending_disconnects;
+ u32 thread_index, *pending_disconnects, *pending_resets;
tcp_connection_t *tc;
int i;
- if (!vec_len (wrk->pending_disconnects))
- return;
+ if (vec_len (wrk->pending_disconnects))
+ {
+ thread_index = wrk->vm->thread_index;
+ pending_disconnects = wrk->pending_disconnects;
+ for (i = 0; i < vec_len (pending_disconnects); i++)
+ {
+ tc = tcp_connection_get (pending_disconnects[i], thread_index);
+ tcp_disconnect_pending_off (tc);
+ session_transport_closing_notify (&tc->connection);
+ }
+ _vec_len (wrk->pending_disconnects) = 0;
+ }
- thread_index = wrk->vm->thread_index;
- pending_disconnects = wrk->pending_disconnects;
- for (i = 0; i < vec_len (pending_disconnects); i++)
+ if (vec_len (wrk->pending_resets))
{
- tc = tcp_connection_get (pending_disconnects[i], thread_index);
- tcp_disconnect_pending_off (tc);
- session_transport_closing_notify (&tc->connection);
+ thread_index = wrk->vm->thread_index;
+ pending_resets = wrk->pending_resets;
+ for (i = 0; i < vec_len (pending_resets); i++)
+ {
+ tc = tcp_connection_get (pending_resets[i], thread_index);
+ tcp_disconnect_pending_off (tc);
+ tcp_handle_rst (tc);
+ }
+ _vec_len (wrk->pending_resets) = 0;
}
- _vec_len (wrk->pending_disconnects) = 0;
}
static void
/* Account for the FIN and send ack */
tc->rcv_nxt += 1;
+ tc->flags |= TCP_CONN_FINRCVD;
tcp_program_ack (tc);
/* Enter CLOSE-WAIT and notify session. To avoid lingering
* in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
tcp_program_disconnect (wrk, tc);
tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
- TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc);
+ TCP_EVT (TCP_EVT_FIN_RCVD, tc);
*error = TCP_ERROR_FIN_RCVD;
}
1 /* queue event */ , 1);
tc->bytes_in += written;
- TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);
+ TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
/* Update rcv_nxt */
if (PREDICT_TRUE (written == data_len))
else if (written > data_len)
{
tc->rcv_nxt += written;
- TCP_EVT_DBG (TCP_EVT_CC_INPUT, tc, data_len, written);
+ TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
}
else if (written > 0)
{
/* Nothing written */
if (rv)
{
- TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
+ TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
return TCP_ERROR_FIFO_FULL;
}
- TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);
+ TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
tc->bytes_in += data_len;
/* Update SACK list if in use */
end = start + ooo_segment_length (s0->rx_fifo, newest);
tcp_update_sack_list (tc, start, end);
svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
- TCP_EVT_DBG (TCP_EVT_CC_SACKS, tc);
+ TCP_EVT (TCP_EVT_CC_SACKS, tc);
}
}
* retransmissions since we may not have any data to send */
if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
{
- tcp_program_ack (tc);
+ tcp_program_dupack (tc);
+ tc->errors.below_data_wnd++;
error = TCP_ERROR_SEGMENT_OLD;
goto done;
}
/* RFC2581: Enqueue and send DUPACK for fast retransmit */
error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
tcp_program_dupack (tc);
- TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
+ TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
tc->rcv_las + tc->rcv_wnd);
goto done;
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
+ tcp_connection_t *tc = &t->tcp_connection;
u32 indent = format_get_indent (s);
- s = format (s, "%U\n%U%U",
- format_tcp_header, &t->tcp_header, 128,
- format_white_space, indent,
- format_tcp_connection, &t->tcp_connection, 1);
+ s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
+ format_tcp_state, tc->state, format_white_space, indent,
+ format_tcp_header, &t->tcp_header, 128);
return s;
}
/* 1-4: check SEQ, RST, SYN */
if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
{
- TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
+ TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
goto done;
}
static u8
-tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
+tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
+ tcp_header_t * hdr)
{
transport_connection_t *tmp = 0;
u64 handle;
if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
return 1;
+ u8 is_ip_valid = 0, val_l, val_r;
+
+ if (tc->connection.is_ip4)
+ {
+ ip4_header_t *ip4_hdr = (ip4_header_t *) vlib_buffer_get_current (b);
+
+ val_l = !ip4_address_compare (&ip4_hdr->dst_address,
+ &tc->connection.lcl_ip.ip4);
+ val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
+ val_r = !ip4_address_compare (&ip4_hdr->src_address,
+ &tc->connection.rmt_ip.ip4);
+ val_r = val_r || tc->state == TCP_STATE_LISTEN;
+ is_ip_valid = val_l && val_r;
+ }
+ else
+ {
+ ip6_header_t *ip6_hdr = (ip6_header_t *) vlib_buffer_get_current (b);
+
+ val_l = !ip6_address_compare (&ip6_hdr->dst_address,
+ &tc->connection.lcl_ip.ip6);
+ val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
+ val_r = !ip6_address_compare (&ip6_hdr->src_address,
+ &tc->connection.rmt_ip.ip6);
+ val_r = val_r || tc->state == TCP_STATE_LISTEN;
+ is_ip_valid = val_l && val_r;
+ }
+
u8 is_valid = (tc->c_lcl_port == hdr->dst_port
&& (tc->state == TCP_STATE_LISTEN
- || tc->c_rmt_port == hdr->src_port));
+ || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
if (!is_valid)
{
&& tmp->rmt_port == hdr->src_port)
{
TCP_DBG ("half-open is valid!");
+ is_valid = 1;
}
}
}
TRANSPORT_PROTO_TCP,
thread_index, &is_filtered);
tc = tcp_get_connection_from_transport (tconn);
- ASSERT (tcp_lookup_is_valid (tc, tcp));
+ ASSERT (tcp_lookup_is_valid (tc, b, tcp));
}
else
{
TRANSPORT_PROTO_TCP,
thread_index, &is_filtered);
tc = tcp_get_connection_from_transport (tconn);
- ASSERT (tcp_lookup_is_valid (tc, tcp));
+ ASSERT (tcp_lookup_is_valid (tc, b, tcp));
}
return tc;
}
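+/**
+ * Find the listener, if any, for the destination address and port of
+ * the incoming segment
+ */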
+static tcp_connection_t *
+tcp_lookup_listener (vlib_buffer_t * b, u32 fib_index, int is_ip4)
+{
+ session_t *s;
+
+ if (is_ip4)
+ {
+ ip4_header_t *ip4 = vlib_buffer_get_current (b);
+ tcp_header_t *tcp = tcp_buffer_hdr (b);
+ s = session_lookup_listener4 (fib_index,
+ &ip4->dst_address,
+ tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
+ }
+ else
+ {
+ ip6_header_t *ip6 = vlib_buffer_get_current (b);
+ tcp_header_t *tcp = tcp_buffer_hdr (b);
+ s = session_lookup_listener6 (fib_index,
+ &ip6->dst_address,
+ tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
+
+ }
+ if (PREDICT_TRUE (s != 0))
+ return tcp_get_connection_from_transport (transport_get_listener
+ (TRANSPORT_PROTO_TCP,
+ s->connection_index));
+ else
+ return 0;
+}
+
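+/**
+ * Enable TSO for the connection if the path to the peer resolves to a
+ * single interface that supports GSO
+ */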
+always_inline void
+tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ const dpo_id_t *dpo;
+ const load_balance_t *lb;
+ vnet_hw_interface_t *hw_if;
+ u32 sw_if_idx, lb_idx;
+
+ if (is_ipv4)
+ {
+ ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
+ lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
+ }
+ else
+ {
+ ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
+ lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
+ }
+
+ lb = load_balance_get (lb_idx);
+ if (PREDICT_FALSE (lb->lb_n_buckets > 1))
+ return;
+ dpo = load_balance_get_bucket_i (lb, 0);
+
+ sw_if_idx = dpo_get_urpf (dpo);
+ if (PREDICT_FALSE (sw_if_idx == ~0))
+ return;
+
+ hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
+ if (hw_if->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
+ tc->cfg_flags |= TCP_CFG_F_TSO;
+}
+
always_inline uword
tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * from_frame, int is_ip4)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
u32 n_left_from, *from, *first_buffer, errors = 0;
u32 my_thread_index = vm->thread_index;
tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
/* If ACK is acceptable, signal client that peer is not
* willing to accept connection and drop connection*/
if (tcp_ack (tcp0))
- tcp_connection_reset (tc0);
+ tcp_rcv_rst (wrk, tc0);
error0 = TCP_ERROR_RST_RCVD;
goto drop;
}
/* Valid SYN or SYN-ACK. Move connection from half-open pool to
* current thread pool. */
- pool_get (tm->connections[my_thread_index], new_tc0);
- clib_memcpy_fast (new_tc0, tc0, sizeof (*new_tc0));
- new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
- new_tc0->c_thread_index = my_thread_index;
+ new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
new_tc0->irs = seq0;
new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
transport_tx_fifo_size (&new_tc0->connection);
/* Update rtt with the syn-ack sample */
tcp_estimate_initial_rtt (new_tc0);
- TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
+ TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
error0 = TCP_ERROR_SYN_ACKS_RCVD;
}
/* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
{
tcp_connection_cleanup (new_tc0);
tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
- TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0);
+ TCP_EVT (TCP_EVT_RST_SENT, tc0);
error0 = TCP_ERROR_CREATE_SESSION_FAIL;
goto drop;
}
goto drop;
}
+ if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
+ tcp_check_tx_offload (new_tc0, is_ip4);
+
/* Read data, if any */
if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
{
}
else
{
- tcp_program_ack (new_tc0);
+	  /* Send the ack now instead of programming it because the
+	   * connection was just established and the ack is not optional */
+ tcp_send_ack (new_tc0);
}
drop:
my_thread_index);
tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
+ tcp_handle_disconnects (wrk);
return from_frame->n_vectors;
}
if (CLIB_DEBUG)
{
- tcp_connection_t *tmp;
- tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
- is_ip4);
- if (tmp->state != tc0->state)
+ if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
{
- if (tc0->state != TCP_STATE_CLOSED)
- clib_warning ("state changed");
- goto drop;
+ tcp_connection_t *tmp;
+ tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
+ is_ip4);
+ if (tmp->state != tc0->state)
+ {
+ if (tc0->state != TCP_STATE_CLOSED)
+ clib_warning ("state changed");
+ goto drop;
+ }
}
}
/* Make sure the segment is exactly right */
if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
{
- tcp_connection_reset (tc0);
+ tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
error0 = TCP_ERROR_SEGMENT_INVALID;
goto drop;
}
*/
if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
{
- tcp_connection_reset (tc0);
+ tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
+ error0 = TCP_ERROR_SEGMENT_INVALID;
goto drop;
}
/* Update rtt and rto */
tcp_estimate_initial_rtt (tc0);
+ tcp_connection_tx_pacer_update (tc0);
/* Switch state to ESTABLISHED */
tc0->state = TCP_STATE_ESTABLISHED;
- TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
+ TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
+
+ if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
+ tcp_check_tx_offload (tc0, is_ip4);
/* Initialize session variables */
tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
if (session_stream_accept_notify (&tc0->connection))
{
error0 = TCP_ERROR_MSG_QUEUE_FULL;
- tcp_connection_reset (tc0);
+ tcp_send_reset (tc0);
+ session_transport_delete_notify (&tc0->connection);
+ tcp_connection_cleanup (tc0);
goto drop;
}
error0 = TCP_ERROR_ACK_OK;
if (tc0->flags & TCP_CONN_FINRCVD)
{
tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
- tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE,
- tcp_cfg.cleanup_time);
session_transport_closed_notify (&tc0->connection);
+ tcp_program_cleanup (wrk, tc0);
goto drop;
}
* we can't ensure that we have no packets already enqueued
* to output. Rely instead on the waitclose timer */
tcp_connection_timers_reset (tc0);
- tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ tcp_program_cleanup (tcp_get_worker (tc0->c_thread_index), tc0);
goto drop;
if (!is_fin0)
goto drop;
- TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
+ TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
switch (tc0->state)
{
vlib_frame_t * from_frame, int is_ip4)
{
u32 n_left_from, *from, n_syns = 0, *first_buffer;
- u32 my_thread_index = vm->thread_index;
+ u32 thread_index = vm->thread_index;
from = first_buffer = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
while (n_left_from > 0)
{
- u32 bi0;
- vlib_buffer_t *b0;
- tcp_rx_trace_t *t0;
- tcp_header_t *th0 = 0;
- tcp_connection_t *lc0;
- ip4_header_t *ip40;
- ip6_header_t *ip60;
- tcp_connection_t *child0;
- u32 error0 = TCP_ERROR_NONE;
+ u32 bi, error = TCP_ERROR_NONE;
+ tcp_connection_t *lc, *child;
+ vlib_buffer_t *b;
- bi0 = from[0];
+ bi = from[0];
from += 1;
n_left_from -= 1;
- b0 = vlib_get_buffer (vm, bi0);
- lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);
+ b = vlib_get_buffer (vm, bi);
- if (is_ip4)
+ lc = tcp_listener_get (vnet_buffer (b)->tcp.connection_index);
+ if (PREDICT_FALSE (lc == 0))
{
- ip40 = vlib_buffer_get_current (b0);
- th0 = ip4_next_header (ip40);
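+	  /* A SYN in TIME-WAIT is dispatched to the listen node (see
+	   * dispatch table), so the connection index points to the old
+	   * session instead of a listener */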
+ tcp_connection_t *tc;
+ tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
+ thread_index);
+ if (tc->state != TCP_STATE_TIME_WAIT)
+ {
+ error = TCP_ERROR_CREATE_EXISTS;
+ goto done;
+ }
+	  lc = tcp_lookup_listener (b, tc->c_fib_index, is_ip4);
+	  /* clean up the old session */
+	  tcp_connection_del (tc);
+
+	  if (PREDICT_FALSE (lc == 0))
+	    {
+	      error = TCP_ERROR_NO_LISTENER;
+	      goto done;
+	    }
}
- else
+
+ /* Make sure connection wasn't just created */
+ child = tcp_lookup_connection (lc->c_fib_index, b, thread_index,
+ is_ip4);
+ if (PREDICT_FALSE (child->state != TCP_STATE_LISTEN))
{
- ip60 = vlib_buffer_get_current (b0);
- th0 = ip6_next_header (ip60);
+ error = TCP_ERROR_CREATE_EXISTS;
+ goto done;
}
/* Create child session. For syn-flood protection use filter */
/* 3. check for a SYN (did that already) */
- /* Make sure connection wasn't just created */
- child0 = tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
- is_ip4);
- if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
- {
- error0 = TCP_ERROR_CREATE_EXISTS;
- goto drop;
- }
-
/* Create child session and send SYN-ACK */
- child0 = tcp_connection_alloc (my_thread_index);
- child0->c_lcl_port = th0->dst_port;
- child0->c_rmt_port = th0->src_port;
- child0->c_is_ip4 = is_ip4;
- child0->state = TCP_STATE_SYN_RCVD;
- child0->c_fib_index = lc0->c_fib_index;
+ child = tcp_connection_alloc (thread_index);
- if (is_ip4)
- {
- child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
- child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
- }
- else
+ if (tcp_options_parse (tcp_buffer_hdr (b), &child->rcv_opts, 1))
{
- clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
- sizeof (ip6_address_t));
- clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
- sizeof (ip6_address_t));
+ error = TCP_ERROR_OPTIONS;
+ tcp_connection_free (child);
+ goto done;
}
- if (tcp_options_parse (th0, &child0->rcv_opts, 1))
- {
- error0 = TCP_ERROR_OPTIONS;
- tcp_connection_free (child0);
- goto drop;
- }
+ tcp_init_w_buffer (child, b, is_ip4);
- child0->irs = vnet_buffer (b0)->tcp.seq_number;
- child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
- child0->rcv_las = child0->rcv_nxt;
- child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ child->state = TCP_STATE_SYN_RCVD;
+ child->c_fib_index = lc->c_fib_index;
+ child->cc_algo = lc->cc_algo;
+ tcp_connection_init_vars (child);
+ child->rto = TCP_RTO_MIN;
- /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
- * segments are used to initialize PAWS. */
- if (tcp_opts_tstamp (&child0->rcv_opts))
+ if (session_stream_accept (&child->connection, lc->c_s_index,
+ lc->c_thread_index, 0 /* notify */ ))
{
- child0->tsval_recent = child0->rcv_opts.tsval;
- child0->tsval_recent_age = tcp_time_now ();
+ tcp_connection_cleanup (child);
+ error = TCP_ERROR_CREATE_SESSION_FAIL;
+ goto done;
}
- if (tcp_opts_wscale (&child0->rcv_opts))
- child0->snd_wscale = child0->rcv_opts.wscale;
+ child->tx_fifo_size = transport_tx_fifo_size (&child->connection);
- child0->snd_wnd = clib_net_to_host_u16 (th0->window)
- << child0->snd_wscale;
- child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
- child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
+ tcp_send_synack (child);
- tcp_connection_init_vars (child0);
- child0->rto = TCP_RTO_MIN;
+ TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
- if (session_stream_accept (&child0->connection, lc0->c_s_index,
- lc0->c_thread_index, 0 /* notify */ ))
- {
- tcp_connection_cleanup (child0);
- error0 = TCP_ERROR_CREATE_SESSION_FAIL;
- goto drop;
- }
-
- TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0, 1);
- child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
- tcp_send_synack (child0);
-
- drop:
+ done:
- if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
{
- t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
- clib_memcpy_fast (&t0->tcp_connection, lc0,
- sizeof (t0->tcp_connection));
+ tcp_rx_trace_t *t;
+ t = vlib_add_trace (vm, node, b, sizeof (*t));
+ clib_memcpy_fast (&t->tcp_header, tcp_buffer_hdr (b),
+ sizeof (t->tcp_header));
+ clib_memcpy_fast (&t->tcp_connection, lc,
+ sizeof (t->tcp_connection));
}
- n_syns += (error0 == TCP_ERROR_NONE);
+ n_syns += (error == TCP_ERROR_NONE);
}
tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
static inline void
tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
- vlib_buffer_t * b, u16 * next, u32 * error)
+ vlib_buffer_t * b, u16 * next,
+ vlib_node_runtime_t * error_node)
{
tcp_header_t *tcp;
+ u32 error;
u8 flags;
tcp = tcp_buffer_hdr (b);
flags = tcp->flags & filter_flags;
*next = tm->dispatch_table[tc->state][flags].next;
- *error = tm->dispatch_table[tc->state][flags].error;
+ error = tm->dispatch_table[tc->state][flags].error;
tc->segs_in += 1;
- if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
- || *next == TCP_INPUT_NEXT_RESET))
+ if (PREDICT_FALSE (error != TCP_ERROR_NONE))
{
- /* Overload tcp flags to store state */
- tcp_state_t state = tc->state;
- vnet_buffer (b)->tcp.flags = tc->state;
-
- if (*error == TCP_ERROR_DISPATCH)
+ b->error = error_node->errors[error];
+ if (error == TCP_ERROR_DISPATCH)
clib_warning ("tcp conn %u disp error state %U flags %U",
- tc->c_c_index, format_tcp_state, state,
+ tc->c_c_index, format_tcp_state, tc->state,
format_tcp_flags, (int) flags);
}
}
tcp_main_t *tm = vnet_get_tcp_main ();
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 nexts[VLIB_FRAME_SIZE], *next;
+ vlib_node_runtime_t *error_node;
tcp_set_time_now (tcp_get_worker (thread_index));
+ error_node = vlib_node_get_runtime (vm, tcp_node_index (input, is_ip4));
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_get_buffers (vm, from, bufs, n_left_from);
if (PREDICT_TRUE (!tc0 + !tc1 == 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
- ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
- tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
- tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
+ tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
+ tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], error_node);
}
else
{
if (PREDICT_TRUE (tc0 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
- tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
+ tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
}
else
- tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
+ {
+ tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
+ b[0]->error = error_node->errors[error0];
+ }
if (PREDICT_TRUE (tc1 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
+ ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
- tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
+ tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], error_node);
}
else
- tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
+ {
+ tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
+ b[1]->error = error_node->errors[error1];
+ }
}
b += 2;
is_nolookup);
if (PREDICT_TRUE (tc0 != 0))
{
- ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
+ ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
- tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
+ tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
}
else
- tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
+ {
+ tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
+ b[0]->error = error_node->errors[error0];
+ }
b += 1;
next += 1;
_(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
TCP_ERROR_SEGMENT_INVALID);
_(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
- TCP_ERROR_NONE);
+ TCP_ERROR_SEGMENT_INVALID);
_(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
TCP_ERROR_SEGMENT_INVALID);
_(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
TCP_ERROR_NONE);
_(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
- _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+ _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
_(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
_(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
TCP_ERROR_NONE);
_(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
_(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
TCP_ERROR_CONNECTION_CLOSED);
- _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
- _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
+ _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
+ _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
_(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
- TCP_ERROR_NONE);
+ TCP_ERROR_CONNECTION_CLOSED);
#undef _
}