*
* @param th TCP header
* @param to TCP options data structure to be populated
+ * @param is_syn set if segment is a SYN or SYN-ACK (enables parsing of SYN-only options)
* @return -1 if parsing failed
*/
-static int
-tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
+static inline int
+tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
{
const u8 *data;
u8 opt_len, opts_len, kind;
/* Zero out all flags but those set in SYN */
to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
- | TCP_OPTS_FLAG_SACK);
+ | TCP_OPTS_FLAG_TSTAMP | TCP_OPTION_MSS);
for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
{
switch (kind)
{
case TCP_OPTION_MSS:
+ if (!is_syn)
+ break;
if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
{
to->flags |= TCP_OPTS_FLAG_MSS;
}
break;
case TCP_OPTION_WINDOW_SCALE:
+ if (!is_syn)
+ break;
if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
{
to->flags |= TCP_OPTS_FLAG_WSCALE;
to->wscale = data[2];
if (to->wscale > TCP_MAX_WND_SCALE)
- {
- clib_warning ("Illegal window scaling value: %d",
- to->wscale);
- to->wscale = TCP_MAX_WND_SCALE;
- }
+ to->wscale = TCP_MAX_WND_SCALE;
}
break;
case TCP_OPTION_TIMESTAMP:
- if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
+ if (is_syn)
+ to->flags |= TCP_OPTS_FLAG_TSTAMP;
+ if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
+ && opt_len == TCP_OPTION_LEN_TIMESTAMP)
{
- to->flags |= TCP_OPTS_FLAG_TSTAMP;
to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
}
break;
case TCP_OPTION_SACK_PERMITTED:
+ if (!is_syn)
+ break;
if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
break;
always_inline int
tcp_segment_check_paws (tcp_connection_t * tc)
{
- return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
+ return tcp_opts_tstamp (&tc->rcv_opts)
&& timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
goto error;
}
- if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
+ if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
{
- clib_warning ("options parse error");
*error0 = TCP_ERROR_OPTIONS;
goto error;
}
if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
{
*error0 = TCP_ERROR_PAWS;
- if (CLIB_DEBUG > 2)
- clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
vnet_buffer (b0)->tcp.seq_end);
if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
tcp_time_now_w_thread (tc0->c_thread_index)))
{
- /* Age isn't reset until we get a valid tsval (bsd inspired) */
- tc0->tsval_recent = 0;
- clib_warning ("paws failed - really old segment. REALLY?");
+ tc0->tsval_recent = tc0->rcv_opts.tsval;
+ clib_warning ("paws failed: 24-day old segment");
}
- else
+ /* Drop after ack if not rst. Resets can fail paws check as per
+ * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
+ * be subjected to the PAWS check by verifying an acceptable value in
+ * SEG.TSval */
+ else if (!tcp_rst (th0))
{
- /* Drop after ack if not rst */
- if (!tcp_rst (th0))
- {
- tcp_program_ack (wrk, tc0);
- TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
- }
+ tcp_program_ack (wrk, tc0);
+ TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
+ goto error;
}
- goto error;
}
/* 1st: check sequence number */
/* TODO implement RFC 5961 */
if (tc0->state == TCP_STATE_SYN_RCVD)
{
+ tcp_options_parse (th0, &tc0->rcv_opts, 1);
tcp_send_synack (tc0);
TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
}
if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
{
- tc->mrtt_us = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
- mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
+ f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
+ tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
+ mrtt = clib_max ((u32) (sample * THZ), 1);
+ /* Allow measuring of a new RTT */
+ tc->rtt_ts = 0;
}
/* As per RFC7323 TSecr can be used for RTTM only if the segment advances
* snd_una, i.e., the left side of the send window:
done:
- /* Allow measuring of a new RTT */
- tc->rtt_ts = 0;
-
/* If we got here something must've been ACKed so make sure boff is 0,
* even if mrtt is not valid since we update the rto lower */
tc->rto_boff = 0;
return 0;
}
+static void
+tcp_estimate_initial_rtt (tcp_connection_t * tc)
+{
+ u8 thread_index = vlib_num_workers ()? 1 : 0;
+ int mrtt;
+
+ if (tc->rtt_ts)
+ {
+ tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
+ mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
+ tc->rtt_ts = 0;
+ }
+ else
+ {
+ mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
+ mrtt = clib_max (mrtt, 1);
+ tc->mrtt_us = (f64) mrtt *TCP_TICK;
+ }
+
+ if (mrtt > 0 && mrtt < TCP_RTT_MAX)
+ tcp_estimate_rtt (tc, mrtt);
+}
+
/**
* Dequeue bytes for connections that have received acks in last burst
*/
tc = tcp_connection_get (pending_deq_acked[i], thread_index);
tc->flags &= ~TCP_CONN_DEQ_PENDING;
+ if (PREDICT_FALSE (!tc->burst_acked))
+ continue;
+
/* Dequeue the newly ACKed bytes */
stream_session_dequeue_drop (&tc->connection, tc->burst_acked);
tc->burst_acked = 0;
tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
+ if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
+ {
+ if (seq_leq (tc->psh_seq, tc->snd_una))
+ tc->flags &= ~TCP_CONN_PSH_PENDING;
+ }
+
/* If everything has been acked, stop retransmit timer
* otherwise update. */
tcp_retransmit_timer_update (tc);
+
+ /* If not congested, update pacer based on our new
+ * cwnd estimate */
+ if (!tcp_in_fastrecovery (tc))
+ tcp_connection_tx_pacer_update (tc);
}
_vec_len (wrk->pending_deq_acked) = 0;
}
{
if (seq_lt (blk->start, blk->end)
&& seq_gt (blk->start, tc->snd_una)
- && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_una_max))
+ && seq_gt (blk->start, ack)
+ && seq_lt (blk->start, tc->snd_una_max)
+ && seq_leq (blk->end, tc->snd_una_max))
{
blk++;
continue;
tcp_update_rto (tc);
tc->snd_rxt_ts = 0;
tc->snd_nxt = tc->snd_una_max;
+ tc->rtt_ts = 0;
tcp_recovery_off (tc);
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}
tc->rcv_dupacks = 0;
tc->snd_nxt = tc->snd_una_max;
tc->snd_rxt_bytes = 0;
+ tc->rtt_ts = 0;
tcp_fastrecovery_off (tc);
- tcp_fastrecovery_1_smss_off (tc);
tcp_fastrecovery_first_off (tc);
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
* Legitimate ACK. 1) See if we can exit recovery
*/
+ /* Update the pacing rate. For the first partial ack we move from
+ * the artificially constrained rate to the one after congestion */
+ tcp_connection_tx_pacer_update (tc);
+
if (seq_geq (tc->snd_una, tc->snd_congestion))
{
tcp_retransmit_timer_update (tc);
* Legitimate ACK. 2) If PARTIAL ACK try to retransmit
*/
- /* Update the pacing rate. For the first partial ack we move from
- * the artificially constrained rate to the one after congestion */
- tcp_connection_tx_pacer_update (tc);
-
/* XXX limit this only to first partial ack? */
- tcp_retransmit_timer_force_update (tc);
+ tcp_retransmit_timer_update (tc);
/* RFC6675: If the incoming ACK is a cumulative acknowledgment,
* reset dupacks to 0. Also needed if in congestion recovery */
{
tcp_cc_handle_event (tc, is_dack);
if (!tcp_in_cong_recovery (tc))
- return 0;
+ {
+ *error = TCP_ERROR_ACK_OK;
+ return 0;
+ }
*error = TCP_ERROR_ACK_DUP;
if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
return 0;
return 0;
}
+static void
+tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ if (!tcp_disconnect_pending (tc))
+ {
+ vec_add1 (wrk->pending_disconnects, tc->c_c_index);
+ tcp_disconnect_pending_on (tc);
+ }
+}
+
+static void
+tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
+{
+ u32 thread_index, *pending_disconnects;
+ tcp_connection_t *tc;
+ int i;
+
+ if (!vec_len (wrk->pending_disconnects))
+ return;
+
+ thread_index = wrk->vm->thread_index;
+ pending_disconnects = wrk->pending_disconnects;
+ for (i = 0; i < vec_len (pending_disconnects); i++)
+ {
+ tc = tcp_connection_get (pending_disconnects[i], thread_index);
+ tcp_disconnect_pending_off (tc);
+ stream_session_disconnect_notify (&tc->connection);
+ }
+ _vec_len (wrk->pending_disconnects) = 0;
+}
+
+static void
+tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
+ u32 * error)
+{
+ /* Enter CLOSE-WAIT and notify session. To avoid lingering
+ * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
+ /* Account for the FIN if nothing else was received */
+ if (vnet_buffer (b)->tcp.data_len == 0)
+ tc->rcv_nxt += 1;
+ tcp_program_ack (wrk, tc);
+ tc->state = TCP_STATE_CLOSE_WAIT;
+ tcp_program_disconnect (wrk, tc);
+ tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
+ TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc);
+ *error = TCP_ERROR_FIN_RCVD;
+}
+
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
{
if (tc0)
{
- clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
+ clib_memcpy_fast (&t0->tcp_connection, tc0,
+ sizeof (t0->tcp_connection));
}
else
{
th0 = tcp_buffer_hdr (b0);
}
- clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}
static void
/* 8: check the FIN bit */
if (PREDICT_FALSE (is_fin))
- {
- /* Enter CLOSE-WAIT and notify session. To avoid lingering
- * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
- /* Account for the FIN if nothing else was received */
- if (vnet_buffer (b0)->tcp.data_len == 0)
- tc0->rcv_nxt += 1;
- tcp_program_ack (wrk, tc0);
- tc0->state = TCP_STATE_CLOSE_WAIT;
- stream_session_disconnect_notify (&tc0->connection);
- tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
- TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
- error0 = TCP_ERROR_FIN_RCVD;
- }
+ tcp_rcv_fin (wrk, tc0, b0, &error0);
done:
tcp_inc_err_counter (err_counters, error0, 1);
errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
thread_index);
- err_counters[TCP_ERROR_EVENT_FIFO_FULL] = errors;
+ err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
tcp_store_err_counters (established, err_counters);
tcp_handle_postponed_dequeues (wrk);
+ tcp_handle_disconnects (wrk);
vlib_buffer_free (vm, first_buffer, frame->n_vectors);
return frame->n_vectors;
}
/* Parse options */
- if (tcp_options_parse (tcp0, &tc0->rcv_opts))
+ if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
{
clib_warning ("options parse fail");
error0 = TCP_ERROR_OPTIONS;
/* Valid SYN or SYN-ACK. Move connection from half-open pool to
* current thread pool. */
pool_get (tm->connections[my_thread_index], new_tc0);
- clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));
+ clib_memcpy_fast (new_tc0, tc0, sizeof (*new_tc0));
new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
new_tc0->c_thread_index = my_thread_index;
new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
if (tcp_opts_wscale (&new_tc0->rcv_opts))
new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
+ else
+ new_tc0->rcv_wscale = 0;
new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
<< new_tc0->snd_wscale;
goto drop;
}
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
/* Update rtt with the syn-ack sample */
- tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number);
+ tcp_estimate_initial_rtt (new_tc0);
TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
error0 = TCP_ERROR_SYN_ACKS_RCVD;
}
goto drop;
}
- tc0->rtt_ts = 0;
- tcp_init_snd_vars (tc0);
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
+ new_tc0->rtt_ts = 0;
+ tcp_init_snd_vars (new_tc0);
tcp_send_synack (new_tc0);
error0 = TCP_ERROR_SYNS_RCVD;
goto drop;
if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
{
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
- clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
+ clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_connection, tc0,
+ sizeof (t0->tcp_connection));
}
}
errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
my_thread_index);
- tcp_inc_counter (syn_sent, TCP_ERROR_EVENT_FIFO_FULL, errors);
+ tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
return from_frame->n_vectors;
*/
if (!tcp_rcv_ack_is_acceptable (tc0, b0))
{
- TCP_DBG ("connection not accepted");
- tcp_send_reset_w_pkt (tc0, b0, is_ip4);
+ tcp_connection_reset (tc0);
error0 = TCP_ERROR_ACK_INVALID;
goto drop;
}
+ /* Make sure the ack is exactly right */
+ if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
+ {
+ tcp_connection_reset (tc0);
+ error0 = TCP_ERROR_SEGMENT_INVALID;
+ goto drop;
+ }
+
/* Update rtt and rto */
- tcp_update_rtt (tc0, vnet_buffer (b0)->tcp.ack_number);
+ tcp_estimate_initial_rtt (tc0);
/* Switch state to ESTABLISHED */
tc0->state = TCP_STATE_ESTABLISHED;
/* Reset SYN-ACK retransmit and SYN_RCV establish timers */
tcp_retransmit_timer_reset (tc0);
tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
- stream_session_accept_notify (&tc0->connection);
+ if (stream_session_accept_notify (&tc0->connection))
+ {
+ error0 = TCP_ERROR_MSG_QUEUE_FULL;
+ tcp_connection_reset (tc0);
+ goto drop;
+ }
error0 = TCP_ERROR_ACK_OK;
break;
case TCP_STATE_ESTABLISHED:
* wait for peer's FIN but not indefinitely. */
tcp_connection_timers_reset (tc0);
tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
+
+ /* Don't try to deq the FIN acked */
+ if (tc0->burst_acked > 1)
+ stream_session_dequeue_drop (&tc0->connection,
+ tc0->burst_acked - 1);
+ tc0->burst_acked = 0;
}
break;
case TCP_STATE_FIN_WAIT_2:
* acknowledged ("ok") but do not delete the TCB. */
if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
goto drop;
+ tc0->burst_acked = 0;
break;
case TCP_STATE_CLOSE_WAIT:
/* Do the same processing as for the ESTABLISHED state. */
* we can't ensure that we have no packets already enqueued
* to output. Rely instead on the waitclose timer */
tcp_connection_timers_reset (tc0);
- tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, 1);
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
goto drop;
tcp_send_fin (tc0);
stream_session_disconnect_notify (&tc0->connection);
tc0->state = TCP_STATE_CLOSE_WAIT;
+ tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
break;
case TCP_STATE_CLOSE_WAIT:
errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
thread_index);
- tcp_inc_counter (rcv_process, TCP_ERROR_EVENT_FIFO_FULL, errors);
+ tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
tcp_handle_postponed_dequeues (wrk);
vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
}
/* Create child session and send SYN-ACK */
- child0 = tcp_connection_new (my_thread_index);
+ child0 = tcp_connection_alloc (my_thread_index);
child0->c_lcl_port = th0->dst_port;
child0->c_rmt_port = th0->src_port;
child0->c_is_ip4 = is_ip4;
}
else
{
- clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
- sizeof (ip6_address_t));
- clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
+ sizeof (ip6_address_t));
+ clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
+ sizeof (ip6_address_t));
}
- if (tcp_options_parse (th0, &child0->rcv_opts))
+ if (tcp_options_parse (th0, &child0->rcv_opts, 1))
{
- clib_warning ("options parse fail");
+ error0 = TCP_ERROR_OPTIONS;
+ tcp_connection_free (child0);
goto drop;
}
if (stream_session_accept (&child0->connection, lc0->c_s_index,
0 /* notify */ ))
{
- clib_warning ("session accept fail");
tcp_connection_cleanup (child0);
error0 = TCP_ERROR_CREATE_SESSION_FAIL;
goto drop;
}
+ child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
tcp_send_synack (child0);
tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
- clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
- clib_memcpy (&t0->tcp_connection, lc0, sizeof (t0->tcp_connection));
+ clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
+ clib_memcpy_fast (&t0->tcp_connection, lc0,
+ sizeof (t0->tcp_connection));
}
n_syns += (error0 == TCP_ERROR_NONE);
if (is_ip4)
{
ip4_header_t *ip4 = vlib_buffer_get_current (b);
+ int ip_hdr_bytes = ip4_header_bytes (ip4);
+ if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
+ {
+ *error = TCP_ERROR_LENGTH;
+ return 0;
+ }
tcp = ip4_next_header (ip4);
vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
- n_advance_bytes = (ip4_header_bytes (ip4) + tcp_header_bytes (tcp));
+ n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;
/* Length check. Checksum computed by ipx_local no need to compute again */
- if (PREDICT_FALSE (n_advance_bytes < 0))
+ if (PREDICT_FALSE (n_data_bytes < 0))
{
*error = TCP_ERROR_LENGTH;
return 0;
else
{
ip6_header_t *ip6 = vlib_buffer_get_current (b);
+ if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
+ {
+ *error = TCP_ERROR_LENGTH;
+ return 0;
+ }
tcp = ip6_next_header (ip6);
vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
n_advance_bytes = tcp_header_bytes (tcp);
- n_advance_bytes;
n_advance_bytes += sizeof (ip6[0]);
- if (PREDICT_FALSE (n_advance_bytes < 0))
+ if (PREDICT_FALSE (n_data_bytes < 0))
{
*error = TCP_ERROR_LENGTH;
return 0;
vnet_buffer (b)->tcp.flags = tc->state;
if (*error == TCP_ERROR_DISPATCH)
- clib_warning ("disp error state %U flags %U", format_tcp_state,
- state, format_tcp_flags, (int) flags);
+ clib_warning ("tcp conn %u disp error state %U flags %U",
+ tc->c_c_index, format_tcp_state, state,
+ format_tcp_flags, (int) flags);
}
}
/* FIN in reply to our FIN from the other side */
_(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
_(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
+ _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
+ TCP_ERROR_NONE);
_(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
/* FIN confirming that the peer (app) has closed */
_(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);