}
/**
- * TCP's IW as recommended by RFC6928
+ * Update max segment size we're able to process.
+ *
+ * The value is constrained by our interface's MTU and IP options. It is
+ * also what we advertise to our peer.
+ */
+void
+tcp_update_rcv_mss (tcp_connection_t * tc)
+{
+ /* TODO find our iface MTU */
+ /* NOTE(review): dummy_mtu is a fixed placeholder, not the real interface
+  * MTU — the advertised rcv MSS is effectively hard-coded until the TODO
+  * above is resolved. */
+ tc->mss = dummy_mtu;
+}
+
+/**
+ * TCP's initial window
*/
always_inline u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
- return TCP_IW_N_SEGMENTS * tc->mss;
+ /* RFC 6928 recommends a larger initial window (IW of several segments).
+ * However, at the time our connections are initialized, fifos may not be
+ * allocated. Therefore, advertise the smallest possible unscaled window
+ * size and update once fifos are assigned to the session.
+ */
+ /*
+ tcp_update_rcv_mss (tc);
+ TCP_IW_N_SEGMENTS * tc->mss;
+ */
+ /* Provisional window: the minimum rx fifo size we ever allocate */
+ return TCP_MIN_RX_FIFO_SIZE;
}
/**
}
}
-/**
- * Update max segment size we're able to process.
- *
- * The value is constrained by our interface's MTU and IP options. It is
- * also what we advertise to our peer.
- */
-void
-tcp_update_rcv_mss (tcp_connection_t * tc)
-{
- /* TODO find our iface MTU */
- tc->mss = dummy_mtu;
-}
-
/**
* Update snd_mss to reflect the effective segment size that we can send
* by taking into account all TCP options, including SACKs
/* Init retransmit timer */
tcp_retransmit_timer_set (tc);
+ TCP_EVT_DBG (TCP_EVT_SYNACK_SENT, tc);
}
always_inline void
u32 *to_next, next_index;
vlib_frame_t *f;
- b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
b->error = 0;
/* Default FIB for now */
* Send reset without reusing existing buffer
*/
void
-tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4)
+tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
{
vlib_buffer_t *b;
u32 bi;
{
flags = TCP_FLAG_RST;
seq = pkt_th->ack_number;
- ack = 0;
+ ack = (tc && tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
}
else
{
}
tcp_enqueue_to_ip_lookup (vm, b, bi, is_ip4);
+ TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
void
tcp_push_ip_hdr (tm, tc, b);
tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);
+ TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
}
always_inline void
u32 *to_next, next_index;
vlib_frame_t *f;
- b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
b->error = 0;
/* Decide where to send the packet */
if (is_syn)
{
tc = tcp_half_open_connection_get (index);
+ tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
}
else
{
tc = tcp_connection_get (index, thread_index);
+ tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
}
- /* Make sure timer handle is set to invalid */
- tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
-
if (!tcp_in_recovery (tc) && tc->rto_boff > 0
&& tc->state >= TCP_STATE_ESTABLISHED)
{
/* Retransmit for SYN/SYNACK */
else if (tc->state == TCP_STATE_SYN_RCVD || tc->state == TCP_STATE_SYN_SENT)
{
+ /* Half-open connection actually moved to established but we were
+ * waiting for syn retransmit to pop to call cleanup from the right
+ * thread. */
+ if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
+ {
+ ASSERT (tc->state == TCP_STATE_SYN_SENT);
+ if (tcp_half_open_connection_cleanup (tc))
+ {
+ clib_warning ("could not remove half-open connection");
+ ASSERT (0);
+ }
+ return;
+ }
+
/* Try without increasing RTO a number of times. If this fails,
* start growing RTO exponentially */
if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
-
tcp_push_hdr_i (tc, b, tc->state, 1);
/* Account for the SYN */
tc->snd_nxt += 1;
tc->rtt_ts = 0;
+ TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc,
+ (tc->state == TCP_STATE_SYN_SENT ? 0 : 1));
}
else
{
{
ASSERT (tc->state == TCP_STATE_SYN_SENT);
- TCP_EVT_DBG (TCP_EVT_SYN_RTX, tc);
-
/* This goes straight to ipx_lookup */
tcp_push_ip_hdr (tm, tc, b);
tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);
ip4_header_t *ih0;
ih0 = vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4,
&tc0->c_rmt_ip4, IP_PROTOCOL_TCP);
- th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih0);
+ b0->flags |= VNET_BUFFER_F_IS_IP4 |
+ VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
+ vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
+ th0->checksum = 0;
}
else
{
ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6,
&tc0->c_rmt_ip6, IP_PROTOCOL_TCP);
-	th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih0,
-	 &bogus);
+
+ b0->flags |= VNET_BUFFER_F_IS_IP6 |
+ VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
+ vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
+ th0->checksum = 0;
+ /* NOTE(review): the checksum call that wrote 'bogus' is removed above,
+  * so this ASSERT now tests a stale/uninitialized value. The patch should
+  * also delete the ASSERT (and the 'bogus' local) in this branch. */
ASSERT (!bogus);
}
vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
- b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
done:
b0->error = node->errors[error0];
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
done:
b0->error = node->errors[error0];
- b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED;
+ b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
th0 = vlib_buffer_get_current (b0);