/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef SRC_VNET_TCP_TCP_INLINES_H_
17 #define SRC_VNET_TCP_TCP_INLINES_H_
19 #include <vnet/tcp/tcp.h>
22 tcp_node_inc_counter_i (vlib_main_t *vm, u32 tcp4_node, u32 tcp6_node,
23 u8 is_ip4, u32 evt, u32 val)
26 vlib_node_increment_counter (vm, tcp4_node, evt, val);
28 vlib_node_increment_counter (vm, tcp6_node, evt, val);
/* Helper macros for per-node error accounting. They expect `vm` and
 * `is_ip4` to be in scope at the expansion site (true for the tcp input
 * nodes that use them). Restored the macro bodies and line continuations
 * lost to truncation; braces prevent dangling-statement surprises when
 * expanded inside if/else. */
#define tcp_inc_counter(node_id, err, count)                                  \
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,                    \
			  tcp6_##node_id##_node.index, is_ip4, err, count)
/* Count the error only when the packet is not already headed to drop */
#define tcp_maybe_inc_err_counter(cnts, err)                                  \
  {                                                                           \
    cnts[err] += (next0 != tcp_next_drop (is_ip4));                           \
  }
#define tcp_inc_err_counter(cnts, err, val)                                   \
  {                                                                           \
    cnts[err] += val;                                                         \
  }
/* Flush a local error-count array into the node counters, skipping zeros */
#define tcp_store_err_counters(node_id, cnts)                                 \
  {                                                                           \
    int i;                                                                    \
    for (i = 0; i < TCP_N_ERROR; i++)                                         \
      if (cnts[i])                                                            \
	tcp_inc_counter (node_id, i, cnts[i]);                                \
  }
50 always_inline tcp_header_t *
51 tcp_buffer_hdr (vlib_buffer_t * b)
53 ASSERT ((signed) b->current_data >= (signed) -VLIB_BUFFER_PRE_DATA_SIZE);
54 return (tcp_header_t *) (b->data + b->current_data
55 + vnet_buffer (b)->tcp.hdr_offset);
58 always_inline tcp_connection_t *
59 tcp_connection_get (u32 conn_index, u32 thread_index)
61 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
62 if (PREDICT_FALSE (pool_is_free_index (wrk->connections, conn_index)))
64 return pool_elt_at_index (wrk->connections, conn_index);
67 always_inline tcp_connection_t *
68 tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
70 tcp_worker_ctx_t *wrk;
71 if (thread_index >= vec_len (tcp_main.wrk_ctx))
73 wrk = tcp_get_worker (thread_index);
74 if (pool_is_free_index (wrk->connections, conn_index))
76 return pool_elt_at_index (wrk->connections, conn_index);
80 tcp_connection_set_state (tcp_connection_t * tc, tcp_state_t state)
83 TCP_EVT (TCP_EVT_STATE_CHANGE, tc);
86 always_inline tcp_connection_t *
87 tcp_listener_get (u32 tli)
89 tcp_connection_t *tc = 0;
90 if (!pool_is_free_index (tcp_main.listener_pool, tli))
91 tc = pool_elt_at_index (tcp_main.listener_pool, tli);
95 always_inline tcp_connection_t *
96 tcp_half_open_connection_get (u32 conn_index)
98 return tcp_connection_get (conn_index, transport_cl_thread ());
102 * Our estimate of the number of bytes that have left the network
105 tcp_bytes_out (const tcp_connection_t * tc)
107 if (tcp_opts_sack_permitted (&tc->rcv_opts))
108 return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
110 return clib_min (tc->rcv_dupacks * tc->snd_mss,
111 tc->snd_nxt - tc->snd_una);
115 * Our estimate of the number of bytes in flight (pipe size)
118 tcp_flight_size (const tcp_connection_t * tc)
122 flight_size = (int) (tc->snd_nxt - tc->snd_una) - tcp_bytes_out (tc)
123 + tc->snd_rxt_bytes - tc->rxt_delivered;
125 ASSERT (flight_size >= 0);
131 * Initial cwnd as per RFC5681
134 tcp_initial_cwnd (const tcp_connection_t * tc)
136 if (tcp_cfg.initial_cwnd_multiplier > 0)
137 return tcp_cfg.initial_cwnd_multiplier * tc->snd_mss;
139 if (tc->snd_mss > 2190)
140 return 2 * tc->snd_mss;
141 else if (tc->snd_mss > 1095)
142 return 3 * tc->snd_mss;
144 return 4 * tc->snd_mss;
148 * Accumulate acked bytes for cwnd increase
150 * Once threshold bytes are accumulated, snd_mss bytes are added
154 tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
156 tc->cwnd_acc_bytes += bytes;
157 if (tc->cwnd_acc_bytes >= thresh)
159 u32 inc = tc->cwnd_acc_bytes / thresh;
160 tc->cwnd_acc_bytes -= inc * thresh;
161 tc->cwnd += inc * tc->snd_mss;
162 tc->cwnd = clib_min (tc->cwnd, tc->tx_fifo_size);
167 tcp_loss_wnd (const tcp_connection_t * tc)
169 /* Whatever we have in flight + the packet we're about to send */
170 return tcp_flight_size (tc) + tc->snd_mss;
174 tcp_available_snd_wnd (const tcp_connection_t * tc)
176 return clib_min (tc->cwnd, tc->snd_wnd);
180 tcp_available_output_snd_space (const tcp_connection_t * tc)
182 u32 available_wnd = tcp_available_snd_wnd (tc);
183 int flight_size = (int) (tc->snd_nxt - tc->snd_una);
185 if (available_wnd <= flight_size)
188 return available_wnd - flight_size;
192 * Estimate of how many bytes we can still push into the network
195 tcp_available_cc_snd_space (const tcp_connection_t * tc)
197 u32 available_wnd = tcp_available_snd_wnd (tc);
198 u32 flight_size = tcp_flight_size (tc);
200 if (available_wnd <= flight_size)
203 return available_wnd - flight_size;
207 tcp_is_lost_fin (tcp_connection_t * tc)
209 if ((tc->flags & TCP_CONN_FINSNT) && (tc->snd_nxt - tc->snd_una == 1))
215 * Time used to generate timestamps, not the timestamp
218 tcp_time_tstamp (u32 thread_index)
220 return tcp_main.wrk_ctx[thread_index].time_tstamp;
224 * Generate timestamp for tcp connection
227 tcp_tstamp (tcp_connection_t * tc)
229 return (tcp_main.wrk_ctx[tc->c_thread_index].time_tstamp -
230 tc->timestamp_delta);
234 tcp_time_now_us (u32 thread_index)
236 return tcp_main.wrk_ctx[thread_index].time_us;
240 tcp_set_time_now (tcp_worker_ctx_t *wrk, f64 now)
242 /* TCP internal cache of time reference. Could use @ref transport_time_now
243 * but because @ref tcp_time_now_us is used per packet, caching might
244 * slightly improve efficiency. */
246 wrk->time_tstamp = (u64) (now * TCP_TSTP_HZ);
250 tcp_update_time_now (tcp_worker_ctx_t *wrk)
252 f64 now = vlib_time_now (wrk->vm);
254 /* Both pacer and tcp us time need to be updated */
255 transport_update_pacer_time (wrk->vm->thread_index, now);
256 tcp_set_time_now (wrk, now);
259 always_inline tcp_connection_t *
260 tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
261 u8 is_ip4, u8 is_nolookup)
263 u32 fib_index = vnet_buffer (b)->ip.fib_index;
264 int n_advance_bytes, n_data_bytes;
265 transport_connection_t *tc;
271 ip4_header_t *ip4 = vlib_buffer_get_current (b);
272 int ip_hdr_bytes = ip4_header_bytes (ip4);
273 if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
275 *error = TCP_ERROR_LENGTH;
278 tcp = ip4_next_header (ip4);
279 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
280 n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
281 n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;
283 /* Length check. Checksum computed by ipx_local no need to compute again */
284 if (PREDICT_FALSE (n_data_bytes < 0))
286 *error = TCP_ERROR_LENGTH;
291 tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
292 &ip4->src_address, tcp->dst_port,
294 TRANSPORT_PROTO_TCP, thread_index,
299 ip6_header_t *ip6 = vlib_buffer_get_current (b);
300 if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
302 *error = TCP_ERROR_LENGTH;
305 tcp = ip6_next_header (ip6);
306 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
307 n_advance_bytes = tcp_header_bytes (tcp);
308 n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
310 n_advance_bytes += sizeof (ip6[0]);
312 if (PREDICT_FALSE (n_data_bytes < 0))
314 *error = TCP_ERROR_LENGTH;
321 (ip6_address_is_link_local_unicast (&ip6->dst_address)))
323 ip6_main_t *im = &ip6_main;
324 fib_index = vec_elt (im->fib_index_by_sw_if_index,
325 vnet_buffer (b)->ip.rx_sw_if_index);
328 tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
330 tcp->dst_port, tcp->src_port,
332 thread_index, &result);
336 /* Set the sw_if_index[VLIB_RX] to the interface we received
337 * the connection on (the local interface) */
338 vnet_buffer (b)->sw_if_index[VLIB_RX] = vnet_buffer (b)->ip.rx_sw_if_index;
342 (transport_connection_t *) tcp_connection_get (vnet_buffer (b)->
343 tcp.connection_index,
346 vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
347 vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
348 vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
349 vnet_buffer (b)->tcp.data_len = n_data_bytes;
350 vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
353 *error = result ? TCP_ERROR_NONE + result : *error;
355 return tcp_get_connection_from_transport (tc);
359 * Initialize connection by gleaning network and rcv params from buffer
361 * @param tc connection to initialize
362 * @param b buffer whose current data is pointing at ip
363 * @param is_ip4 flag set to 1 if using ip4
366 tcp_init_w_buffer (tcp_connection_t * tc, vlib_buffer_t * b, u8 is_ip4)
368 tcp_header_t *th = tcp_buffer_hdr (b);
370 tc->c_lcl_port = th->dst_port;
371 tc->c_rmt_port = th->src_port;
372 tc->c_is_ip4 = is_ip4;
376 ip4_header_t *ip4 = vlib_buffer_get_current (b);
377 tc->c_lcl_ip4.as_u32 = ip4->dst_address.as_u32;
378 tc->c_rmt_ip4.as_u32 = ip4->src_address.as_u32;
382 ip6_header_t *ip6 = vlib_buffer_get_current (b);
383 clib_memcpy_fast (&tc->c_lcl_ip6, &ip6->dst_address,
384 sizeof (ip6_address_t));
385 clib_memcpy_fast (&tc->c_rmt_ip6, &ip6->src_address,
386 sizeof (ip6_address_t));
389 tc->irs = vnet_buffer (b)->tcp.seq_number;
390 tc->rcv_nxt = vnet_buffer (b)->tcp.seq_number + 1;
391 tc->rcv_las = tc->rcv_nxt;
392 tc->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
393 tc->snd_wl1 = vnet_buffer (b)->tcp.seq_number;
394 tc->snd_wl2 = vnet_buffer (b)->tcp.ack_number;
396 /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
397 * segments are used to initialize PAWS. */
398 if (tcp_opts_tstamp (&tc->rcv_opts))
400 tc->tsval_recent = tc->rcv_opts.tsval;
401 tc->tsval_recent_age = tcp_time_tstamp (tc->c_thread_index);
404 if (tcp_opts_wscale (&tc->rcv_opts))
405 tc->snd_wscale = tc->rcv_opts.wscale;
407 tc->snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale;
411 tcp_update_rto (tcp_connection_t * tc)
413 tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
414 tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
418 tcp_is_descheduled (tcp_connection_t * tc)
420 return (transport_connection_is_descheduled (&tc->connection) ? 1 : 0);
424 * Push TCP header to buffer
426 * @param vm - vlib_main
427 * @param b - buffer to write the header to
428 * @param sp_net - source port net order
429 * @param dp_net - destination port net order
430 * @param seq - sequence number net order
431 * @param ack - ack number net order
432 * @param tcp_hdr_opts_len - header and options length in bytes
433 * @param flags - header flags
434 * @param wnd - window size
436 * @return - pointer to start of TCP header
439 vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq,
440 u32 ack, u8 tcp_hdr_opts_len, u8 flags,
445 th = vlib_buffer_push_uninit (b, tcp_hdr_opts_len);
449 th->seq_number = seq;
450 th->ack_number = ack;
451 th->data_offset_and_reserved = (tcp_hdr_opts_len >> 2) << 4;
455 th->urgent_pointer = 0;
456 vnet_buffer (b)->l4_hdr_offset = (u8 *) th - b->data;
457 b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
462 * Push TCP header to buffer
464 * @param b - buffer to write the header to
465 * @param sp_net - source port net order
466 * @param dp_net - destination port net order
467 * @param seq - sequence number host order
468 * @param ack - ack number host order
469 * @param tcp_hdr_opts_len - header and options length in bytes
470 * @param flags - header flags
471 * @param wnd - window size
473 * @return - pointer to start of TCP header
476 vlib_buffer_push_tcp (vlib_buffer_t * b, u16 sp_net, u16 dp_net, u32 seq,
477 u32 ack, u8 tcp_hdr_opts_len, u8 flags, u16 wnd)
479 return vlib_buffer_push_tcp_net_order (b, sp_net, dp_net,
480 clib_host_to_net_u32 (seq),
481 clib_host_to_net_u32 (ack),
482 tcp_hdr_opts_len, flags,
483 clib_host_to_net_u16 (wnd));
486 #endif /* SRC_VNET_TCP_TCP_INLINES_H_ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */