2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 * @brief TCP host stack utilities
21 #include <vnet/tcp/tcp.h>
22 #include <vnet/session/session.h>
23 #include <vnet/fib/fib.h>
24 #include <vnet/dpo/load_balance.h>
25 #include <vnet/dpo/receive_dpo.h>
26 #include <vnet/ip/ip6_neighbor.h>
33 fib_protocol_t nh_proto;
34 vnet_link_t link_type;
38 } tcp_add_del_adj_args_t;
41 tcp_add_del_adj_cb (tcp_add_del_adj_args_t * args)
46 adj_nbr_add_or_lock (args->nh_proto, args->link_type, &args->ip,
51 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &args->ip,
53 if (ai != ADJ_INDEX_INVALID)
59 tcp_add_del_adjacency (tcp_connection_t * tc, u8 is_add)
61 tcp_add_del_adj_args_t args = {
62 .nh_proto = FIB_PROTOCOL_IP6,
63 .link_type = VNET_LINK_IP6,
65 .sw_if_index = tc->sw_if_index,
68 vlib_rpc_call_main_thread (tcp_add_del_adj_cb, (u8 *) & args,
73 tcp_connection_bind (u32 session_index, transport_endpoint_t * lcl)
75 tcp_main_t *tm = &tcp_main;
76 tcp_connection_t *listener;
79 pool_get (tm->listener_pool, listener);
80 clib_memset (listener, 0, sizeof (*listener));
82 listener->c_c_index = listener - tm->listener_pool;
83 listener->c_lcl_port = lcl->port;
85 /* If we are provided a sw_if_index, bind using one of its ips */
86 if (ip_is_zero (&lcl->ip, 1) && lcl->sw_if_index != ENDPOINT_INVALID_INDEX)
88 if ((iface_ip = ip_interface_get_first_ip (lcl->sw_if_index,
90 ip_set (&lcl->ip, iface_ip, lcl->is_ip4);
92 ip_copy (&listener->c_lcl_ip, &lcl->ip, lcl->is_ip4);
93 listener->c_is_ip4 = lcl->is_ip4;
94 listener->c_proto = TRANSPORT_PROTO_TCP;
95 listener->c_s_index = session_index;
96 listener->c_fib_index = lcl->fib_index;
97 listener->state = TCP_STATE_LISTEN;
99 tcp_connection_timers_init (listener);
101 TCP_EVT_DBG (TCP_EVT_BIND, listener);
103 return listener->c_c_index;
107 tcp_session_bind (u32 session_index, transport_endpoint_t * tep)
109 return tcp_connection_bind (session_index, tep);
113 tcp_connection_unbind (u32 listener_index)
115 tcp_main_t *tm = vnet_get_tcp_main ();
116 tcp_connection_t *tc;
118 tc = pool_elt_at_index (tm->listener_pool, listener_index);
120 TCP_EVT_DBG (TCP_EVT_UNBIND, tc);
122 /* Poison the entry */
124 clib_memset (tc, 0xFA, sizeof (*tc));
126 pool_put_index (tm->listener_pool, listener_index);
130 tcp_session_unbind (u32 listener_index)
132 tcp_connection_unbind (listener_index);
136 static transport_connection_t *
137 tcp_session_get_listener (u32 listener_index)
139 tcp_main_t *tm = vnet_get_tcp_main ();
140 tcp_connection_t *tc;
141 tc = pool_elt_at_index (tm->listener_pool, listener_index);
142 return &tc->connection;
146 * Cleanup half-open connection
150 tcp_half_open_connection_del (tcp_connection_t * tc)
152 tcp_main_t *tm = vnet_get_tcp_main ();
153 clib_spinlock_lock_if_init (&tm->half_open_lock);
154 pool_put_index (tm->half_open_connections, tc->c_c_index);
156 clib_memset (tc, 0xFA, sizeof (*tc));
157 clib_spinlock_unlock_if_init (&tm->half_open_lock);
161 * Try to cleanup half-open connection
163 * If called from a thread that doesn't own tc, the call won't have any
166 * @param tc - connection to be cleaned up
167 * @return non-zero if cleanup failed.
170 tcp_half_open_connection_cleanup (tcp_connection_t * tc)
172 /* Make sure this is the owning thread */
173 if (tc->c_thread_index != vlib_get_thread_index ())
175 tcp_timer_reset (tc, TCP_TIMER_ESTABLISH_AO);
176 tcp_timer_reset (tc, TCP_TIMER_RETRANSMIT_SYN);
177 tcp_half_open_connection_del (tc);
181 static tcp_connection_t *
182 tcp_half_open_connection_new (void)
184 tcp_main_t *tm = vnet_get_tcp_main ();
185 tcp_connection_t *tc = 0;
186 ASSERT (vlib_get_thread_index () == 0);
187 pool_get (tm->half_open_connections, tc);
188 clib_memset (tc, 0, sizeof (*tc));
189 tc->c_c_index = tc - tm->half_open_connections;
194 * Cleans up connection state.
199 tcp_connection_cleanup (tcp_connection_t * tc)
201 tcp_main_t *tm = &tcp_main;
203 /* Cleanup local endpoint if this was an active connect */
204 transport_endpoint_cleanup (TRANSPORT_PROTO_TCP, &tc->c_lcl_ip,
207 /* Check if connection is not yet fully established */
208 if (tc->state == TCP_STATE_SYN_SENT)
210 /* Try to remove the half-open connection. If this is not the owning
211 * thread, tc won't be removed. Retransmit or establish timers will
212 * eventually expire and call again cleanup on the right thread. */
213 if (tcp_half_open_connection_cleanup (tc))
214 tc->flags |= TCP_CONN_HALF_OPEN_DONE;
218 int thread_index = tc->c_thread_index;
220 /* Make sure all timers are cleared */
221 tcp_connection_timers_reset (tc);
223 if (!tc->c_is_ip4 && ip6_address_is_link_local_unicast (&tc->c_rmt_ip6))
224 tcp_add_del_adjacency (tc, 0);
226 /* Poison the entry */
228 clib_memset (tc, 0xFA, sizeof (*tc));
229 pool_put (tm->connections[thread_index], tc);
234 * Connection removal.
236 * This should be called only once connection enters CLOSED state. Note
237 * that it notifies the session of the removal event, so if the goal is to
238 * just remove the connection, call tcp_connection_cleanup instead.
241 tcp_connection_del (tcp_connection_t * tc)
243 TCP_EVT_DBG (TCP_EVT_DELETE, tc);
244 session_transport_delete_notify (&tc->connection);
245 tcp_connection_cleanup (tc);
249 tcp_connection_alloc (u8 thread_index)
251 tcp_main_t *tm = vnet_get_tcp_main ();
252 tcp_connection_t *tc;
254 pool_get (tm->connections[thread_index], tc);
255 clib_memset (tc, 0, sizeof (*tc));
256 tc->c_c_index = tc - tm->connections[thread_index];
257 tc->c_thread_index = thread_index;
262 tcp_connection_free (tcp_connection_t * tc)
264 tcp_main_t *tm = &tcp_main;
265 pool_put (tm->connections[tc->c_thread_index], tc);
267 clib_memset (tc, 0xFA, sizeof (*tc));
270 /** Notify session that connection has been reset.
272 * Switch state to closed and wait for session to call cleanup.
275 tcp_connection_reset (tcp_connection_t * tc)
277 TCP_EVT_DBG (TCP_EVT_RST_RCVD, tc);
280 case TCP_STATE_SYN_RCVD:
281 /* Cleanup everything. App wasn't notified yet */
282 session_transport_delete_notify (&tc->connection);
283 tcp_connection_cleanup (tc);
285 case TCP_STATE_SYN_SENT:
286 session_stream_connect_notify (&tc->connection, 1 /* fail */ );
287 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
288 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
290 case TCP_STATE_ESTABLISHED:
291 tcp_connection_timers_reset (tc);
292 /* Set the cleanup timer, in case the session layer/app don't
293 * cleanly close the connection */
294 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
295 session_transport_reset_notify (&tc->connection);
296 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
298 case TCP_STATE_CLOSE_WAIT:
299 case TCP_STATE_FIN_WAIT_1:
300 case TCP_STATE_FIN_WAIT_2:
301 case TCP_STATE_CLOSING:
302 case TCP_STATE_LAST_ACK:
303 tcp_connection_timers_reset (tc);
304 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
305 /* Make sure we mark the session as closed. In some states we may
306 * be still trying to send data */
307 session_transport_closed_notify (&tc->connection);
308 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
310 case TCP_STATE_CLOSED:
311 case TCP_STATE_TIME_WAIT:
314 TCP_DBG ("reset state: %u", tc->state);
319 * Begin connection closing procedure.
321 * If at the end the connection is not in CLOSED state, it is not removed.
322 * Instead, we rely on on TCP to advance through state machine to either
323 * 1) LAST_ACK (passive close) whereby when the last ACK is received
324 * tcp_connection_del is called. This notifies session of the delete and
326 * 2) TIME_WAIT (active close) whereby after 2MSL the 2MSL timer triggers
327 * and cleanup is called.
329 * N.B. Half-close connections are not supported
332 tcp_connection_close (tcp_connection_t * tc)
334 TCP_EVT_DBG (TCP_EVT_CLOSE, tc);
336 /* Send/Program FIN if needed and switch state */
339 case TCP_STATE_SYN_SENT:
340 /* Try to cleanup. If not on the right thread, mark as half-open done.
341 * Connection will be cleaned up when establish timer pops */
342 tcp_connection_cleanup (tc);
344 case TCP_STATE_SYN_RCVD:
345 tcp_connection_timers_reset (tc);
347 tcp_connection_set_state (tc, TCP_STATE_FIN_WAIT_1);
348 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_FINWAIT1_TIME);
350 case TCP_STATE_ESTABLISHED:
351 if (!session_tx_fifo_max_dequeue (&tc->connection))
354 tc->flags |= TCP_CONN_FINPNDG;
355 tcp_connection_set_state (tc, TCP_STATE_FIN_WAIT_1);
356 /* Set a timer in case the peer stops responding. Otherwise the
357 * connection will be stuck here forever. */
358 ASSERT (tc->timers[TCP_TIMER_WAITCLOSE] == TCP_TIMER_HANDLE_INVALID);
359 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_FINWAIT1_TIME);
361 case TCP_STATE_CLOSE_WAIT:
362 if (!session_tx_fifo_max_dequeue (&tc->connection))
365 tcp_connection_timers_reset (tc);
366 tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
367 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
370 tc->flags |= TCP_CONN_FINPNDG;
372 case TCP_STATE_FIN_WAIT_1:
373 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
375 case TCP_STATE_CLOSED:
376 tcp_connection_timers_reset (tc);
377 /* Delete connection but instead of doing it now wait until next
378 * dispatch cycle to give the session layer a chance to clear
379 * unhandled events */
380 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
383 TCP_DBG ("state: %u", tc->state);
388 tcp_session_close (u32 conn_index, u32 thread_index)
390 tcp_connection_t *tc;
391 tc = tcp_connection_get (conn_index, thread_index);
392 tcp_connection_close (tc);
396 tcp_session_cleanup (u32 conn_index, u32 thread_index)
398 tcp_connection_t *tc;
399 tc = tcp_connection_get (conn_index, thread_index);
400 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
401 tcp_connection_cleanup (tc);
405 * Initialize all connection timers as invalid
408 tcp_connection_timers_init (tcp_connection_t * tc)
412 /* Set all to invalid */
413 for (i = 0; i < TCP_N_TIMERS; i++)
415 tc->timers[i] = TCP_TIMER_HANDLE_INVALID;
418 tc->rto = TCP_RTO_INIT;
422 * Stop all connection timers
425 tcp_connection_timers_reset (tcp_connection_t * tc)
428 for (i = 0; i < TCP_N_TIMERS; i++)
430 tcp_timer_reset (tc, i);
435 typedef struct ip4_tcp_hdr
441 typedef struct ip6_tcp_hdr
448 tcp_connection_select_lb_bucket (tcp_connection_t * tc, const dpo_id_t * dpo,
451 const dpo_id_t *choice;
455 lb = load_balance_get (dpo->dpoi_index);
459 clib_memset (&hdr, 0, sizeof (hdr));
460 hdr.ip.protocol = IP_PROTOCOL_TCP;
461 hdr.ip.address_pair.src.as_u32 = tc->c_lcl_ip.ip4.as_u32;
462 hdr.ip.address_pair.dst.as_u32 = tc->c_rmt_ip.ip4.as_u32;
463 hdr.tcp.src_port = tc->c_lcl_port;
464 hdr.tcp.dst_port = tc->c_rmt_port;
465 hash = ip4_compute_flow_hash (&hdr.ip, lb->lb_hash_config);
470 clib_memset (&hdr, 0, sizeof (hdr));
471 hdr.ip.protocol = IP_PROTOCOL_TCP;
472 clib_memcpy_fast (&hdr.ip.src_address, &tc->c_lcl_ip.ip6,
473 sizeof (ip6_address_t));
474 clib_memcpy_fast (&hdr.ip.dst_address, &tc->c_rmt_ip.ip6,
475 sizeof (ip6_address_t));
476 hdr.tcp.src_port = tc->c_lcl_port;
477 hdr.tcp.dst_port = tc->c_rmt_port;
478 hash = ip6_compute_flow_hash (&hdr.ip, lb->lb_hash_config);
480 choice = load_balance_get_bucket_i (lb, hash & lb->lb_n_buckets_minus_1);
481 dpo_copy (result, choice);
485 tcp_lookup_rmt_in_fib (tcp_connection_t * tc)
490 clib_memcpy_fast (&prefix.fp_addr, &tc->c_rmt_ip, sizeof (prefix.fp_addr));
491 prefix.fp_proto = tc->c_is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
492 prefix.fp_len = tc->c_is_ip4 ? 32 : 128;
493 fib_index = fib_table_find (prefix.fp_proto, tc->c_fib_index);
494 return fib_table_lookup (fib_index, &prefix);
498 tcp_connection_stack_on_fib_entry (tcp_connection_t * tc)
500 dpo_id_t choice = DPO_INVALID;
501 u32 output_node_index;
504 fe = fib_entry_get (tc->c_rmt_fei);
505 if (fe->fe_lb.dpoi_type != DPO_LOAD_BALANCE)
508 tcp_connection_select_lb_bucket (tc, &fe->fe_lb, &choice);
511 tc->c_is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
512 dpo_stack_from_node (output_node_index, &tc->c_rmt_dpo, &choice);
516 /** Stack tcp connection on peer's fib entry.
518 * This ultimately populates the dpo the connection will use to send packets.
521 tcp_connection_fib_attach (tcp_connection_t * tc)
523 tc->c_rmt_fei = tcp_lookup_rmt_in_fib (tc);
525 ASSERT (tc->c_rmt_fei != FIB_NODE_INDEX_INVALID);
527 tcp_connection_stack_on_fib_entry (tc);
532 tcp_cc_init (tcp_connection_t * tc)
534 tc->cc_algo = tcp_cc_algo_get (tcp_main.cc_algo);
535 tc->cc_algo->init (tc);
539 tcp_cc_algo_register (tcp_cc_algorithm_type_e type,
540 const tcp_cc_algorithm_t * vft)
542 tcp_main_t *tm = vnet_get_tcp_main ();
543 vec_validate (tm->cc_algos, type);
545 tm->cc_algos[type] = *vft;
549 tcp_cc_algo_get (tcp_cc_algorithm_type_e type)
551 tcp_main_t *tm = vnet_get_tcp_main ();
552 return &tm->cc_algos[type];
557 * Initialize connection send variables.
560 tcp_init_snd_vars (tcp_connection_t * tc)
565 * We use the time to randomize iss and for setting up the initial
566 * timestamp. Make sure it's updated otherwise syn and ack in the
567 * handshake may make it look as if time has flown in the opposite
570 tcp_set_time_now (tcp_get_worker (vlib_get_thread_index ()));
571 time_now = tcp_time_now ();
573 tc->iss = random_u32 (&time_now);
574 tc->snd_una = tc->iss;
575 tc->snd_nxt = tc->iss + 1;
576 tc->snd_una_max = tc->snd_nxt;
581 tcp_enable_pacing (tcp_connection_t * tc)
583 u32 initial_bucket, byte_rate;
584 initial_bucket = 16 * tc->snd_mss;
586 transport_connection_tx_pacer_init (&tc->connection, byte_rate,
588 tc->mrtt_us = (u32) ~ 0;
591 /** Initialize tcp connection variables
593 * Should be called after having received a msg from the peer, i.e., a SYN or
594 * a SYNACK, such that connection options have already been exchanged. */
596 tcp_connection_init_vars (tcp_connection_t * tc)
598 tcp_connection_timers_init (tc);
600 scoreboard_init (&tc->sack_sb);
602 if (tc->state == TCP_STATE_SYN_RCVD)
603 tcp_init_snd_vars (tc);
605 if (!tc->c_is_ip4 && ip6_address_is_link_local_unicast (&tc->c_rmt_ip6))
606 tcp_add_del_adjacency (tc, 1);
608 /* tcp_connection_fib_attach (tc); */
610 if (transport_connection_is_tx_paced (&tc->connection)
611 || tcp_main.tx_pacing)
612 tcp_enable_pacing (tc);
616 tcp_alloc_custom_local_endpoint (tcp_main_t * tm, ip46_address_t * lcl_addr,
617 u16 * lcl_port, u8 is_ip4)
622 index = tm->last_v4_address_rotor++;
623 if (tm->last_v4_address_rotor >= vec_len (tm->ip4_src_addresses))
624 tm->last_v4_address_rotor = 0;
625 lcl_addr->ip4.as_u32 = tm->ip4_src_addresses[index].as_u32;
629 index = tm->last_v6_address_rotor++;
630 if (tm->last_v6_address_rotor >= vec_len (tm->ip6_src_addresses))
631 tm->last_v6_address_rotor = 0;
632 clib_memcpy_fast (&lcl_addr->ip6, &tm->ip6_src_addresses[index],
633 sizeof (ip6_address_t));
635 port = transport_alloc_local_port (TRANSPORT_PROTO_TCP, lcl_addr);
638 clib_warning ("Failed to allocate src port");
646 tcp_session_open (transport_endpoint_cfg_t * rmt)
648 tcp_main_t *tm = vnet_get_tcp_main ();
649 tcp_connection_t *tc;
650 ip46_address_t lcl_addr;
655 * Allocate local endpoint
657 if ((rmt->is_ip4 && vec_len (tm->ip4_src_addresses))
658 || (!rmt->is_ip4 && vec_len (tm->ip6_src_addresses)))
659 rv = tcp_alloc_custom_local_endpoint (tm, &lcl_addr, &lcl_port,
662 rv = transport_alloc_local_endpoint (TRANSPORT_PROTO_TCP,
663 rmt, &lcl_addr, &lcl_port);
669 * Create connection and send SYN
671 clib_spinlock_lock_if_init (&tm->half_open_lock);
672 tc = tcp_half_open_connection_new ();
673 ip_copy (&tc->c_rmt_ip, &rmt->ip, rmt->is_ip4);
674 ip_copy (&tc->c_lcl_ip, &lcl_addr, rmt->is_ip4);
675 tc->c_rmt_port = rmt->port;
676 tc->c_lcl_port = clib_host_to_net_u16 (lcl_port);
677 tc->c_is_ip4 = rmt->is_ip4;
678 tc->c_proto = TRANSPORT_PROTO_TCP;
679 tc->c_fib_index = rmt->fib_index;
680 /* The other connection vars will be initialized after SYN ACK */
681 tcp_connection_timers_init (tc);
683 TCP_EVT_DBG (TCP_EVT_OPEN, tc);
684 tc->state = TCP_STATE_SYN_SENT;
685 tcp_init_snd_vars (tc);
687 clib_spinlock_unlock_if_init (&tm->half_open_lock);
689 return tc->c_c_index;
692 const char *tcp_dbg_evt_str[] = {
693 #define _(sym, str) str,
698 const char *tcp_fsm_states[] = {
699 #define _(sym, str) str,
700 foreach_tcp_fsm_state
705 format_tcp_state (u8 * s, va_list * args)
707 u32 state = va_arg (*args, u32);
709 if (state < TCP_N_STATES)
710 s = format (s, "%s", tcp_fsm_states[state]);
712 s = format (s, "UNKNOWN (%d (0x%x))", state, state);
716 const char *tcp_connection_flags_str[] = {
717 #define _(sym, str) str,
718 foreach_tcp_connection_flag
723 format_tcp_connection_flags (u8 * s, va_list * args)
725 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
728 for (i = 0; i < TCP_CONN_N_FLAG_BITS; i++)
729 if (tc->flags & (1 << i))
731 for (i = 0; i < last; i++)
733 if (tc->flags & (1 << i))
734 s = format (s, "%s, ", tcp_connection_flags_str[i]);
737 s = format (s, "%s", tcp_connection_flags_str[last]);
741 const char *tcp_conn_timers[] = {
742 #define _(sym, str) str,
748 format_tcp_timers (u8 * s, va_list * args)
750 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
753 for (i = 0; i < TCP_N_TIMERS; i++)
754 if (tc->timers[i] != TCP_TIMER_HANDLE_INVALID)
757 for (i = 0; i < last; i++)
759 if (tc->timers[i] != TCP_TIMER_HANDLE_INVALID)
760 s = format (s, "%s,", tcp_conn_timers[i]);
764 s = format (s, "%s", tcp_conn_timers[i]);
770 format_tcp_congestion_status (u8 * s, va_list * args)
772 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
773 if (tcp_in_recovery (tc))
774 s = format (s, "recovery");
775 else if (tcp_in_fastrecovery (tc))
776 s = format (s, "fastrecovery");
778 s = format (s, "none");
783 tcp_rcv_wnd_available (tcp_connection_t * tc)
785 return (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
789 format_tcp_congestion (u8 * s, va_list * args)
791 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
792 u32 indent = format_get_indent (s);
794 s = format (s, "%U ", format_tcp_congestion_status, tc);
795 s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n",
796 tc->cwnd, tc->ssthresh, tc->snd_rxt_bytes, tc->bytes_acked);
797 s = format (s, "%Ucc space %u prev_ssthresh %u snd_congestion %u"
798 " dupack %u\n", format_white_space, indent,
799 tcp_available_cc_snd_space (tc), tc->prev_ssthresh,
800 tc->snd_congestion - tc->iss, tc->rcv_dupacks);
801 s = format (s, "%Utsecr %u tsecr_last_ack %u limited_transmit %u\n",
802 format_white_space, indent, tc->rcv_opts.tsecr,
803 tc->tsecr_last_ack, tc->limited_transmit - tc->iss);
808 format_tcp_vars (u8 * s, va_list * args)
810 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
811 s = format (s, " index: %u flags: %U timers: %U\n", tc->c_c_index,
812 format_tcp_connection_flags, tc, format_tcp_timers, tc);
813 s = format (s, " snd_una %u snd_nxt %u snd_una_max %u",
814 tc->snd_una - tc->iss, tc->snd_nxt - tc->iss,
815 tc->snd_una_max - tc->iss);
816 s = format (s, " rcv_nxt %u rcv_las %u\n",
817 tc->rcv_nxt - tc->irs, tc->rcv_las - tc->irs);
818 s = format (s, " snd_wnd %u rcv_wnd %u rcv_wscale %u ",
819 tc->snd_wnd, tc->rcv_wnd, tc->rcv_wscale);
820 s = format (s, "snd_wl1 %u snd_wl2 %u\n", tc->snd_wl1 - tc->irs,
821 tc->snd_wl2 - tc->iss);
822 s = format (s, " flight size %u out space %u rcv_wnd_av %u\n",
823 tcp_flight_size (tc), tcp_available_output_snd_space (tc),
824 tcp_rcv_wnd_available (tc));
825 s = format (s, " tsval_recent %u tsval_recent_age %u\n", tc->tsval_recent,
826 tcp_time_now () - tc->tsval_recent_age);
827 s = format (s, " rto %u rto_boff %u srtt %u us %.3f rttvar %u rtt_ts %x",
828 tc->rto, tc->rto_boff, tc->srtt, tc->mrtt_us * 1000, tc->rttvar,
830 s = format (s, " rtt_seq %u\n", tc->rtt_seq - tc->iss);
831 s = format (s, " cong: %U", format_tcp_congestion, tc);
833 if (tc->state >= TCP_STATE_ESTABLISHED)
835 s = format (s, " sboard: %U\n", format_tcp_scoreboard, &tc->sack_sb,
838 if (vec_len (tc->snd_sacks))
839 s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc);
845 format_tcp_connection_id (u8 * s, va_list * args)
847 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
852 s = format (s, "[%d:%d][%s] %U:%d->%U:%d", tc->c_thread_index,
853 tc->c_s_index, "T", format_ip4_address, &tc->c_lcl_ip4,
854 clib_net_to_host_u16 (tc->c_lcl_port), format_ip4_address,
855 &tc->c_rmt_ip4, clib_net_to_host_u16 (tc->c_rmt_port));
859 s = format (s, "[%d:%d][%s] %U:%d->%U:%d", tc->c_thread_index,
860 tc->c_s_index, "T", format_ip6_address, &tc->c_lcl_ip6,
861 clib_net_to_host_u16 (tc->c_lcl_port), format_ip6_address,
862 &tc->c_rmt_ip6, clib_net_to_host_u16 (tc->c_rmt_port));
869 format_tcp_connection (u8 * s, va_list * args)
871 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
872 u32 verbose = va_arg (*args, u32);
876 s = format (s, "%-50U", format_tcp_connection_id, tc);
879 s = format (s, "%-15U", format_tcp_state, tc->state);
881 s = format (s, "\n%U", format_tcp_vars, tc);
888 format_tcp_session (u8 * s, va_list * args)
890 u32 tci = va_arg (*args, u32);
891 u32 thread_index = va_arg (*args, u32);
892 u32 verbose = va_arg (*args, u32);
893 tcp_connection_t *tc;
895 tc = tcp_connection_get (tci, thread_index);
897 s = format (s, "%U", format_tcp_connection, tc, verbose);
899 s = format (s, "empty\n");
904 format_tcp_listener_session (u8 * s, va_list * args)
906 u32 tci = va_arg (*args, u32);
907 tcp_connection_t *tc = tcp_listener_get (tci);
908 return format (s, "%U", format_tcp_connection_id, tc);
912 format_tcp_half_open_session (u8 * s, va_list * args)
914 u32 tci = va_arg (*args, u32);
915 tcp_connection_t *tc = tcp_half_open_connection_get (tci);
916 return format (s, "%U", format_tcp_connection_id, tc);
920 format_tcp_sacks (u8 * s, va_list * args)
922 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
923 sack_block_t *sacks = tc->snd_sacks;
927 len = vec_len (sacks);
928 for (i = 0; i < len - 1; i++)
931 s = format (s, " start %u end %u\n", block->start - tc->irs,
932 block->end - tc->irs);
936 block = &sacks[len - 1];
937 s = format (s, " start %u end %u", block->start - tc->irs,
938 block->end - tc->irs);
944 format_tcp_rcv_sacks (u8 * s, va_list * args)
946 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
947 sack_block_t *sacks = tc->rcv_opts.sacks;
951 len = vec_len (sacks);
952 for (i = 0; i < len - 1; i++)
955 s = format (s, " start %u end %u\n", block->start - tc->iss,
956 block->end - tc->iss);
960 block = &sacks[len - 1];
961 s = format (s, " start %u end %u", block->start - tc->iss,
962 block->end - tc->iss);
968 format_tcp_sack_hole (u8 * s, va_list * args)
970 sack_scoreboard_hole_t *hole = va_arg (*args, sack_scoreboard_hole_t *);
971 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
973 s = format (s, " [%u, %u]", hole->start - tc->iss, hole->end - tc->iss);
975 s = format (s, " [%u, %u]", hole->start, hole->end);
980 format_tcp_scoreboard (u8 * s, va_list * args)
982 sack_scoreboard_t *sb = va_arg (*args, sack_scoreboard_t *);
983 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
984 sack_scoreboard_hole_t *hole;
985 u32 indent = format_get_indent (s);
987 s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
988 sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
989 s = format (s, "%Ulast_bytes_delivered %u high_sacked %u snd_una_adv %u\n",
990 format_white_space, indent, sb->last_bytes_delivered,
991 sb->high_sacked - tc->iss, sb->snd_una_adv);
992 s = format (s, "%Ucur_rxt_hole %u high_rxt %u rescue_rxt %u",
993 format_white_space, indent, sb->cur_rxt_hole,
994 sb->high_rxt - tc->iss, sb->rescue_rxt - tc->iss);
996 hole = scoreboard_first_hole (sb);
998 s = format (s, "\n%Uhead %u tail %u %u holes:\n%U", format_white_space,
999 indent, sb->head, sb->tail, pool_elts (sb->holes),
1000 format_white_space, indent);
1004 s = format (s, "%U", format_tcp_sack_hole, hole, tc);
1005 hole = scoreboard_next_hole (sb, hole);
1011 static transport_connection_t *
1012 tcp_session_get_transport (u32 conn_index, u32 thread_index)
1014 tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
1015 return &tc->connection;
1018 static transport_connection_t *
1019 tcp_half_open_session_get_transport (u32 conn_index)
1021 tcp_connection_t *tc = tcp_half_open_connection_get (conn_index);
1022 return &tc->connection;
1026 * Compute maximum segment size for session layer.
1028 * Since the result needs to be the actual data length, it first computes
1029 * the tcp options to be used in the next burst and subtracts their
1030 * length from the connection's snd_mss.
1033 tcp_session_send_mss (transport_connection_t * trans_conn)
1035 tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
1037 /* Ensure snd_mss does accurately reflect the amount of data we can push
1038 * in a segment. This also makes sure that options are updated according to
1039 * the current state of the connection. */
1040 tcp_update_burst_snd_vars (tc);
1046 tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space)
1048 if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
1050 return tc->snd_wnd <= snd_space ? tc->snd_wnd : 0;
1053 /* If not snd_wnd constrained and we can't write at least a segment,
1054 * don't try at all */
1055 if (PREDICT_FALSE (snd_space < tc->snd_mss))
1056 return snd_space < tc->cwnd ? 0 : snd_space;
1058 /* round down to mss multiple */
1059 return snd_space - (snd_space % tc->snd_mss);
1063 * Compute tx window session is allowed to fill.
1065 * Takes into account available send space, snd_mss and the congestion
1066 * state of the connection. If possible, the value returned is a multiple
1069 * @param tc tcp connection
1070 * @return number of bytes session is allowed to write
1073 tcp_snd_space_inline (tcp_connection_t * tc)
1075 int snd_space, snt_limited;
1077 if (PREDICT_FALSE (tcp_in_fastrecovery (tc)
1078 || tc->state == TCP_STATE_CLOSED))
1081 snd_space = tcp_available_output_snd_space (tc);
1083 /* If we haven't gotten dupacks or if we did and have gotten sacked
1084 * bytes then we can still send as per Limited Transmit (RFC3042) */
1085 if (PREDICT_FALSE (tc->rcv_dupacks != 0
1086 && (tcp_opts_sack_permitted (tc)
1087 && tc->sack_sb.last_sacked_bytes == 0)))
1089 if (tc->rcv_dupacks == 1 && tc->limited_transmit != tc->snd_nxt)
1090 tc->limited_transmit = tc->snd_nxt;
1091 ASSERT (seq_leq (tc->limited_transmit, tc->snd_nxt));
1093 snt_limited = tc->snd_nxt - tc->limited_transmit;
1094 snd_space = clib_max (2 * tc->snd_mss - snt_limited, 0);
1096 return tcp_round_snd_space (tc, snd_space);
1100 tcp_snd_space (tcp_connection_t * tc)
1102 return tcp_snd_space_inline (tc);
1106 tcp_session_send_space (transport_connection_t * trans_conn)
1108 tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
1109 return clib_min (tcp_snd_space_inline (tc),
1110 tc->snd_wnd - (tc->snd_nxt - tc->snd_una));
1114 tcp_session_tx_fifo_offset (transport_connection_t * trans_conn)
1116 tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
1118 ASSERT (seq_geq (tc->snd_nxt, tc->snd_una));
1120 /* This still works if fast retransmit is on */
1121 return (tc->snd_nxt - tc->snd_una);
1125 tcp_update_time (f64 now, u8 thread_index)
1127 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1129 tcp_set_time_now (wrk);
1130 tw_timer_expire_timers_16t_2w_512sl (&wrk->timer_wheel, now);
1131 tcp_do_fastretransmits (wrk);
1132 tcp_send_acks (wrk);
1133 tcp_flush_frames_to_output (wrk);
1137 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
1139 tcp_connection_t *tc = (tcp_connection_t *) tconn;
1140 return tcp_push_header (tc, b);
1144 tcp_session_flush_data (transport_connection_t * tconn)
1146 tcp_connection_t *tc = (tcp_connection_t *) tconn;
1147 if (tc->flags & TCP_CONN_PSH_PENDING)
1149 tc->flags |= TCP_CONN_PSH_PENDING;
1150 tc->psh_seq = tc->snd_una_max + transport_max_tx_dequeue (tconn) - 1;
1154 const static transport_proto_vft_t tcp_proto = {
1155 .enable = vnet_tcp_enable_disable,
1156 .bind = tcp_session_bind,
1157 .unbind = tcp_session_unbind,
1158 .push_header = tcp_session_push_header,
1159 .get_connection = tcp_session_get_transport,
1160 .get_listener = tcp_session_get_listener,
1161 .get_half_open = tcp_half_open_session_get_transport,
1162 .open = tcp_session_open,
1163 .close = tcp_session_close,
1164 .cleanup = tcp_session_cleanup,
1165 .send_mss = tcp_session_send_mss,
1166 .send_space = tcp_session_send_space,
1167 .update_time = tcp_update_time,
1168 .tx_fifo_offset = tcp_session_tx_fifo_offset,
1169 .flush_data = tcp_session_flush_data,
1170 .format_connection = format_tcp_session,
1171 .format_listener = format_tcp_listener_session,
1172 .format_half_open = format_tcp_half_open_session,
1173 .tx_type = TRANSPORT_TX_PEEK,
1174 .service_type = TRANSPORT_SERVICE_VC,
1179 tcp_connection_tx_pacer_update (tcp_connection_t * tc)
1184 if (!transport_connection_is_tx_paced (&tc->connection))
1187 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
1188 /* TODO should constrain to interface's max throughput but
1189 * we don't have link speeds for sw ifs ..*/
1190 rate = tc->cwnd / srtt;
1191 transport_connection_tx_pacer_update (&tc->connection, rate);
1195 tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
1198 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1199 u32 byte_rate = window / ((f64) TCP_TICK * tc->srtt);
1200 u64 last_time = wrk->vm->clib_time.last_cpu_time;
1201 transport_connection_tx_pacer_reset (&tc->connection, byte_rate,
1202 start_bucket, last_time);
1206 tcp_timer_keep_handler (u32 conn_index)
1208 u32 thread_index = vlib_get_thread_index ();
1209 tcp_connection_t *tc;
1211 tc = tcp_connection_get (conn_index, thread_index);
1212 tc->timers[TCP_TIMER_KEEP] = TCP_TIMER_HANDLE_INVALID;
1214 tcp_connection_close (tc);
1218 tcp_timer_establish_handler (u32 conn_index)
1220 tcp_connection_t *tc;
1222 tc = tcp_connection_get (conn_index, vlib_get_thread_index ());
1223 /* note: the connection may have already disappeared */
1224 if (PREDICT_FALSE (tc == 0))
1226 ASSERT (tc->state == TCP_STATE_SYN_RCVD);
1227 /* Start cleanup. App wasn't notified yet so use delete notify as
1228 * opposed to delete to cleanup session layer state. */
1229 session_transport_delete_notify (&tc->connection);
1230 tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
1231 tcp_connection_cleanup (tc);
1235 tcp_timer_establish_ao_handler (u32 conn_index)
1237 tcp_connection_t *tc;
1239 tc = tcp_half_open_connection_get (conn_index);
1243 ASSERT (tc->state == TCP_STATE_SYN_SENT);
1244 /* Notify app if we haven't tried to clean this up already */
1245 if (!(tc->flags & TCP_CONN_HALF_OPEN_DONE))
1246 session_stream_connect_notify (&tc->connection, 1 /* fail */ );
1248 tc->timers[TCP_TIMER_ESTABLISH_AO] = TCP_TIMER_HANDLE_INVALID;
1249 tcp_connection_cleanup (tc);
/* Wait-close timer handler: drives forced progress through the closing
 * states (CLOSE_WAIT, FIN_WAIT_1, LAST_ACK, CLOSING) when the peer or
 * the local app stalls the orderly shutdown. Dispatches on tc->state
 * (switch statement elided in this extract). */
1253 tcp_timer_waitclose_handler (u32 conn_index)
1255 u32 thread_index = vlib_get_thread_index (), rto;
1256 tcp_connection_t *tc;
1258 tc = tcp_connection_get (conn_index, thread_index);
1261 tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID;
/* CLOSE_WAIT: peer sent its FIN but the local app never closed. */
1265 case TCP_STATE_CLOSE_WAIT:
1266 tcp_connection_timers_reset (tc);
1267 session_transport_closed_notify (&tc->connection);
/* If our FIN is not pending, go straight to CLOSED and schedule the
 * final cleanup pass. */
1269 if (!(tc->flags & TCP_CONN_FINPNDG))
1271 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1272 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
1276 /* Session didn't come back with a close. Send FIN either way
1277 * and switch to LAST_ACK. */
1278 tcp_cong_recovery_off (tc);
1279 /* Make sure we don't try to send unsent data */
1280 tc->snd_una_max = tc->snd_nxt = tc->snd_una;
1282 tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
1284 /* Make sure we don't wait in LAST ACK forever */
1285 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
1287 /* Don't delete the connection yet */
1289 case TCP_STATE_FIN_WAIT_1:
1290 tcp_connection_timers_reset (tc);
1291 if (tc->flags & TCP_CONN_FINPNDG)
1293 /* If FIN pending send it before closing and wait as long as
1294 * the rto timeout would wait. Notify session layer that transport
1295 * is closed. We haven't sent everything but we did try. */
1296 tcp_cong_recovery_off (tc);
/* Wait one rto (scaled by backoff), capped at 2MSL, for the FIN to go
 * out before forcing the close. */
1298 rto = clib_max ((tc->rto >> tc->rto_boff) * TCP_TO_TIMER_TICK, 1);
1299 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE,
1300 clib_min (rto, TCP_2MSL_TIME));
1301 session_transport_closed_notify (&tc->connection);
1305 /* We've sent the fin but no progress. Close the connection and
1306 * to make sure everything is flushed, setup a cleanup timer */
1307 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1308 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
/* LAST_ACK / CLOSING: final ACK never arrived; give up and close. */
1311 case TCP_STATE_LAST_ACK:
1312 case TCP_STATE_CLOSING:
1313 tcp_connection_timers_reset (tc);
1314 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1315 tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
1316 session_transport_closed_notify (&tc->connection);
/* Default/cleanup path (per elided numbering): actually delete tc. */
1319 tcp_connection_del (tc);
/* Per-timer expiration dispatch table. Entry order MUST match the
 * TCP_TIMER_* enum values used to encode the timer id in the timer
 * handle (see tcp_expired_timers_dispatch, which extracts the id from
 * the top 4 bits). */
1325 static timer_expiration_handler *timer_expiration_handlers[TCP_N_TIMERS] =
1327 tcp_timer_retransmit_handler,
1328 tcp_timer_delack_handler,
1329 tcp_timer_persist_handler,
1330 tcp_timer_keep_handler,
1331 tcp_timer_waitclose_handler,
1332 tcp_timer_retransmit_syn_handler,
1333 tcp_timer_establish_handler,
1334 tcp_timer_establish_ao_handler,
/* Timer-wheel expiration callback. Each entry in expired_timers packs
 * the connection index in the low 28 bits and the timer id in the top
 * 4 bits; unpack both and invoke the matching handler. */
1339 tcp_expired_timers_dispatch (u32 * expired_timers)
1342 u32 connection_index, timer_id;
1344 for (i = 0; i < vec_len (expired_timers); i++)
1346 /* Get session index and timer id */
1347 connection_index = expired_timers[i] & 0x0FFFFFFF;
1348 timer_id = expired_timers[i] >> 28;
1350 TCP_EVT_DBG (TCP_EVT_TIMER_POP, connection_index, timer_id);
1352 /* Handle expiration */
1353 (*timer_expiration_handlers[timer_id]) (connection_index);
/* Initialize one 16-timer, 2-wheel, 512-slot timer wheel per worker
 * context, ticking at 100ms, dispatching into
 * tcp_expired_timers_dispatch. Runs once per vlib_main (ii is the
 * per-thread index supplied by foreach_vlib_main). */
1358 tcp_initialize_timer_wheels (tcp_main_t * tm)
1360 tw_timer_wheel_16t_2w_512sl_t *tw;
1362 foreach_vlib_main (({
1363 tw = &tm->wrk_ctx[ii].timer_wheel;
1364 tw_timer_wheel_init_16t_2w_512sl (tw, tcp_expired_timers_dispatch,
1365 100e-3 /* timer period 100ms */ , ~0);
/* Anchor the wheel to the thread's current time so the first advance
 * doesn't process a huge artificial interval. */
1366 tw->last_run_time = vlib_time_now (this_vlib_main);
/* One-time TCP stack enable: registers with IP for protocol dispatch,
 * sizes per-thread connection pools and work vectors, preallocates
 * connections if configured, and initializes timer wheels.
 * Returns a clib_error_t on init-function failure, 0 on success. */
1371 static clib_error_t *
1372 tcp_main_enable (vlib_main_t * vm)
1374 vlib_thread_main_t *vtm = vlib_get_thread_main ();
1375 u32 num_threads, n_workers, prealloc_conn_per_wrk;
1376 tcp_connection_t *tc __attribute__ ((unused));
1377 tcp_main_t *tm = vnet_get_tcp_main ();
1378 clib_error_t *error = 0;
/* IP layers must be initialized before we can register for TCP */
1381 if ((error = vlib_call_init_function (vm, ip_main_init)))
1383 if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
1385 if ((error = vlib_call_init_function (vm, ip6_lookup_init)))
/* Hand TCP segments to our input nodes */
1392 ip4_register_protocol (IP_PROTOCOL_TCP, tcp4_input_node.index);
1393 ip6_register_protocol (IP_PROTOCOL_TCP, tcp6_input_node.index);
1396 * Initialize data structures
1399 num_threads = 1 /* main thread */ + vtm->n_threads;
1400 vec_validate (tm->connections, num_threads - 1);
1401 vec_validate (tm->wrk_ctx, num_threads - 1);
1402 n_workers = num_threads == 1 ? 1 : vtm->n_threads;
1403 prealloc_conn_per_wrk = tm->preallocated_connections / n_workers;
1405 for (thread = 0; thread < num_threads; thread++)
/* Pre-size the per-thread scratch vectors to 256 entries, then reset
 * length so they start empty with capacity reserved. */
1407 vec_validate (tm->wrk_ctx[thread].pending_fast_rxt, 255);
1408 vec_validate (tm->wrk_ctx[thread].ongoing_fast_rxt, 255);
1409 vec_validate (tm->wrk_ctx[thread].postponed_fast_rxt, 255);
1410 vec_validate (tm->wrk_ctx[thread].pending_deq_acked, 255);
1411 vec_validate (tm->wrk_ctx[thread].pending_acks, 255);
1412 vec_validate (tm->wrk_ctx[thread].pending_disconnects, 255);
1413 vec_reset_length (tm->wrk_ctx[thread].pending_fast_rxt);
1414 vec_reset_length (tm->wrk_ctx[thread].ongoing_fast_rxt);
1415 vec_reset_length (tm->wrk_ctx[thread].postponed_fast_rxt);
1416 vec_reset_length (tm->wrk_ctx[thread].pending_deq_acked);
1417 vec_reset_length (tm->wrk_ctx[thread].pending_acks);
1418 vec_reset_length (tm->wrk_ctx[thread].pending_disconnects);
1419 tm->wrk_ctx[thread].vm = vlib_mains[thread];
1422 * Preallocate connections. Assume that thread 0 won't
1423 * use preallocated threads when running multi-core
1425 if ((thread > 0 || num_threads == 1) && prealloc_conn_per_wrk)
1426 pool_init_fixed (tm->connections[thread], prealloc_conn_per_wrk);
1430 * Use a preallocated half-open connection pool?
1432 if (tm->preallocated_half_open_connections)
1433 pool_init_fixed (tm->half_open_connections,
1434 tm->preallocated_half_open_connections);
1436 /* Initialize clocks per tick for TCP timestamp. Used to compute
1437 * monotonically increasing timestamps. */
1438 tm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock
1439 / TCP_TSTAMP_RESOLUTION;
/* Half-open pool is shared across workers; only lock when multi-threaded */
1441 if (num_threads > 1)
1443 clib_spinlock_init (&tm->half_open_lock);
1446 tcp_initialize_timer_wheels (tm);
1448 tm->bytes_per_buffer = vlib_buffer_free_list_buffer_size
1449 (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
/* Enable or disable the TCP stack. Enabling is idempotent (returns
 * early if already enabled) and delegates to tcp_main_enable; disabling
 * just clears the enabled flag. NOTE(review): the is_en branch structure
 * is elided in this extract — verify against the full file. */
1455 vnet_tcp_enable_disable (vlib_main_t * vm, u8 is_en)
1459 if (tcp_main.is_enabled)
1462 return tcp_main_enable (vm);
1466 tcp_main.is_enabled = 0;
/* Toggle punting of TCP packets destined to unknown (unregistered)
 * ports, per address family. NOTE(review): the is_ip4 conditional is
 * elided in this extract; the two assignments below are presumably the
 * v4 and v6 branches — confirm against the full file. */
1473 tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add)
1475 tcp_main_t *tm = &tcp_main;
1477 tm->punt_unknown4 = is_add;
1479 tm->punt_unknown6 = is_add;
/* VLIB init function: registers TCP's header formatter with the IP
 * layer and registers TCP as a transport with the session layer for
 * both address families. The stack itself stays disabled until
 * vnet_tcp_enable_disable is invoked. */
1482 static clib_error_t *
1483 tcp_init (vlib_main_t * vm)
1485 tcp_main_t *tm = vnet_get_tcp_main ();
1486 ip_main_t *im = &ip_main;
1487 ip_protocol_info_t *pi;
1489 /* Session layer, and by implication tcp, are disabled by default */
1492 /* Register with IP for header parsing */
1493 pi = ip_get_protocol_info (im, IP_PROTOCOL_TCP);
1495 return clib_error_return (0, "TCP protocol info AWOL");
1496 pi->format_header = format_tcp_header;
1497 pi->unformat_pg_edit = unformat_pg_tcp_header;
1499 /* Register as transport with session layer */
1500 transport_register_protocol (TRANSPORT_PROTO_TCP, &tcp_proto,
1501 FIB_PROTOCOL_IP4, tcp4_output_node.index);
1502 transport_register_protocol (TRANSPORT_PROTO_TCP, &tcp_proto,
1503 FIB_PROTOCOL_IP6, tcp6_output_node.index);
1505 tcp_api_reference ();
/* Default congestion-control algorithm */
1507 tm->cc_algo = TCP_CC_NEWRENO;
1511 VLIB_INIT_FUNCTION (tcp_init);
/* unformat function: parse a congestion-control algorithm name
 * ("newreno" or "cubic") into the caller-supplied uword. Returns
 * non-zero on match (return statements elided in this extract). */
1514 unformat_tcp_cc_algo (unformat_input_t * input, va_list * va)
1516 uword *result = va_arg (*va, uword *);
1518 if (unformat (input, "newreno"))
1519 *result = TCP_CC_NEWRENO;
1520 else if (unformat (input, "cubic"))
1521 *result = TCP_CC_CUBIC;
/* unformat function: match a registered cc algorithm by name and, if it
 * provides an unformat_cfg hook, hand it the following CLI sub-input
 * block for algorithm-specific configuration. */
1529 unformat_tcp_cc_algo_cfg (unformat_input_t * input, va_list * va)
1531 tcp_main_t *tm = vnet_get_tcp_main ();
1532 tcp_cc_algorithm_t *cc_alg;
1533 unformat_input_t sub_input;
1536 vec_foreach (cc_alg, tm->cc_algos)
/* Skip algorithms whose name doesn't match the input */
1538 if (!unformat (input, cc_alg->name))
1541 if (cc_alg->unformat_cfg
1542 && unformat (input, "%U", unformat_vlib_cli_sub_input, &sub_input))
1544 if (cc_alg->unformat_cfg (&sub_input))
/* Startup-config handler for the "tcp" section: parses preallocation
 * counts, buffer fail fraction, fifo sizing, pacing and cc-algo
 * settings into tcp_main. Unrecognized input is a config error. */
1551 static clib_error_t *
1552 tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
1554 tcp_main_t *tm = vnet_get_tcp_main ();
1556 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1558 if (unformat (input, "preallocated-connections %d",
1559 &tm->preallocated_connections))
1561 else if (unformat (input, "preallocated-half-open-connections %d",
1562 &tm->preallocated_half_open_connections))
1564 else if (unformat (input, "buffer-fail-fraction %f",
1565 &tm->buffer_fail_fraction))
1567 else if (unformat (input, "max-rx-fifo %U", unformat_memory_size,
1570 else if (unformat (input, "no-tx-pacing"))
1572 else if (unformat (input, "cc-algo %U", unformat_tcp_cc_algo,
/* Algorithm-specific config blocks, e.g. "cubic { ... }" */
1575 else if (unformat (input, "%U", unformat_tcp_cc_algo_cfg))
1578 return clib_error_return (0, "unknown input `%U'",
1579 format_unformat_error, input);
1584 VLIB_CONFIG_FUNCTION (tcp_config_fn, "tcp");
1588 * \brief Configure an ipv4 source address range
1589 * @param vm vlib_main_t pointer
1590 * @param start first ipv4 address in the source address range
1591 * @param end last ipv4 address in the source address range
1592 * @param table_id VRF / table ID, 0 for the default FIB
1593 * @return 0 if all OK, else an error indication from api_errno.h
/* Configure a range of IPv4 source addresses for TCP to use when
 * originating connections: validates the range (max ~10K addresses),
 * resolves the egress interface via a FIB lookup on the last address,
 * enables proxy ARP for the range, and installs a /32 receive DPO for
 * each address so replies are punted locally. */
1597 tcp_configure_v4_source_address_range (vlib_main_t * vm,
1598 ip4_address_t * start,
1599 ip4_address_t * end, u32 table_id)
1601 tcp_main_t *tm = vnet_get_tcp_main ();
1602 vnet_main_t *vnm = vnet_get_main ();
1603 u32 start_host_byte_order, end_host_byte_order;
1604 fib_prefix_t prefix;
1605 vnet_sw_interface_t *si;
1606 fib_node_index_t fei;
/* Local prototype to avoid a header dependency on the ARP module */
1610 int vnet_proxy_arp_add_del (ip4_address_t * lo_addr,
1611 ip4_address_t * hi_addr, u32 fib_index,
1614 clib_memset (&prefix, 0, sizeof (prefix));
1616 fib_index = fib_table_find (FIB_PROTOCOL_IP4, table_id);
1618 if (fib_index == ~0)
1619 return VNET_API_ERROR_NO_SUCH_FIB;
1621 start_host_byte_order = clib_net_to_host_u32 (start->as_u32);
1622 end_host_byte_order = clib_net_to_host_u32 (end->as_u32);
1624 /* sanity check for reversed args or some such */
1625 if ((end_host_byte_order - start_host_byte_order) > (10 << 10))
1626 return VNET_API_ERROR_INVALID_ARGUMENT;
1628 /* Lookup the last address, to identify the interface involved */
1630 prefix.fp_proto = FIB_PROTOCOL_IP4;
1631 memcpy (&prefix.fp_addr.ip4, end, sizeof (ip4_address_t));
1633 fei = fib_table_lookup (fib_index, &prefix);
1635 /* Couldn't find route to destination. Bail out. */
1636 if (fei == FIB_NODE_INDEX_INVALID)
1637 return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
1639 sw_if_index = fib_entry_get_resolving_interface (fei);
1641 /* Enable proxy arp on the interface */
1642 si = vnet_get_sw_interface (vnm, sw_if_index);
1643 si->flags |= VNET_SW_INTERFACE_FLAG_PROXY_ARP;
1645 /* Configure proxy arp across the range */
1646 rv = vnet_proxy_arp_add_del (start, end, fib_index, 0 /* is_del */ );
/* Loop body (do/while per line 1674): one receive DPO per address */
1653 dpo_id_t dpo = DPO_INVALID;
/* Remember the address so TCP can pick from it as a source */
1655 vec_add1 (tm->ip4_src_addresses, start[0]);
1657 /* Add local adjacencies for the range */
1659 receive_dpo_add_or_lock (DPO_PROTO_IP4, ~0 /* sw_if_index */ ,
1662 prefix.fp_proto = FIB_PROTOCOL_IP4;
1663 prefix.fp_addr.ip4.as_u32 = start->as_u32;
1665 fib_table_entry_special_dpo_update (fib_index,
1668 FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
/* Advance to the next address in host byte order */
1671 start_host_byte_order++;
1672 start->as_u32 = clib_host_to_net_u32 (start_host_byte_order);
1674 while (start_host_byte_order <= end_host_byte_order);
1680 * \brief Configure an ipv6 source address range
1681 * @param vm vlib_main_t pointer
1682 * @param start first ipv6 address in the source address range
1683 * @param end last ipv6 address in the source address range
1684 * @param table_id VRF / table ID, 0 for the default FIB
1685 * @return 0 if all OK, else an error indication from api_errno.h
/* Configure a range of IPv6 source addresses for TCP: for each address,
 * record it, look up the resolving interface, add a proxy neighbor
 * discovery entry and install a /128 receive DPO, then increment the
 * 128-bit address until the end of the range is reached. */
1689 tcp_configure_v6_source_address_range (vlib_main_t * vm,
1690 ip6_address_t * start,
1691 ip6_address_t * end, u32 table_id)
1693 tcp_main_t *tm = vnet_get_tcp_main ();
1694 fib_prefix_t prefix;
1696 fib_node_index_t fei;
1699 clib_memset (&prefix, 0, sizeof (prefix));
1701 fib_index = fib_table_find (FIB_PROTOCOL_IP6, table_id);
1703 if (fib_index == ~0)
1704 return VNET_API_ERROR_NO_SUCH_FIB;
/* Per-address loop body (loop construct elided in this extract) */
1710 dpo_id_t dpo = DPO_INVALID;
1712 /* Remember this address */
1713 vec_add1 (tm->ip6_src_addresses, start[0]);
1715 /* Lookup the prefix, to identify the interface involved */
1716 prefix.fp_len = 128;
1717 prefix.fp_proto = FIB_PROTOCOL_IP6;
1718 memcpy (&prefix.fp_addr.ip6, start, sizeof (ip6_address_t));
1720 fei = fib_table_lookup (fib_index, &prefix);
1722 /* Couldn't find route to destination. Bail out. */
1723 if (fei == FIB_NODE_INDEX_INVALID)
1724 return VNET_API_ERROR_NEXT_HOP_NOT_IN_FIB;
1726 sw_if_index = fib_entry_get_resolving_interface (fei);
1728 if (sw_if_index == (u32) ~ 0)
1729 return VNET_API_ERROR_NO_MATCHING_INTERFACE;
1731 /* Add a proxy neighbor discovery entry for this address */
1732 ip6_neighbor_proxy_add_del (sw_if_index, start, 0 /* is_del */ );
1734 /* Add a receive adjacency for this address */
1735 receive_dpo_add_or_lock (DPO_PROTO_IP6, ~0 /* sw_if_index */ ,
1738 fib_table_entry_special_dpo_update (fib_index,
1741 FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
1744 /* Done with the entire range? */
1745 if (!memcmp (start, end, sizeof (start[0])))
1748 /* Increment the address. DGMS. */
/* Byte-wise increment from the least significant byte, propagating
 * carries toward byte 0 */
1750 for (i = 15; i >= 0; i--)
1753 if (tmp.as_u8[i] != 0)
/* CLI handler for "tcp src-address": parses a single v4/v6 address or a
 * range (plus optional fib-table), then installs it via the
 * tcp_configure_v{4,6}_source_address_range helpers, translating API
 * error codes into CLI error messages. */
1761 static clib_error_t *
1762 tcp_src_address (vlib_main_t * vm,
1763 unformat_input_t * input, vlib_cli_command_t * cmd_arg)
1765 ip4_address_t v4start, v4end;
1766 ip6_address_t v6start, v6end;
1772 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1774 if (unformat (input, "%U - %U", unformat_ip4_address, &v4start,
1775 unformat_ip4_address, &v4end))
/* Single v4 address: treat as a one-address range */
1777 else if (unformat (input, "%U", unformat_ip4_address, &v4start))
1779 memcpy (&v4end, &v4start, sizeof (v4start));
1782 else if (unformat (input, "%U - %U", unformat_ip6_address, &v6start,
1783 unformat_ip6_address, &v6end))
/* Single v6 address: treat as a one-address range */
1785 else if (unformat (input, "%U", unformat_ip6_address, &v6start))
1787 memcpy (&v6end, &v6start, sizeof (v6start));
1790 else if (unformat (input, "fib-table %d", &table_id))
1796 if (!v4set && !v6set)
1797 return clib_error_return (0, "at least one v4 or v6 address required");
1801 rv = tcp_configure_v4_source_address_range (vm, &v4start, &v4end,
1808 case VNET_API_ERROR_NO_SUCH_FIB:
1809 return clib_error_return (0, "Invalid table-id %d", table_id);
1811 case VNET_API_ERROR_INVALID_ARGUMENT:
1812 return clib_error_return (0, "Invalid address range %U - %U",
1813 format_ip4_address, &v4start,
1814 format_ip4_address, &v4end);
1816 return clib_error_return (0, "error %d", rv);
1822 rv = tcp_configure_v6_source_address_range (vm, &v6start, &v6end,
1829 case VNET_API_ERROR_NO_SUCH_FIB:
1830 return clib_error_return (0, "Invalid table-id %d", table_id);
1833 return clib_error_return (0, "error %d", rv);
/* CLI command registration for the handler above */
1841 VLIB_CLI_COMMAND (tcp_src_address_command, static) =
1843 .path = "tcp src-address",
1844 .short_help = "tcp src-address <ip-addr> [- <ip-addr>] add src address range",
1845 .function = tcp_src_address,
/* Format the recorded SACK scoreboard trace (start/end/ack/snd_una_max/
 * group tuples) into s. Compiled in only when TCP_SCOREBOARD_TRACE is
 * set; otherwise the body is preprocessed away. */
1850 tcp_scoreboard_dump_trace (u8 * s, sack_scoreboard_t * sb)
1852 #if TCP_SCOREBOARD_TRACE
1854 scoreboard_trace_elt_t *block;
1860 s = format (s, "scoreboard trace:");
1861 vec_foreach (block, sb->trace)
1863 s = format (s, "{%u, %u, %u, %u, %u}, ", block->start, block->end,
1864 block->ack, block->snd_una_max, block->group);
1866 s = format (s, "\n");
/* CLI handler for "show tcp scoreboard trace <connection>": resolves
 * the transport connection from the CLI input and dumps its SACK
 * scoreboard trace, or reports that tracing is compiled out. */
1874 static clib_error_t *
1875 tcp_show_scoreboard_trace_fn (vlib_main_t * vm, unformat_input_t * input,
1876 vlib_cli_command_t * cmd_arg)
1878 transport_connection_t *tconn = 0;
1879 tcp_connection_t *tc;
1881 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1883 if (unformat (input, "%U", unformat_transport_connection, &tconn,
1884 TRANSPORT_PROTO_TCP))
1887 return clib_error_return (0, "unknown input `%U'",
1888 format_unformat_error, input);
1891 if (!TCP_SCOREBOARD_TRACE)
1893 vlib_cli_output (vm, "scoreboard tracing not enabled");
1897 tc = tcp_get_connection_from_transport (tconn);
1898 s = tcp_scoreboard_dump_trace (s, &tc->sack_sb);
1899 vlib_cli_output (vm, "%v", s);
/* CLI command registration for the handler above */
1904 VLIB_CLI_COMMAND (tcp_show_scoreboard_trace_command, static) =
1906 .path = "show tcp scoreboard trace",
1907 .short_help = "show tcp scoreboard trace <connection>",
1908 .function = tcp_show_scoreboard_trace_fn,
/* Replay a connection's recorded SACK trace against a freshly
 * initialized dummy connection's scoreboard, formatting intermediate
 * (if verbose) and final scoreboard state into s. Used for offline
 * debugging of scoreboard behavior. */
1913 tcp_scoreboard_replay (u8 * s, tcp_connection_t * tc, u8 verbose)
1916 scoreboard_trace_elt_t *trace;
1917 u32 next_ack, left, group, has_new_ack = 0;
1918 tcp_connection_t _dummy_tc, *dummy_tc = &_dummy_tc;
1919 sack_block_t *block;
1921 if (!TCP_SCOREBOARD_TRACE)
1923 s = format (s, "scoreboard tracing not enabled");
/* Build a minimal fake connection that accepts SACK options */
1930 clib_memset (dummy_tc, 0, sizeof (*dummy_tc));
1931 tcp_connection_timers_init (dummy_tc);
1932 scoreboard_init (&dummy_tc->sack_sb);
1933 dummy_tc->rcv_opts.flags |= TCP_OPTS_FLAG_SACK;
1935 #if TCP_SCOREBOARD_TRACE
1936 trace = tc->sack_sb.trace;
1937 trace_len = vec_len (tc->sack_sb.trace);
/* Seed snd_una/snd_una_max from the first traced ack; 1448 is
 * presumably one MSS-worth of payload — TODO confirm. */
1940 for (i = 0; i < trace_len; i++)
1942 if (trace[i].ack != 0)
1944 dummy_tc->snd_una = trace[i].ack - 1448;
1945 dummy_tc->snd_una_max = trace[i].ack;
/* Replay trace entries group by group; each group is one incoming
 * ack plus its SACK blocks */
1950 while (left < trace_len)
1952 group = trace[left].group;
1953 vec_reset_length (dummy_tc->rcv_opts.sacks);
1955 while (trace[left].group == group)
1957 if (trace[left].ack != 0)
1960 s = format (s, "Adding ack %u, snd_una_max %u, segs: ",
1961 trace[left].ack, trace[left].snd_una_max);
1962 dummy_tc->snd_una_max = trace[left].snd_una_max;
1963 next_ack = trace[left].ack;
1969 s = format (s, "[%u, %u], ", trace[left].start,
1971 vec_add2 (dummy_tc->rcv_opts.sacks, block, 1);
1972 block->start = trace[left].start;
1973 block->end = trace[left].end;
/* Feed the accumulated group into the real SACK processing code */
1979 tcp_rcv_sacks (dummy_tc, next_ack);
1981 dummy_tc->snd_una = next_ack + dummy_tc->sack_sb.snd_una_adv;
1984 s = format (s, "result: %U", format_tcp_scoreboard,
1985 &dummy_tc->sack_sb);
1988 s = format (s, "result: %U", format_tcp_scoreboard, &dummy_tc->sack_sb);
/* CLI handler for "tcp replay scoreboard <connection>": resolves the
 * connection and runs tcp_scoreboard_replay in verbose mode, printing
 * the resulting scoreboard state. */
1993 static clib_error_t *
1994 tcp_scoreboard_trace_fn (vlib_main_t * vm, unformat_input_t * input,
1995 vlib_cli_command_t * cmd_arg)
1997 transport_connection_t *tconn = 0;
1998 tcp_connection_t *tc = 0;
2000 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2002 if (unformat (input, "%U", unformat_transport_connection, &tconn,
2003 TRANSPORT_PROTO_TCP))
2006 return clib_error_return (0, "unknown input `%U'",
2007 format_unformat_error, input);
2010 if (!TCP_SCOREBOARD_TRACE)
2012 vlib_cli_output (vm, "scoreboard tracing not enabled");
2016 tc = tcp_get_connection_from_transport (tconn);
2019 vlib_cli_output (vm, "connection not found");
2022 str = tcp_scoreboard_replay (str, tc, 1);
2023 vlib_cli_output (vm, "%v", str);
/* CLI command registration for the handler above */
2028 VLIB_CLI_COMMAND (tcp_replay_scoreboard_command, static) =
2030 .path = "tcp replay scoreboard",
2031 .short_help = "tcp replay scoreboard <connection>",
2032 .function = tcp_scoreboard_trace_fn,
/* CLI handler for "show tcp punt": prints whether punting of TCP
 * packets to unknown ports is enabled, per address family. Takes no
 * arguments; any input is an error. */
2036 static clib_error_t *
2037 show_tcp_punt_fn (vlib_main_t * vm, unformat_input_t * input,
2038 vlib_cli_command_t * cmd_arg)
2040 tcp_main_t *tm = vnet_get_tcp_main ();
2041 if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
2042 return clib_error_return (0, "unknown input `%U'", format_unformat_error,
2044 vlib_cli_output (vm, "IPv4 TCP punt: %s",
2045 tm->punt_unknown4 ? "enabled" : "disabled");
2046 vlib_cli_output (vm, "IPv6 TCP punt: %s",
2047 tm->punt_unknown6 ? "enabled" : "disabled");
/* CLI command registration for the handler above */
2051 VLIB_CLI_COMMAND (show_tcp_punt_command, static) =
2053 .path = "show tcp punt",
2054 .short_help = "show tcp punt",
2055 .function = show_tcp_punt_fn,
2060 * fd.io coding-style-patch-verification: ON
2063 * eval: (c-set-style "gnu")