2 * Copyright (c) 2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/session/application_local.h>
17 #include <vnet/session/session.h>
/* Pool of cut-through (ct) connections. All ct connections live on the
 * main thread (thread index 0); see ct_connection_alloc. */
19 static ct_connection_t *connections;
/* Start/stop polling of the main-thread session-queue pre-input node,
 * which services cut-through sessions. Polling is enabled while at least
 * one ct connection exists and disabled once the pool is empty.
 * NOTE(review): the function bails out early when no workers are
 * configured -- presumably the node is serviced anyway in single-threaded
 * mode; confirm against session_queue_pre_input_node. */
22 ct_enable_disable_main_pre_input_node (u8 is_add)
26 if (!vlib_num_workers ())
29 n_conns = pool_elts (connections);
/* Enable on first add; disable only when no ct connections remain */
33 if (n_conns > 0 && is_add)
34 vlib_node_set_state (vlib_get_main (),
35 session_queue_pre_input_node.index,
36 VLIB_NODE_STATE_POLLING);
37 else if (n_conns == 0)
38 vlib_node_set_state (vlib_get_main (),
39 session_queue_pre_input_node.index,
40 VLIB_NODE_STATE_DISABLED);
/* Allocate a zero-initialized ct connection from the pool. The connection
 * index is its pool slot and every ct connection is pinned to the main
 * thread (thread index 0). */
43 static ct_connection_t *
44 ct_connection_alloc (void)
48 pool_get_zero (connections, ct);
/* Connection index == pool index, so lookups are direct pool accesses */
49 ct->c_c_index = ct - connections;
50 ct->c_thread_index = 0;
/* Look up a ct connection by pool index. Guards against indices that
 * point to freed pool slots. */
56 static ct_connection_t *
57 ct_connection_get (u32 ct_index)
59 if (pool_is_free_index (connections, ct_index))
61 return pool_elt_at_index (connections, ct_index);
/* Return a ct connection to the pool. */
65 ct_connection_free (ct_connection_t * ct)
/* Poison the freed element so use-after-free bugs fail loudly */
68 memset (ct, 0xfc, sizeof (*ct));
69 pool_put (connections, ct);
/* Given one half of a cut-through session pair, return the session on the
 * other side. Both halves live on thread 0. */
73 ct_session_get_peer (session_t * s)
75 ct_connection_t *ct, *peer_ct;
76 ct = ct_connection_get (s->connection_index);
77 peer_ct = ct_connection_get (ct->peer_index);
78 return session_get (peer_ct->c_s_index, 0);
/* Fill @a sep with the local endpoint (transport proto, port, ip) of the
 * cut-through listener @a ll. */
82 ct_session_endpoint (session_t * ll, session_endpoint_t * sep)
85 ct = (ct_connection_t *) session_get_transport (ll);
/* Report the proto the app originally asked for, not TRANSPORT_PROTO_NONE */
86 sep->transport_proto = ct->actual_tp;
87 sep->port = ct->c_lcl_port;
88 sep->is_ip4 = ct->c_is_ip4;
89 ip_copy (&sep->ip, &ct->c_lcl_ip, ct->c_is_ip4);
/* Called once the server side of a cut-through connect has been set up.
 * Notifies the client app of the shared segment, allocates the client-side
 * session wired cross-wise onto the server's fifos, and delivers the
 * connect notification to the client worker. On failure the client is
 * notified with the error and allocated resources are released. */
93 ct_session_connect_notify (session_t * ss)
95 ct_connection_t *sct, *cct;
96 app_worker_t *client_wrk;
97 segment_manager_t *sm;
/* Save the index: ss may need to be re-fetched after pool reallocs below */
104 ss_index = ss->session_index;
105 sct = (ct_connection_t *) session_get_transport (ss);
106 client_wrk = app_worker_get (sct->client_wrk);
107 opaque = sct->client_opaque;
/* Make sure the client app learns about the segment backing the fifos */
109 sm = segment_manager_get (ss->rx_fifo->segment_manager);
110 seg = segment_manager_get_segment_w_lock (sm, ss->rx_fifo->segment_index);
111 segment_handle = segment_manager_segment_handle (sm, seg);
113 if ((err = app_worker_add_segment_notify (client_wrk, segment_handle)))
115 clib_warning ("failed to notify client %u of new segment",
117 segment_manager_segment_reader_unlock (sm);
123 segment_manager_segment_reader_unlock (sm);
126 /* Alloc client session */
127 cct = ct_connection_get (sct->peer_index);
129 cs = session_alloc (0);
/* Re-fetch: session_alloc may have moved the session pool */
130 ss = session_get (ss_index, 0);
131 cs->session_type = ss->session_type;
132 cs->connection_index = sct->c_c_index;
133 cs->listener_handle = SESSION_INVALID_HANDLE;
134 cs->session_state = SESSION_STATE_CONNECTING;
135 cs->app_wrk_index = client_wrk->wrk_index;
/* Overwrites the index set above; the client session points to the
 * client-side ct connection */
136 cs->connection_index = cct->c_c_index;
138 cct->c_s_index = cs->session_index;
/* Cross-wire the fifos: client rx is the server's tx and vice versa */
139 cct->client_rx_fifo = ss->tx_fifo;
140 cct->client_tx_fifo = ss->rx_fifo;
/* Both sides reference the fifos, so bump refcounts to keep them alive
 * until both halves are cleaned up */
142 cct->client_rx_fifo->refcnt++;
143 cct->client_tx_fifo->refcnt++;
145 /* This will allocate fifos for the session. They won't be used for
146 * exchanging data but they will be used to close the connection if
147 * the segment manager/worker is freed */
148 if ((err = app_worker_init_connected (client_wrk, cs)))
155 cs->session_state = SESSION_STATE_CONNECTING;
157 if (app_worker_connect_notify (client_wrk, cs, err, opaque))
160 segment_manager_dealloc_fifos (cs->rx_fifo, cs->tx_fifo);
/* Re-fetch: the notify above may have triggered session pool growth */
165 cs = session_get (cct->c_s_index, 0);
166 cs->session_state = SESSION_STATE_READY;
/* Error path: tell the client the connect failed (null session) */
171 app_worker_connect_notify (client_wrk, 0, err, opaque);
/* Allocate a dedicated segment for a new cut-through session and carve the
 * session's rx/tx fifos out of it. The segment is sized from the server
 * app's fifo properties and the server worker is notified of the new
 * segment. Returns nonzero on failure, after deleting the segment. */
176 ct_init_local_session (app_worker_t * client_wrk, app_worker_t * server_wrk,
177 ct_connection_t * ct, session_t * ls, session_t * ll)
179 u32 round_rx_fifo_sz, round_tx_fifo_sz, sm_index, seg_size;
180 segment_manager_props_t *props;
181 application_t *server;
182 segment_manager_t *sm;
/* Extra headroom on top of the fifo sizes for allocator overhead */
183 u32 margin = 16 << 10;
188 server = application_get (server_wrk->app_index);
190 props = application_segment_manager_properties (server);
/* Round fifo sizes up to powers of two, as the allocator will */
191 round_rx_fifo_sz = 1 << max_log2 (props->rx_fifo_size);
192 round_tx_fifo_sz = 1 << max_log2 (props->tx_fifo_size);
193 /* Increase size because of inefficient chunk allocations. Depending on
194 * how data is consumed, it may happen that more chunks than needed are
196 * TODO should remove once allocations are done more efficiently */
197 seg_size = 4 * (round_rx_fifo_sz + round_tx_fifo_sz + margin);
199 sm = app_worker_get_listen_segment_manager (server_wrk, ll);
200 seg_index = segment_manager_add_segment (sm, seg_size);
203 clib_warning ("failed to add new cut-through segment");
206 seg = segment_manager_get_segment_w_lock (sm, seg_index);
208 rv = segment_manager_try_alloc_fifos (seg, ls->thread_index,
210 props->tx_fifo_size, &ls->rx_fifo,
214 clib_warning ("failed to add fifos in cut-through segment");
215 segment_manager_segment_reader_unlock (sm);
/* Record ownership metadata so the fifos can be traced back to this
 * session and its segment */
219 sm_index = segment_manager_index (sm);
220 ls->rx_fifo->master_session_index = ls->session_index;
221 ls->tx_fifo->master_session_index = ls->session_index;
222 ls->rx_fifo->segment_manager = sm_index;
223 ls->tx_fifo->segment_manager = sm_index;
224 ls->rx_fifo->segment_index = seg_index;
225 ls->tx_fifo->segment_index = seg_index;
227 /* Disable ooo lookups on the cut-through fifos. TODO remove once init of
228 * chunk lookup rbtrees is delegated to transports */
229 svm_fifo_free_chunk_lookup (ls->tx_fifo);
231 segment_handle = segment_manager_segment_handle (sm, seg);
232 if ((rv = app_worker_add_segment_notify (server_wrk, segment_handle)))
234 clib_warning ("failed to notify server of new segment");
235 segment_manager_segment_reader_unlock (sm);
238 segment_manager_segment_reader_unlock (sm);
239 ct->segment_handle = segment_handle;
/* Error path: undo the segment allocation */
244 segment_manager_del_segment (sm, seg);
/* Establish a cut-through connection to local listener @a ll. Allocates a
 * client/server ct connection pair, initializes both from the listener's
 * endpoint, allocates the server-side session and runs it through the
 * accept flow. The client session is created later, in
 * ct_session_connect_notify, once the server app confirms the accept. */
249 ct_connect (app_worker_t * client_wrk, session_t * ll,
250 session_endpoint_cfg_t * sep)
252 u32 cct_index, ll_index, ll_ct_index;
253 ct_connection_t *sct, *cct, *ll_ct;
254 app_worker_t *server_wrk;
/* Save indices: the pools may be reallocated by the allocations below */
257 ll_index = ll->session_index;
258 ll_ct_index = ll->connection_index;
260 cct = ct_connection_alloc ();
261 cct_index = cct->c_c_index;
262 sct = ct_connection_alloc ();
263 ll_ct = ct_connection_get (ll_ct_index);
266 * Alloc and init client transport
/* Re-fetch cct: the second ct_connection_alloc may have moved the pool */
268 cct = ct_connection_get (cct_index);
269 cct->c_thread_index = 0;
270 cct->c_rmt_port = sep->port;
272 cct->c_is_ip4 = sep->is_ip4;
273 clib_memcpy (&cct->c_rmt_ip, &sep->ip, sizeof (sep->ip));
/* Remember the proto the app listened on; the ct layer itself uses NONE */
274 cct->actual_tp = ll_ct->actual_tp;
278 * Init server transport
280 sct->c_thread_index = 0;
282 sct->c_lcl_port = ll_ct->c_lcl_port;
283 sct->c_is_ip4 = sep->is_ip4;
284 clib_memcpy (&sct->c_lcl_ip, &ll_ct->c_lcl_ip, sizeof (ll_ct->c_lcl_ip));
285 sct->client_wrk = client_wrk->wrk_index;
286 sct->c_proto = TRANSPORT_PROTO_NONE;
287 sct->client_opaque = sep->opaque;
288 sct->actual_tp = ll_ct->actual_tp;
/* Link the two halves so each side can find its peer */
290 sct->peer_index = cct->c_c_index;
291 cct->peer_index = sct->c_c_index;
294 * Accept server session. Client session is created only after
295 * server confirms accept.
297 ss = session_alloc (0);
/* Re-fetch the listener: session_alloc may have moved the session pool */
298 ll = listen_session_get (ll_index);
299 ss->session_type = session_type_from_proto_and_ip (TRANSPORT_PROTO_NONE,
301 ss->connection_index = sct->c_c_index;
302 ss->listener_handle = listen_session_get_handle (ll);
303 ss->session_state = SESSION_STATE_CREATED;
305 server_wrk = application_listener_select_worker (ll);
306 ss->app_wrk_index = server_wrk->wrk_index;
308 sct->c_s_index = ss->session_index;
309 sct->server_wrk = ss->app_wrk_index;
311 if (ct_init_local_session (client_wrk, server_wrk, sct, ss, ll))
313 ct_connection_free (sct);
318 ss->session_state = SESSION_STATE_ACCEPTING;
319 if (app_worker_accept_notify (server_wrk, ss))
321 ct_connection_free (sct);
322 segment_manager_dealloc_fifos (ss->rx_fifo, ss->tx_fifo);
/* Both halves share the same segment */
327 cct->segment_handle = sct->segment_handle;
328 ct_enable_disable_main_pre_input_node (1 /* is_add */ );
/* Transport vft start_listen: allocate a ct connection acting as the
 * listener and copy the local endpoint from the session endpoint config.
 * Returns the new connection's index. */
333 ct_start_listen (u32 app_listener_index, transport_endpoint_t * tep)
335 session_endpoint_cfg_t *sep;
/* The session layer always hands us a session_endpoint_cfg_t here */
338 sep = (session_endpoint_cfg_t *) tep;
339 ct_connection_alloc ();
340 ct->server_wrk = sep->app_wrk_index;
341 ct->c_is_ip4 = sep->is_ip4;
342 clib_memcpy (&ct->c_lcl_ip, &sep->ip, sizeof (sep->ip));
343 ct->c_lcl_port = sep->port;
/* Remember the proto the app actually asked for (ct runs as PROTO_NONE) */
344 ct->actual_tp = sep->transport_proto;
345 ct_enable_disable_main_pre_input_node (1 /* is_add */ );
346 return ct->c_c_index;
/* Transport vft stop_listen: free the listener's ct connection and, if no
 * ct connections remain, stop polling the pre-input node. */
350 ct_stop_listen (u32 ct_index)
353 ct = ct_connection_get (ct_index);
354 ct_connection_free (ct);
355 ct_enable_disable_main_pre_input_node (0 /* is_add */ );
359 static transport_connection_t *
360 ct_listener_get (u32 ct_index)
362 return (transport_connection_t *) ct_connection_get (ct_index);
/* Transport vft connect: try to find a locally attached listener for the
 * endpoint, first in the app's local session table and then, if allowed,
 * in the global scope. On a match, establish a cut-through connection via
 * ct_connect; otherwise return an error (or no-op if nothing matched). */
366 ct_session_connect (transport_endpoint_cfg_t * tep)
368 session_endpoint_cfg_t *sep_ext;
369 session_endpoint_t *sep;
370 app_worker_t *app_wrk;
378 sep_ext = (session_endpoint_cfg_t *) tep;
379 sep = (session_endpoint_t *) tep;
380 app_wrk = app_worker_get (sep_ext->app_wrk_index);
381 app = application_get (app_wrk->app_index);
/* Look up with the proto the app originally requested */
383 sep->transport_proto = sep_ext->original_tp;
384 table_index = application_local_session_table (app);
385 lh = session_lookup_local_endpoint (table_index, sep);
/* A deny rule in the local table filters this connect out entirely */
386 if (lh == SESSION_DROP_HANDLE)
387 return SESSION_E_FILTERED;
389 if (lh == SESSION_INVALID_HANDLE)
392 ll = listen_session_get_from_handle (lh);
393 al = app_listener_get_w_session (ll);
396 * Break loop if rule in local table points to connecting app. This
397 * can happen if client is a generic proxy. Route connect through
398 * global table instead.
400 if (al->app_index == app->app_index)
403 return ct_connect (app_wrk, ll, sep_ext);
406 * If nothing found, check the global scope for locally attached
407 * destinations. Make sure first that we're allowed to.
411 if (session_endpoint_is_local (sep))
412 return SESSION_E_NOROUTE;
414 if (!application_has_global_scope (app))
415 return SESSION_E_SCOPE;
417 fib_proto = session_endpoint_fib_proto (sep);
418 table_index = session_lookup_get_index_for_fib (fib_proto, sep->fib_index);
419 ll = session_lookup_listener_wildcard (table_index, sep);
421 return ct_connect (app_wrk, ll, sep_ext);
424 /* Failed to connect but no error */
/* Transport vft close: tear down one half of a cut-through connection.
 * Notifies the peer (if still linked) that the transport is closing, frees
 * the local session and its fifos, releases the extra client fifo
 * references, and returns the ct connection to the pool. */
429 ct_session_close (u32 ct_index, u32 thread_index)
431 ct_connection_t *ct, *peer_ct;
432 app_worker_t *app_wrk;
435 ct = ct_connection_get (ct_index);
436 peer_ct = ct_connection_get (ct->peer_index);
/* Unlink the peer so it does not try to notify us back */
439 peer_ct->peer_index = ~0;
440 session_transport_closing_notify (&peer_ct->connection);
443 s = session_get (ct->c_s_index, 0);
444 app_wrk = app_worker_get_if_valid (s->app_wrk_index);
/* Let the app worker drop its mapping of the shared segment */
446 app_worker_del_segment_notify (app_wrk, ct->segment_handle);
447 session_free_w_fifos (s);
/* Release the cross-wired fifo references taken at connect time */
449 segment_manager_dealloc_fifos (ct->client_rx_fifo, ct->client_tx_fifo);
451 ct_connection_free (ct);
452 ct_enable_disable_main_pre_input_node (0 /* is_add */ );
455 static transport_connection_t *
456 ct_session_get (u32 ct_index, u32 thread_index)
458 return (transport_connection_t *) ct_connection_get (ct_index);
/* Format a ct connection id as "[thread:session][CT:proto] lcl:port->rmt:port",
 * choosing the ip4 or ip6 formatter based on the connection's address family. */
462 format_ct_connection_id (u8 * s, va_list * args)
464 ct_connection_t *ct = va_arg (*args, ct_connection_t *);
/* ip4 branch */
469 s = format (s, "[%d:%d][CT:%U] %U:%d->%U:%d", ct->c_thread_index,
470 ct->c_s_index, format_transport_proto_short, ct->actual_tp,
471 format_ip4_address, &ct->c_lcl_ip4,
472 clib_net_to_host_u16 (ct->c_lcl_port), format_ip4_address,
473 &ct->c_rmt_ip4, clib_net_to_host_u16 (ct->c_rmt_port));
/* ip6 branch */
477 s = format (s, "[%d:%d][CT:%U] %U:%d->%U:%d", ct->c_thread_index,
478 ct->c_s_index, format_transport_proto_short, ct->actual_tp,
479 format_ip6_address, &ct->c_lcl_ip6,
480 clib_net_to_host_u16 (ct->c_lcl_port), format_ip6_address,
481 &ct->c_rmt_ip6, clib_net_to_host_u16 (ct->c_rmt_port));
/* Transport vft custom_tx: move data for a cut-through session.
 * NOTE(review): the branch taken when session_has_transport() is true is
 * not fully visible here -- presumably such sessions are skipped; confirm. */
488 ct_custom_tx (void *session, transport_send_params_t * sp)
490 session_t *s = (session_t *) session;
491 if (session_has_transport (s))
493 /* As all other sessions, cut-throughs are scheduled by vpp for tx so let
494 * the scheduler's custom tx logic decide when to deschedule, i.e., after
495 * fifo is emptied. */
496 return ct_session_tx (s);
/* Transport vft app_rx_evt: the app consumed rx data, so give the peer
 * session a dequeue notification (its tx fifo just drained). */
500 ct_app_rx_evt (transport_connection_t * tc)
502 ct_connection_t *ct = (ct_connection_t *) tc, *peer_ct;
505 peer_ct = ct_connection_get (ct->peer_index);
508 ps = session_get (peer_ct->c_s_index, peer_ct->c_thread_index);
509 return session_dequeue_notify (ps);
/* CLI formatter for a ct listener: connection id column plus a fixed
 * "LISTEN" state column. Thread index and verbosity are unused. */
513 format_ct_listener (u8 * s, va_list * args)
515 u32 tc_index = va_arg (*args, u32);
516 u32 __clib_unused thread_index = va_arg (*args, u32);
517 u32 __clib_unused verbose = va_arg (*args, u32);
518 ct_connection_t *ct = ct_connection_get (tc_index);
519 s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_ct_connection_id, ct);
521 s = format (s, "%-" SESSION_CLI_STATE_LEN "s", "LISTEN");
/* CLI formatter for an established ct connection; when verbose, adds the
 * "ESTABLISHED" state column. */
526 format_ct_connection (u8 * s, va_list * args)
528 ct_connection_t *ct = va_arg (*args, ct_connection_t *);
529 u32 verbose = va_arg (*args, u32);
533 s = format (s, "%-" SESSION_CLI_ID_LEN "U", format_ct_connection_id, ct);
536 s = format (s, "%-" SESSION_CLI_STATE_LEN "s", "ESTABLISHED");
539 s = format (s, "\n");
/* Transport vft format_connection: look up the ct connection by index and
 * delegate to format_ct_connection; prints "empty" for a freed index. */
546 format_ct_session (u8 * s, va_list * args)
548 u32 ct_index = va_arg (*args, u32);
549 u32 __clib_unused thread_index = va_arg (*args, u32);
550 u32 verbose = va_arg (*args, u32);
553 ct = ct_connection_get (ct_index);
/* Index points to a freed pool slot */
556 s = format (s, "empty\n");
560 s = format (s, "%U", format_ct_connection, ct, verbose);
/* Virtual function table for the cut-through transport, registered under
 * TRANSPORT_PROTO_NONE in ct_transport_init. Tx is internal (custom_tx via
 * the session scheduler) and the service type is app-to-app. */
565 static const transport_proto_vft_t cut_thru_proto = {
566 .start_listen = ct_start_listen,
567 .stop_listen = ct_stop_listen,
568 .get_listener = ct_listener_get,
569 .connect = ct_session_connect,
570 .close = ct_session_close,
571 .get_connection = ct_session_get,
572 .custom_tx = ct_custom_tx,
573 .app_rx_evt = ct_app_rx_evt,
574 .format_listener = format_ct_listener,
575 .format_connection = format_ct_session,
576 .transport_options = {
579 .tx_type = TRANSPORT_TX_INTERNAL,
580 .service_type = TRANSPORT_SERVICE_APP,
/* "Transmit" for a cut-through session. No data is copied: the peer reads
 * directly from the shared fifo, so tx reduces to notifying the peer that
 * data is available -- unless the peer is already closing. */
586 ct_session_tx (session_t * s)
588 ct_connection_t *ct, *peer_ct;
591 ct = (ct_connection_t *) session_get_transport (s);
592 peer_ct = ct_connection_get (ct->peer_index);
/* Peer sessions always live on thread 0 */
595 peer_s = session_get (peer_ct->c_s_index, 0);
/* Don't wake a peer whose transport is already closing */
596 if (peer_s->session_state >= SESSION_STATE_TRANSPORT_CLOSING)
598 session_enqueue_notify (peer_s);
599 /* The scheduler uses packet count as a means of upper bounding the amount
600 * of work done per dispatch. So make it look like we have sent something */
/* Register the cut-through transport (TRANSPORT_PROTO_NONE) for both ip4
 * and ip6 at init time. The ~0 output-node index means no dedicated output
 * node: tx happens through custom_tx. */
604 static clib_error_t *
605 ct_transport_init (vlib_main_t * vm)
607 transport_register_protocol (TRANSPORT_PROTO_NONE, &cut_thru_proto,
608 FIB_PROTOCOL_IP4, ~0);
609 transport_register_protocol (TRANSPORT_PROTO_NONE, &cut_thru_proto,
610 FIB_PROTOCOL_IP6, ~0);
614 VLIB_INIT_FUNCTION (ct_transport_init);
617 * fd.io coding-style-patch-verification: ON
620 * eval: (c-set-style "gnu")