session: return bound listener for proxy accepts
src/vnet/session/session.h
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_session_h__
#define __included_session_h__

#include <vnet/session/stream_session.h>
#include <vnet/session/session_lookup.h>
#include <vnet/session/transport_interface.h>
#include <vlibmemory/unix_shared_memory_queue.h>
#include <vnet/session/session_debug.h>
#include <vnet/session/segment_manager.h>

#define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0)
#define INVALID_INDEX ((u32)~0)
#define SESSION_PROXY_LISTENER_INDEX ((u32)~0 - 1)

/* TODO decide how much since we have pre-data as well */
#define MAX_HDRS_LEN    100     /* Max number of bytes for headers */

typedef enum
{
  FIFO_EVENT_APP_RX,
  FIFO_EVENT_APP_TX,
  FIFO_EVENT_TIMEOUT,
  FIFO_EVENT_DISCONNECT,
  FIFO_EVENT_BUILTIN_RX,
  FIFO_EVENT_RPC,
} fifo_event_type_t;

static inline const char *
fifo_event_type_str (fifo_event_type_t et)
{
  switch (et)
    {
    case FIFO_EVENT_APP_RX:
      return "FIFO_EVENT_APP_RX";
    case FIFO_EVENT_APP_TX:
      return "FIFO_EVENT_APP_TX";
    case FIFO_EVENT_TIMEOUT:
      return "FIFO_EVENT_TIMEOUT";
    case FIFO_EVENT_DISCONNECT:
      return "FIFO_EVENT_DISCONNECT";
    case FIFO_EVENT_BUILTIN_RX:
      return "FIFO_EVENT_BUILTIN_RX";
    case FIFO_EVENT_RPC:
      return "FIFO_EVENT_RPC";
    default:
      return "UNKNOWN FIFO EVENT";
    }
}

#define foreach_session_input_error                                     \
_(NO_SESSION, "No session drops")                                       \
_(NO_LISTENER, "No listener for dst port drops")                        \
_(ENQUEUED, "Packets pushed into rx fifo")                              \
_(NOT_READY, "Session not ready packets")                               \
_(FIFO_FULL, "Packets dropped for lack of rx fifo space")               \
_(EVENT_FIFO_FULL, "Events not sent for lack of event fifo space")      \
_(API_QUEUE_FULL, "Sessions not created for lack of API queue space")   \
_(NEW_SEG_NO_SPACE, "Created segment, couldn't allocate a fifo pair")   \
_(NO_SPACE, "Couldn't allocate a fifo pair")

typedef enum
{
#define _(sym,str) SESSION_ERROR_##sym,
  foreach_session_input_error
#undef _
    SESSION_N_ERROR,
} session_error_t;

/* Event queue input node static next indices */
typedef enum
{
  SESSION_QUEUE_NEXT_DROP,
  SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT,
  SESSION_QUEUE_NEXT_IP4_LOOKUP,
  SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT,
  SESSION_QUEUE_NEXT_IP6_LOOKUP,
  SESSION_QUEUE_N_NEXT,
} session_queue_next_t;

typedef struct
{
  void *fp;
  void *arg;
} rpc_args_t;

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  union
    {
      svm_fifo_t * fifo;
      u64 session_handle;
      rpc_args_t rpc_args;
    };
  u8 event_type;
  u8 postponed;
}) session_fifo_event_t;
/* *INDENT-ON* */

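/*
 * Illustrative sketch, not part of the original header: how a worker might
 * fill an RX notification event before handing it to an event queue. The
 * helper name is hypothetical; only the struct layout above is assumed.
 */
static inline void
session_fifo_event_fill_example (svm_fifo_t * f, session_fifo_event_t * e)
{
  e->fifo = f;                  /* union member used for RX/TX events */
  e->event_type = FIFO_EVENT_APP_RX;
  e->postponed = 0;
}
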
/* Forward definition */
typedef struct _session_manager_main session_manager_main_t;

typedef int
  (session_fifo_rx_fn) (vlib_main_t * vm, vlib_node_runtime_t * node,
                        session_manager_main_t * smm,
                        session_fifo_event_t * e0, stream_session_t * s0,
                        u32 thread_index, int *n_tx_pkts);

extern session_fifo_rx_fn session_tx_fifo_peek_and_snd;
extern session_fifo_rx_fn session_tx_fifo_dequeue_and_snd;

u8 session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e);

struct _session_manager_main
{
  /** Per worker thread session pools */
  stream_session_t **sessions;

  /** Per worker-thread count of threads peeking into the session pool */
  u32 *session_peekers;

  /** Per worker-thread rw peekers locks */
  clib_spinlock_t *peekers_readers_locks;
  clib_spinlock_t *peekers_write_locks;

  /** Pool of listen sessions. Same type as stream sessions to ease lookups */
  stream_session_t *listen_sessions[SESSION_N_TYPES];

  /** Per-proto, per-worker enqueue epoch counters */
  u8 *current_enqueue_epoch[TRANSPORT_N_PROTO];

  /** Per-proto, per-worker thread vector of sessions to enqueue */
  u32 **session_to_enqueue[TRANSPORT_N_PROTO];

  /** per-worker tx buffer free lists */
  u32 **tx_buffers;

  /** Per worker-thread vector of partially read events */
  session_fifo_event_t **free_event_vector;

  /** per-worker active event vectors */
  session_fifo_event_t **pending_event_vector;

  /** per-worker postponed disconnects */
  session_fifo_event_t **pending_disconnects;

  /** vpp fifo event queue */
  unix_shared_memory_queue_t **vpp_event_queues;

  /** vpp fifo event queue configured length */
  u32 configured_event_queue_length;

  /** session table size parameters */
  u32 configured_v4_session_table_buckets;
  u32 configured_v4_session_table_memory;
  u32 configured_v4_halfopen_table_buckets;
  u32 configured_v4_halfopen_table_memory;
  u32 configured_v6_session_table_buckets;
  u32 configured_v6_session_table_memory;
  u32 configured_v6_halfopen_table_buckets;
  u32 configured_v6_halfopen_table_memory;

  /** Unique segment name counter */
  u32 unique_segment_name_counter;

  /** Per transport rx function that can either dequeue or peek */
  session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES];

  /** Session manager is enabled */
  u8 is_enabled;

  /** Preallocate session config parameter */
  u32 preallocated_sessions;

#if SESSION_DBG
  /**
   * last event poll time by thread
   * Debug only. Will cause false cache-line sharing as-is
   */
  f64 *last_event_poll_by_thread;
#endif

};

extern session_manager_main_t session_manager_main;
extern vlib_node_registration_t session_queue_node;

/*
 * Session manager function
 */
always_inline session_manager_main_t *
vnet_get_session_manager_main ()
{
  return &session_manager_main;
}

always_inline u8
stream_session_is_valid (u32 si, u8 thread_index)
{
  stream_session_t *s;
  s = pool_elt_at_index (session_manager_main.sessions[thread_index], si);
  if (s->thread_index != thread_index || s->session_index != si
      /* || s->server_rx_fifo->master_session_index != si
         || s->server_tx_fifo->master_session_index != si
         || s->server_rx_fifo->master_thread_index != thread_index
         || s->server_tx_fifo->master_thread_index != thread_index */ )
    return 0;
  return 1;
}

stream_session_t *session_alloc (u32 thread_index);

always_inline stream_session_t *
session_get (u32 si, u32 thread_index)
{
  ASSERT (stream_session_is_valid (si, thread_index));
  return pool_elt_at_index (session_manager_main.sessions[thread_index], si);
}

always_inline stream_session_t *
session_get_if_valid (u64 si, u32 thread_index)
{
  if (thread_index >= vec_len (session_manager_main.sessions))
    return 0;

  if (pool_is_free_index (session_manager_main.sessions[thread_index], si))
    return 0;

  ASSERT (stream_session_is_valid (si, thread_index));
  return pool_elt_at_index (session_manager_main.sessions[thread_index], si);
}

always_inline u64
session_handle (stream_session_t * s)
{
  return ((u64) s->thread_index << 32) | (u64) s->session_index;
}

always_inline u32
session_index_from_handle (u64 handle)
{
  return handle & 0xFFFFFFFF;
}

always_inline u32
session_thread_from_handle (u64 handle)
{
  return handle >> 32;
}

always_inline void
session_parse_handle (u64 handle, u32 * index, u32 * thread_index)
{
  *index = session_index_from_handle (handle);
  *thread_index = session_thread_from_handle (handle);
}

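/*
 * Illustrative sketch, not part of the original header: a session handle
 * packs the owning thread index in the upper 32 bits and the pool index in
 * the lower 32 bits, so parsing a freshly built handle must round-trip.
 * The helper name is hypothetical.
 */
static inline void
session_handle_roundtrip_example (stream_session_t * s)
{
  u64 handle = session_handle (s);
  u32 index, thread_index;
  session_parse_handle (handle, &index, &thread_index);
  ASSERT (index == s->session_index);
  ASSERT (thread_index == s->thread_index);
}
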
always_inline stream_session_t *
session_get_from_handle (u64 handle)
{
  session_manager_main_t *smm = &session_manager_main;
  return
    pool_elt_at_index (smm->sessions[session_thread_from_handle (handle)],
                       session_index_from_handle (handle));
}

/**
 * Acquires a lock that blocks a session pool from expanding.
 *
 * This is typically used for safely peeking into other threads'
 * pools in order to clone elements. Lock should be dropped as soon
 * as possible by calling @ref session_pool_remove_peeker.
 *
 * NOTE: Avoid using pool_elt_at_index while the lock is held because
 * it may lead to free elt bitmap expansion/contraction!
 */
always_inline void
session_pool_add_peeker (u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  if (thread_index == vlib_get_thread_index ())
    return;
  clib_spinlock_lock_if_init (&smm->peekers_readers_locks[thread_index]);
  smm->session_peekers[thread_index] += 1;
  if (smm->session_peekers[thread_index] == 1)
    clib_spinlock_lock_if_init (&smm->peekers_write_locks[thread_index]);
  clib_spinlock_unlock_if_init (&smm->peekers_readers_locks[thread_index]);
}

always_inline void
session_pool_remove_peeker (u32 thread_index)
{
  session_manager_main_t *smm = &session_manager_main;
  if (thread_index == vlib_get_thread_index ())
    return;
  ASSERT (session_manager_main.session_peekers[thread_index] > 0);
  clib_spinlock_lock_if_init (&smm->peekers_readers_locks[thread_index]);
  smm->session_peekers[thread_index] -= 1;
  if (smm->session_peekers[thread_index] == 0)
    clib_spinlock_unlock_if_init (&smm->peekers_write_locks[thread_index]);
  clib_spinlock_unlock_if_init (&smm->peekers_readers_locks[thread_index]);
}

/**
 * Get session from handle and 'lock' pool resize if not in same thread
 *
 * Caller should drop the peek 'lock' as soon as possible.
 */
always_inline stream_session_t *
session_get_from_handle_safe (u64 handle)
{
  session_manager_main_t *smm = &session_manager_main;
  u32 thread_index = session_thread_from_handle (handle);
  if (thread_index == vlib_get_thread_index ())
    {
      return pool_elt_at_index (smm->sessions[thread_index],
                                session_index_from_handle (handle));
    }
  else
    {
      session_pool_add_peeker (thread_index);
      /* Don't use pool_elt_at_index. See @ref session_pool_add_peeker */
      return smm->sessions[thread_index] + session_index_from_handle (handle);
    }
}

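/*
 * Illustrative sketch, not part of the original header: typical use of the
 * peeker 'lock'. Copy out what is needed and drop the peeker promptly,
 * since holding it blocks the owning thread's pool from resizing. The
 * helper name is hypothetical.
 */
static inline u8
session_peek_state_example (u64 handle)
{
  u32 thread_index = session_thread_from_handle (handle);
  stream_session_t *s = session_get_from_handle_safe (handle);
  u8 state = s->session_state;
  /* The safe getter only takes the peeker for remote threads */
  if (thread_index != vlib_get_thread_index ())
    session_pool_remove_peeker (thread_index);
  return state;
}
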
always_inline stream_session_t *
stream_session_listener_get (u8 sst, u64 si)
{
  return pool_elt_at_index (session_manager_main.listen_sessions[sst], si);
}

always_inline u32
stream_session_get_index (stream_session_t * s)
{
  if (s->session_state == SESSION_STATE_LISTENING)
    return s - session_manager_main.listen_sessions[s->session_type];

  return s - session_manager_main.sessions[s->thread_index];
}

always_inline u32
stream_session_max_rx_enqueue (transport_connection_t * tc)
{
  stream_session_t *s = session_get (tc->s_index, tc->thread_index);
  return svm_fifo_max_enqueue (s->server_rx_fifo);
}

always_inline u32
stream_session_rx_fifo_size (transport_connection_t * tc)
{
  stream_session_t *s = session_get (tc->s_index, tc->thread_index);
  return s->server_rx_fifo->nitems;
}

always_inline u32
session_get_index (stream_session_t * s)
{
  return (s - session_manager_main.sessions[s->thread_index]);
}

always_inline stream_session_t *
session_clone_safe (u32 session_index, u32 thread_index)
{
  stream_session_t *old_s, *new_s;
  u32 current_thread_index = vlib_get_thread_index ();

  /* If the pool is reallocated during the memcpy AND the memory allocator
   * decides to hand the old chunk to somebody in a hurry to scribble
   * something on it, we have a problem. So register this thread as a
   * session pool peeker while the copy is in flight.
   */
  session_pool_add_peeker (thread_index);
  new_s = session_alloc (current_thread_index);
  old_s = session_manager_main.sessions[thread_index] + session_index;
  clib_memcpy (new_s, old_s, sizeof (*new_s));
  session_pool_remove_peeker (thread_index);
  new_s->thread_index = current_thread_index;
  new_s->session_index = session_get_index (new_s);
  return new_s;
}

transport_connection_t *session_get_transport (stream_session_t * s);

u32 stream_session_tx_fifo_max_dequeue (transport_connection_t * tc);

stream_session_t *session_alloc (u32 thread_index);
int
session_enqueue_stream_connection (transport_connection_t * tc,
                                   vlib_buffer_t * b, u32 offset,
                                   u8 queue_event, u8 is_in_order);
int session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b,
                                      u8 proto, u8 queue_event);
int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
                               u32 offset, u32 max_bytes);
u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes);

int session_stream_connect_notify (transport_connection_t * tc, u8 is_fail);
int session_dgram_connect_notify (transport_connection_t * tc,
                                  u32 old_thread_index,
                                  stream_session_t ** new_session);
void stream_session_init_fifos_pointers (transport_connection_t * tc,
                                         u32 rx_pointer, u32 tx_pointer);

void stream_session_accept_notify (transport_connection_t * tc);
void stream_session_disconnect_notify (transport_connection_t * tc);
void stream_session_delete_notify (transport_connection_t * tc);
void stream_session_reset_notify (transport_connection_t * tc);
int stream_session_accept (transport_connection_t * tc, u32 listener_index,
                           u8 notify);
int session_open (u32 app_index, session_endpoint_t * tep, u32 opaque);
int stream_session_listen (stream_session_t * s, session_endpoint_t * tep);
int stream_session_stop_listen (stream_session_t * s);
void stream_session_disconnect (stream_session_t * s);
void stream_session_cleanup (stream_session_t * s);
void session_send_session_evt_to_thread (u64 session_handle,
                                         fifo_event_type_t evt_type,
                                         u32 thread_index);

u8 *format_stream_session (u8 * s, va_list * args);
uword unformat_stream_session (unformat_input_t * input, va_list * args);
uword unformat_transport_connection (unformat_input_t * input,
                                     va_list * args);

int
send_session_connected_callback (u32 app_index, u32 api_context,
                                 stream_session_t * s, u8 is_fail);


clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en);

always_inline unix_shared_memory_queue_t *
session_manager_get_vpp_event_queue (u32 thread_index)
{
  return session_manager_main.vpp_event_queues[thread_index];
}

int session_manager_flush_enqueue_events (u8 proto, u32 thread_index);

always_inline u64
listen_session_get_handle (stream_session_t * s)
{
  ASSERT (s->session_state == SESSION_STATE_LISTENING);
  return ((u64) s->session_type << 32) | s->session_index;
}

always_inline stream_session_t *
listen_session_get_from_handle (u64 handle)
{
  session_manager_main_t *smm = &session_manager_main;
  stream_session_t *s;
  u32 type, index;
  type = handle >> 32;
  index = handle & 0xFFFFFFFF;

  if (pool_is_free_index (smm->listen_sessions[type], index))
    return 0;

  s = pool_elt_at_index (smm->listen_sessions[type], index);
  ASSERT (s->session_state == SESSION_STATE_LISTENING);
  return s;
}

always_inline stream_session_t *
listen_session_new (session_type_t type)
{
  stream_session_t *s;
  pool_get_aligned (session_manager_main.listen_sessions[type], s,
                    CLIB_CACHE_LINE_BYTES);
  memset (s, 0, sizeof (*s));

  s->session_type = type;
  s->session_state = SESSION_STATE_LISTENING;
  s->session_index = s - session_manager_main.listen_sessions[type];

  return s;
}

always_inline stream_session_t *
listen_session_get (session_type_t type, u32 index)
{
  return pool_elt_at_index (session_manager_main.listen_sessions[type],
                            index);
}

always_inline void
listen_session_del (stream_session_t * s)
{
  pool_put (session_manager_main.listen_sessions[s->session_type], s);
}

transport_connection_t *listen_session_get_transport (stream_session_t * s);

int
listen_session_get_local_session_endpoint (stream_session_t * listener,
                                           session_endpoint_t * sep);

always_inline stream_session_t *
session_manager_get_listener (u8 type, u32 index)
{
  return pool_elt_at_index (session_manager_main.listen_sessions[type],
                            index);
}

always_inline void
session_manager_set_transport_rx_fn (u8 type, u8 is_peek)
{
  /* If an offset function is provided, then peek instead of dequeue */
  session_manager_main.session_tx_fns[type] = (is_peek) ?
    session_tx_fifo_peek_and_snd : session_tx_fifo_dequeue_and_snd;
}

session_type_t
session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4);

always_inline u8
session_manager_is_enabled ()
{
  return session_manager_main.is_enabled == 1;
}

#define session_cli_return_if_not_enabled()                             \
do {                                                                    \
    if (!session_manager_main.is_enabled)                               \
      return clib_error_return(0, "session layer is not enabled");      \
} while (0)

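/*
 * Illustrative sketch, not part of the original header: a CLI handler would
 * typically bail out early with the macro above when the session layer is
 * disabled. The command function name and its body are hypothetical.
 */
static inline clib_error_t *
example_session_command_fn (vlib_main_t * vm, unformat_input_t * input,
                            vlib_cli_command_t * cmd)
{
  session_cli_return_if_not_enabled ();
  /* ... actual command processing would go here ... */
  return 0;
}
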
#endif /* __included_session_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */