/*
 * udp: refactor udp code
 * vpp.git: src/vnet/session/session.h
 */
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #ifndef __included_session_h__
16 #define __included_session_h__
17
18 #include <vnet/session/stream_session.h>
19 #include <vnet/session/session_lookup.h>
20 #include <vnet/session/transport_interface.h>
21 #include <vlibmemory/unix_shared_memory_queue.h>
22 #include <vnet/session/session_debug.h>
23 #include <vnet/session/segment_manager.h>
24
/** Sentinel returned by half-open session lookups that find nothing */
#define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0)
/** Generic invalid 32-bit index sentinel */
#define INVALID_INDEX ((u32)~0)

/* TODO decide how much since we have pre-data as well */
#define MAX_HDRS_LEN    100     /* Max number of bytes for headers */
30
/** Event types carried on the session layer's fifo event queues */
typedef enum
{
  FIFO_EVENT_APP_RX,		/**< data enqueued for app to read */
  FIFO_EVENT_APP_TX,		/**< app enqueued data to transmit */
  FIFO_EVENT_TIMEOUT,		/**< timer expiry */
  FIFO_EVENT_DISCONNECT,	/**< session disconnect request */
  FIFO_EVENT_BUILTIN_RX,	/**< rx for built-in (in-vpp) apps */
  FIFO_EVENT_RPC,		/**< run an rpc_args_t callback */
} fifo_event_type_t;
40
41 static inline const char *
42 fifo_event_type_str (fifo_event_type_t et)
43 {
44   switch (et)
45     {
46     case FIFO_EVENT_APP_RX:
47       return "FIFO_EVENT_APP_RX";
48     case FIFO_EVENT_APP_TX:
49       return "FIFO_EVENT_APP_TX";
50     case FIFO_EVENT_TIMEOUT:
51       return "FIFO_EVENT_TIMEOUT";
52     case FIFO_EVENT_DISCONNECT:
53       return "FIFO_EVENT_DISCONNECT";
54     case FIFO_EVENT_BUILTIN_RX:
55       return "FIFO_EVENT_BUILTIN_RX";
56     case FIFO_EVENT_RPC:
57       return "FIFO_EVENT_RPC";
58     default:
59       return "UNKNOWN FIFO EVENT";
60     }
61 }
62
/** Error/counter definitions for the session input path: _(symbol, string).
 *  Expanded below into session_error_t and, elsewhere, into counter names. */
#define foreach_session_input_error                                     \
_(NO_SESSION, "No session drops")                                       \
_(NO_LISTENER, "No listener for dst port drops")                        \
_(ENQUEUED, "Packets pushed into rx fifo")                              \
_(NOT_READY, "Session not ready packets")                               \
_(FIFO_FULL, "Packets dropped for lack of rx fifo space")               \
_(EVENT_FIFO_FULL, "Events not sent for lack of event fifo space")      \
_(API_QUEUE_FULL, "Sessions not created for lack of API queue space")   \
_(NEW_SEG_NO_SPACE, "Created segment, couldn't allocate a fifo pair")   \
_(NO_SPACE, "Couldn't allocate a fifo pair")
73
/** SESSION_ERROR_* codes generated from foreach_session_input_error */
typedef enum
{
#define _(sym,str) SESSION_ERROR_##sym,
  foreach_session_input_error
#undef _
    SESSION_N_ERROR,
} session_error_t;
81
/* Event queue input node static next indices */
typedef enum
{
  SESSION_QUEUE_NEXT_DROP,
  SESSION_QUEUE_NEXT_TCP_IP4_OUTPUT,
  SESSION_QUEUE_NEXT_IP4_LOOKUP,
  SESSION_QUEUE_NEXT_TCP_IP6_OUTPUT,
  SESSION_QUEUE_NEXT_IP6_LOOKUP,
  SESSION_QUEUE_N_NEXT,		/**< number of next nodes */
} session_queue_next_t;
92
/** Callback + argument carried by a FIFO_EVENT_RPC event */
typedef struct
{
  void *fp;			/**< function to invoke */
  void *arg;			/**< opaque argument passed to fp */
} rpc_args_t;
98
/* *INDENT-OFF* */
/** Event posted on a vpp/app event queue. Packed to keep the queue
 *  element small; the union member is selected by event_type. */
typedef CLIB_PACKED (struct {
  union
    {
      svm_fifo_t * fifo;	/**< fifo for rx/tx events */
      u64 session_handle;	/**< session for e.g. disconnects */
      rpc_args_t rpc_args;	/**< callback for FIFO_EVENT_RPC */
    };
  u8 event_type;		/**< fifo_event_type_t */
  u8 postponed;			/**< event was deferred to a later dispatch */
}) session_fifo_event_t;
/* *INDENT-ON* */
111
/* Forward definition */
typedef struct _session_manager_main session_manager_main_t;

/** Per-transport tx function type: drains (or peeks) session s0's tx fifo
 *  in response to event e0 and updates *n_tx_pkts with packets sent. */
typedef int
  (session_fifo_rx_fn) (vlib_main_t * vm, vlib_node_runtime_t * node,
                        session_manager_main_t * smm,
                        session_fifo_event_t * e0, stream_session_t * s0,
                        u32 thread_index, int *n_tx_pkts);

/* Tx variants: peek leaves data in the fifo, dequeue consumes it */
extern session_fifo_rx_fn session_tx_fifo_peek_and_snd;
extern session_fifo_rx_fn session_tx_fifo_dequeue_and_snd;

u8 session_node_lookup_fifo_event (svm_fifo_t * f, session_fifo_event_t * e);
125
/** Session layer global state. Most members are per-worker vectors
 *  indexed by thread index. */
struct _session_manager_main
{
  /** Per worker thread session pools */
  stream_session_t **sessions;

  /** Per worker-thread count of threads peeking into the session pool */
  u32 *session_peekers;

  /** Per worker-thread rw peekers locks */
  clib_spinlock_t *peekers_readers_locks;
  clib_spinlock_t *peekers_write_locks;

  /** Pool of listen sessions. Same type as stream sessions to ease lookups */
  stream_session_t *listen_sessions[SESSION_N_TYPES];

  /** Per-proto, per-worker enqueue epoch counters */
  u8 *current_enqueue_epoch[TRANSPORT_N_PROTO];

  /** Per-proto, per-worker thread vector of sessions to enqueue */
  u32 **session_to_enqueue[TRANSPORT_N_PROTO];

  /** per-worker tx buffer free lists */
  u32 **tx_buffers;

  /** Per worker-thread vector of partially read events */
  session_fifo_event_t **free_event_vector;

  /** per-worker active event vectors */
  session_fifo_event_t **pending_event_vector;

  /** per-worker postponed disconnects */
  session_fifo_event_t **pending_disconnects;

  /** vpp fifo event queue */
  unix_shared_memory_queue_t **vpp_event_queues;

  /** vpp fifo event queue configured length */
  u32 configured_event_queue_length;

  /** session table size parameters */
  u32 configured_v4_session_table_buckets;
  u32 configured_v4_session_table_memory;
  u32 configured_v4_halfopen_table_buckets;
  u32 configured_v4_halfopen_table_memory;
  u32 configured_v6_session_table_buckets;
  u32 configured_v6_session_table_memory;
  u32 configured_v6_halfopen_table_buckets;
  u32 configured_v6_halfopen_table_memory;

  /** Unique segment name counter */
  u32 unique_segment_name_counter;

  /** Per transport rx function that can either dequeue or peek */
  session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES];

  /** Session manager is enabled */
  u8 is_enabled;

  /** Preallocate session config parameter */
  u32 preallocated_sessions;

#if SESSION_DBG
  /**
   * last event poll time by thread
   * Debug only. Will cause false cache-line sharing as-is
   */
  f64 *last_event_poll_by_thread;
#endif

};
196
197 extern session_manager_main_t session_manager_main;
198 extern vlib_node_registration_t session_queue_node;
199
200 /*
201  * Session manager function
202  */
203 always_inline session_manager_main_t *
204 vnet_get_session_manager_main ()
205 {
206   return &session_manager_main;
207 }
208
209 always_inline u8
210 stream_session_is_valid (u32 si, u8 thread_index)
211 {
212   stream_session_t *s;
213   s = pool_elt_at_index (session_manager_main.sessions[thread_index], si);
214   if (s->thread_index != thread_index || s->session_index != si
215       /* || s->server_rx_fifo->master_session_index != si
216          || s->server_tx_fifo->master_session_index != si
217          || s->server_rx_fifo->master_thread_index != thread_index
218          || s->server_tx_fifo->master_thread_index != thread_index */ )
219     return 0;
220   return 1;
221 }
222
223 stream_session_t *session_alloc (u32 thread_index);
224
225 always_inline stream_session_t *
226 session_get (u32 si, u32 thread_index)
227 {
228   ASSERT (stream_session_is_valid (si, thread_index));
229   return pool_elt_at_index (session_manager_main.sessions[thread_index], si);
230 }
231
232 always_inline stream_session_t *
233 session_get_if_valid (u64 si, u32 thread_index)
234 {
235   if (thread_index >= vec_len (session_manager_main.sessions))
236     return 0;
237
238   if (pool_is_free_index (session_manager_main.sessions[thread_index], si))
239     return 0;
240
241   ASSERT (stream_session_is_valid (si, thread_index));
242   return pool_elt_at_index (session_manager_main.sessions[thread_index], si);
243 }
244
245 always_inline u64
246 session_handle (stream_session_t * s)
247 {
248   return ((u64) s->thread_index << 32) | (u64) s->session_index;
249 }
250
251 always_inline u32
252 session_index_from_handle (u64 handle)
253 {
254   return handle & 0xFFFFFFFF;
255 }
256
257 always_inline u32
258 session_thread_from_handle (u64 handle)
259 {
260   return handle >> 32;
261 }
262
263 always_inline void
264 session_parse_handle (u64 handle, u32 * index, u32 * thread_index)
265 {
266   *index = session_index_from_handle (handle);
267   *thread_index = session_thread_from_handle (handle);
268 }
269
270 always_inline stream_session_t *
271 session_get_from_handle (u64 handle)
272 {
273   session_manager_main_t *smm = &session_manager_main;
274   return
275     pool_elt_at_index (smm->sessions[session_thread_from_handle (handle)],
276                        session_index_from_handle (handle));
277 }
278
279 /**
280  * Acquires a lock that blocks a session pool from expanding.
281  *
282  * This is typically used for safely peeking into other threads'
283  * pools in order to clone elements. Lock should be dropped as soon
284  * as possible by calling @ref session_pool_remove_peeker.
285  *
286  * NOTE: Avoid using pool_elt_at_index while the lock is held because
287  * it may lead to free elt bitmap expansion/contraction!
288  */
289 always_inline void
290 session_pool_add_peeker (u32 thread_index)
291 {
292   session_manager_main_t *smm = &session_manager_main;
293   if (thread_index == vlib_get_thread_index ())
294     return;
295   clib_spinlock_lock_if_init (&smm->peekers_readers_locks[thread_index]);
296   smm->session_peekers[thread_index] += 1;
297   if (smm->session_peekers[thread_index] == 1)
298     clib_spinlock_lock_if_init (&smm->peekers_write_locks[thread_index]);
299   clib_spinlock_unlock_if_init (&smm->peekers_readers_locks[thread_index]);
300 }
301
302 always_inline void
303 session_pool_remove_peeker (u32 thread_index)
304 {
305   session_manager_main_t *smm = &session_manager_main;
306   if (thread_index == vlib_get_thread_index ())
307     return;
308   ASSERT (session_manager_main.session_peekers[thread_index] > 0);
309   clib_spinlock_lock_if_init (&smm->peekers_readers_locks[thread_index]);
310   smm->session_peekers[thread_index] -= 1;
311   if (smm->session_peekers[thread_index] == 0)
312     clib_spinlock_unlock_if_init (&smm->peekers_write_locks[thread_index]);
313   clib_spinlock_unlock_if_init (&smm->peekers_readers_locks[thread_index]);
314 }
315
316 /**
317  * Get session from handle and 'lock' pool resize if not in same thread
318  *
319  * Caller should drop the peek 'lock' as soon as possible.
320  */
321 always_inline stream_session_t *
322 session_get_from_handle_safe (u64 handle)
323 {
324   session_manager_main_t *smm = &session_manager_main;
325   u32 thread_index = session_thread_from_handle (handle);
326   if (thread_index == vlib_get_thread_index ())
327     {
328       return pool_elt_at_index (smm->sessions[thread_index],
329                                 session_index_from_handle (handle));
330     }
331   else
332     {
333       session_pool_add_peeker (thread_index);
334       /* Don't use pool_elt_at index. See @ref session_pool_add_peeker */
335       return smm->sessions[thread_index] + session_index_from_handle (handle);
336     }
337 }
338
339 always_inline stream_session_t *
340 stream_session_listener_get (u8 sst, u64 si)
341 {
342   return pool_elt_at_index (session_manager_main.listen_sessions[sst], si);
343 }
344
345 always_inline u32
346 stream_session_get_index (stream_session_t * s)
347 {
348   if (s->session_state == SESSION_STATE_LISTENING)
349     return s - session_manager_main.listen_sessions[s->session_type];
350
351   return s - session_manager_main.sessions[s->thread_index];
352 }
353
354 always_inline u32
355 stream_session_max_rx_enqueue (transport_connection_t * tc)
356 {
357   stream_session_t *s = session_get (tc->s_index, tc->thread_index);
358   return svm_fifo_max_enqueue (s->server_rx_fifo);
359 }
360
361 always_inline u32
362 stream_session_rx_fifo_size (transport_connection_t * tc)
363 {
364   stream_session_t *s = session_get (tc->s_index, tc->thread_index);
365   return s->server_rx_fifo->nitems;
366 }
367
368 always_inline u32
369 session_get_index (stream_session_t * s)
370 {
371   return (s - session_manager_main.sessions[s->thread_index]);
372 }
373
374 always_inline stream_session_t *
375 session_clone_safe (u32 session_index, u32 thread_index)
376 {
377   stream_session_t *old_s, *new_s;
378   u32 current_thread_index = vlib_get_thread_index ();
379
380   /* If during the memcpy pool is reallocated AND the memory allocator
381    * decides to give the old chunk of memory to somebody in a hurry to
382    * scribble something on it, we have a problem. So add this thread as
383    * a session pool peeker.
384    */
385   session_pool_add_peeker (thread_index);
386   new_s = session_alloc (current_thread_index);
387   old_s = session_manager_main.sessions[thread_index] + session_index;
388   clib_memcpy (new_s, old_s, sizeof (*new_s));
389   session_pool_remove_peeker (thread_index);
390   new_s->thread_index = current_thread_index;
391   new_s->session_index = session_get_index (new_s);
392   return new_s;
393 }
394
395 transport_connection_t *session_get_transport (stream_session_t * s);
396
397 u32 stream_session_tx_fifo_max_dequeue (transport_connection_t * tc);
398
399 stream_session_t *session_alloc (u32 thread_index);
400 int
401 session_enqueue_stream_connection (transport_connection_t * tc,
402                                    vlib_buffer_t * b, u32 offset,
403                                    u8 queue_event, u8 is_in_order);
404 int session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b,
405                                       u8 proto, u8 queue_event);
406 int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
407                                u32 offset, u32 max_bytes);
408 u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes);
409
410 int session_stream_connect_notify (transport_connection_t * tc, u8 is_fail);
411 int session_dgram_connect_notify (transport_connection_t * tc,
412                                   u32 old_thread_index,
413                                   stream_session_t ** new_session);
414 void stream_session_init_fifos_pointers (transport_connection_t * tc,
415                                          u32 rx_pointer, u32 tx_pointer);
416
417 void stream_session_accept_notify (transport_connection_t * tc);
418 void stream_session_disconnect_notify (transport_connection_t * tc);
419 void stream_session_delete_notify (transport_connection_t * tc);
420 void stream_session_reset_notify (transport_connection_t * tc);
421 int stream_session_accept (transport_connection_t * tc, u32 listener_index,
422                            u8 notify);
423 int session_open (u32 app_index, session_endpoint_t * tep, u32 opaque);
424 int stream_session_listen (stream_session_t * s, session_endpoint_t * tep);
425 int stream_session_stop_listen (stream_session_t * s);
426 void stream_session_disconnect (stream_session_t * s);
427 void stream_session_cleanup (stream_session_t * s);
428 void session_send_session_evt_to_thread (u64 session_handle,
429                                          fifo_event_type_t evt_type,
430                                          u32 thread_index);
431
432 u8 *format_stream_session (u8 * s, va_list * args);
433 uword unformat_stream_session (unformat_input_t * input, va_list * args);
434 uword unformat_transport_connection (unformat_input_t * input,
435                                      va_list * args);
436
437 int
438 send_session_connected_callback (u32 app_index, u32 api_context,
439                                  stream_session_t * s, u8 is_fail);
440
441
442 clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en);
443
444 always_inline unix_shared_memory_queue_t *
445 session_manager_get_vpp_event_queue (u32 thread_index)
446 {
447   return session_manager_main.vpp_event_queues[thread_index];
448 }
449
450 int session_manager_flush_enqueue_events (u8 proto, u32 thread_index);
451
452 always_inline u64
453 listen_session_get_handle (stream_session_t * s)
454 {
455   ASSERT (s->session_state == SESSION_STATE_LISTENING);
456   return ((u64) s->session_type << 32) | s->session_index;
457 }
458
459 always_inline stream_session_t *
460 listen_session_get_from_handle (u64 handle)
461 {
462   session_manager_main_t *smm = &session_manager_main;
463   stream_session_t *s;
464   u32 type, index;
465   type = handle >> 32;
466   index = handle & 0xFFFFFFFF;
467
468   if (pool_is_free_index (smm->listen_sessions[type], index))
469     return 0;
470
471   s = pool_elt_at_index (smm->listen_sessions[type], index);
472   ASSERT (s->session_state == SESSION_STATE_LISTENING);
473   return s;
474 }
475
476 always_inline stream_session_t *
477 listen_session_new (session_type_t type)
478 {
479   stream_session_t *s;
480   pool_get_aligned (session_manager_main.listen_sessions[type], s,
481                     CLIB_CACHE_LINE_BYTES);
482   memset (s, 0, sizeof (*s));
483
484   s->session_type = type;
485   s->session_state = SESSION_STATE_LISTENING;
486   s->session_index = s - session_manager_main.listen_sessions[type];
487
488   return s;
489 }
490
491 always_inline stream_session_t *
492 listen_session_get (session_type_t type, u32 index)
493 {
494   return pool_elt_at_index (session_manager_main.listen_sessions[type],
495                             index);
496 }
497
/** Return a listen session to its per-type pool. Does not free fifos or
 *  notify the transport; callers handle that separately. */
always_inline void
listen_session_del (stream_session_t * s)
{
  pool_put (session_manager_main.listen_sessions[s->session_type], s);
}
503
504 transport_connection_t *listen_session_get_transport (stream_session_t * s);
505
506 int
507 listen_session_get_local_session_endpoint (stream_session_t * listener,
508                                            session_endpoint_t * sep);
509
510 always_inline stream_session_t *
511 session_manager_get_listener (u8 type, u32 index)
512 {
513   return pool_elt_at_index (session_manager_main.listen_sessions[type],
514                             index);
515 }
516
517 always_inline void
518 session_manager_set_transport_rx_fn (u8 type, u8 is_peek)
519 {
520   /* If an offset function is provided, then peek instead of dequeue */
521   session_manager_main.session_tx_fns[type] = (is_peek) ?
522     session_tx_fifo_peek_and_snd : session_tx_fifo_dequeue_and_snd;
523 }
524
525 session_type_t
526 session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4);
527
528 always_inline u8
529 session_manager_is_enabled ()
530 {
531   return session_manager_main.is_enabled == 1;
532 }
533
/** For CLI handlers: bail out with an error if the session layer is off */
#define session_cli_return_if_not_enabled()                             \
do {                                                                    \
    if (!session_manager_main.is_enabled)                               \
      return clib_error_return(0, "session layer is not enabled");      \
} while (0)
539
540 #endif /* __included_session_h__ */
541
542 /*
543  * fd.io coding-style-patch-verification: ON
544  *
545  * Local Variables:
546  * eval: (c-set-style "gnu")
547  * End:
548  */