udp: use pool safe reallocs 17/35717/23
author     Florin Coras <fcoras@cisco.com>
           Fri, 18 Mar 2022 19:50:03 +0000 (12:50 -0700)
committer  Damjan Marion <dmarion@me.com>
           Mon, 4 Apr 2022 22:30:48 +0000 (22:30 +0000)
Type: improvement

Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: I5bd0b300af07424d1ce4807fa0b17e375001f089

src/vnet/udp/udp.c
src/vnet/udp/udp.h
src/vnet/udp/udp_input.c

src/vnet/udp/udp.c
index d2a2b63..43391f9 100644
--- a/src/vnet/udp/udp.c
+++ b/src/vnet/udp/udp.c
@@ -94,22 +94,10 @@ udp_connection_alloc (u32 thread_index)
 {
   udp_main_t *um = &udp_main;
   udp_connection_t *uc;
-  u32 will_expand = pool_get_will_expand (um->connections[thread_index]);
 
-  if (PREDICT_FALSE (will_expand))
-    {
-      clib_spinlock_lock_if_init (&udp_main.peekers_write_locks
-                                 [thread_index]);
-      pool_get_aligned (udp_main.connections[thread_index], uc,
-                       CLIB_CACHE_LINE_BYTES);
-      clib_spinlock_unlock_if_init (&udp_main.peekers_write_locks
-                                   [thread_index]);
-    }
-  else
-    {
-      pool_get_aligned (um->connections[thread_index], uc,
-                       CLIB_CACHE_LINE_BYTES);
-    }
+  pool_get_aligned_safe (um->connections[thread_index], uc,
+                        CLIB_CACHE_LINE_BYTES);
+
   clib_memset (uc, 0, sizeof (*uc));
   uc->c_c_index = uc - um->connections[thread_index];
   uc->c_thread_index = thread_index;
@@ -502,7 +490,6 @@ udp_init (vlib_main_t * vm)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   u32 num_threads;
   ip_protocol_info_t *pi;
-  int i;
 
   /*
    * Registrations
@@ -527,16 +514,6 @@ udp_init (vlib_main_t * vm)
 
   num_threads = 1 /* main thread */  + tm->n_threads;
   vec_validate (um->connections, num_threads - 1);
-  vec_validate (um->connection_peekers, num_threads - 1);
-  vec_validate (um->peekers_readers_locks, num_threads - 1);
-  vec_validate (um->peekers_write_locks, num_threads - 1);
-
-  if (num_threads > 1)
-    for (i = 0; i < num_threads; i++)
-      {
-       clib_spinlock_init (&um->peekers_readers_locks[i]);
-       clib_spinlock_init (&um->peekers_write_locks[i]);
-      }
 
   um->local_to_input_edge[UDP_IP4] =
     vlib_node_add_next (vm, udp4_local_node.index, udp4_input_node.index);
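
For readers unfamiliar with the vppinfra pool macros, the sketch below shows the allocation pattern that udp_connection_alloc follows after this change. It is a simplified, hypothetical example (example_elt_t and example_elt_alloc are illustrative names, not part of this patch): pool_get_aligned_safe hides the will-expand check and performs any pool reallocation safely with respect to other threads, so callers no longer wrap the get in the per-thread peeker/write spinlocks removed above.

    /* Hypothetical example: a per-thread pool element allocated with the
     * barrier-safe getter used by this patch. */
    #include <vppinfra/pool.h>
    #include <vppinfra/string.h>

    typedef struct
    {
      u32 elt_index;
      u32 thread_index;
    } example_elt_t;

    static example_elt_t *
    example_elt_alloc (example_elt_t **pools, u32 thread_index)
    {
      example_elt_t *e;

      /* If the pool must grow, the realloc is done in a thread-safe way,
       * so no peeker/write locks are needed around the get. */
      pool_get_aligned_safe (pools[thread_index], e, CLIB_CACHE_LINE_BYTES);

      clib_memset (e, 0, sizeof (*e));
      e->elt_index = e - pools[thread_index];
      e->thread_index = thread_index;
      return e;
    }
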
src/vnet/udp/udp.h
index f157711..92bcd1c 100644
--- a/src/vnet/udp/udp.h
+++ b/src/vnet/udp/udp.h
@@ -115,9 +115,6 @@ typedef struct
    * Per-worker thread udp connection pools used with session layer
    */
   udp_connection_t **connections;
-  u32 *connection_peekers;
-  clib_spinlock_t *peekers_readers_locks;
-  clib_spinlock_t *peekers_write_locks;
   udp_connection_t *listener_pool;
 
   u16 default_mtu;
@@ -161,65 +158,23 @@ udp_connection_from_transport (transport_connection_t * tc)
   return ((udp_connection_t *) tc);
 }
 
-always_inline u32
-udp_connection_index (udp_connection_t * uc)
-{
-  return (uc - udp_main.connections[uc->c_thread_index]);
-}
-
 void udp_connection_free (udp_connection_t * uc);
 udp_connection_t *udp_connection_alloc (u32 thread_index);
 
-/**
- * Acquires a lock that blocks a connection pool from expanding.
- */
-always_inline void
-udp_pool_add_peeker (u32 thread_index)
-{
-  if (thread_index != vlib_get_thread_index ())
-    return;
-  clib_spinlock_lock_if_init (&udp_main.peekers_readers_locks[thread_index]);
-  udp_main.connection_peekers[thread_index] += 1;
-  if (udp_main.connection_peekers[thread_index] == 1)
-    clib_spinlock_lock_if_init (&udp_main.peekers_write_locks[thread_index]);
-  clib_spinlock_unlock_if_init (&udp_main.peekers_readers_locks
-                               [thread_index]);
-}
-
-always_inline void
-udp_pool_remove_peeker (u32 thread_index)
-{
-  if (thread_index != vlib_get_thread_index ())
-    return;
-  ASSERT (udp_main.connection_peekers[thread_index] > 0);
-  clib_spinlock_lock_if_init (&udp_main.peekers_readers_locks[thread_index]);
-  udp_main.connection_peekers[thread_index] -= 1;
-  if (udp_main.connection_peekers[thread_index] == 0)
-    clib_spinlock_unlock_if_init (&udp_main.peekers_write_locks
-                                 [thread_index]);
-  clib_spinlock_unlock_if_init (&udp_main.peekers_readers_locks
-                               [thread_index]);
-}
-
 always_inline udp_connection_t *
 udp_connection_clone_safe (u32 connection_index, u32 thread_index)
 {
+  u32 current_thread_index = vlib_get_thread_index (), new_index;
   udp_connection_t *old_c, *new_c;
-  u32 current_thread_index = vlib_get_thread_index ();
-  new_c = udp_connection_alloc (current_thread_index);
 
-  /* If during the memcpy pool is reallocated AND the memory allocator
-   * decides to give the old chunk of memory to somebody in a hurry to
-   * scribble something on it, we have a problem. So add this thread as
-   * a session pool peeker.
-   */
-  udp_pool_add_peeker (thread_index);
+  new_c = udp_connection_alloc (current_thread_index);
+  new_index = new_c->c_c_index;
+  /* Connection pool always realloced with barrier */
   old_c = udp_main.connections[thread_index] + connection_index;
   clib_memcpy_fast (new_c, old_c, sizeof (*new_c));
   old_c->flags |= UDP_CONN_F_MIGRATED;
-  udp_pool_remove_peeker (thread_index);
   new_c->c_thread_index = current_thread_index;
-  new_c->c_c_index = udp_connection_index (new_c);
+  new_c->c_c_index = new_index;
   new_c->c_fib_index = old_c->c_fib_index;
   /* Assume cloned sessions don't need lock */
   new_c->rx_lock = 0;
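
For context, here is a condensed, annotated restatement of the new clone path (a sketch in the context of udp.h above, not the patch itself). It spells out why the fresh connection's pool index is cached before the copy: clib_memcpy_fast copies the whole struct, including the old connection's c_c_index, so the saved index has to be written back afterwards. The barrier-protected reallocs are what make it safe to read old_c directly from the owning thread's pool without the removed peeker locks.

    /* Sketch of udp_connection_clone_safe after this change (simplified). */
    always_inline udp_connection_t *
    udp_connection_clone_safe_sketch (u32 connection_index, u32 thread_index)
    {
      u32 current_thread_index = vlib_get_thread_index (), new_index;
      udp_connection_t *old_c, *new_c;

      /* Allocate on the current worker; may grow the local pool safely. */
      new_c = udp_connection_alloc (current_thread_index);

      /* Save the new element's index now: the full-struct copy below would
       * otherwise overwrite c_c_index with the old connection's index. */
      new_index = new_c->c_c_index;

      /* Safe to read the other thread's pool element directly because pool
       * reallocations now happen only under the worker barrier. */
      old_c = udp_main.connections[thread_index] + connection_index;
      clib_memcpy_fast (new_c, old_c, sizeof (*new_c));
      old_c->flags |= UDP_CONN_F_MIGRATED;

      /* Fix up the fields that must describe the clone, not the original. */
      new_c->c_thread_index = current_thread_index;
      new_c->c_c_index = new_index;
      new_c->c_fib_index = old_c->c_fib_index;
      new_c->rx_lock = 0; /* cloned sessions assumed not to need the lock */

      return new_c;
    }
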
src/vnet/udp/udp_input.c
index e701ca5..f6c55f6 100644
--- a/src/vnet/udp/udp_input.c
+++ b/src/vnet/udp/udp_input.c
@@ -251,11 +251,6 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto done;
        }
 
-      /*
-       * If session exists pool peeker lock is taken at this point unless
-       * the session is already on the right thread or is a listener
-       */
-
       if (s0->session_state == SESSION_STATE_OPENED)
        {
          u8 queue_event = 1;