freelist_index = f->freelist_index;
- ASSERT (freelist_index > 0 && freelist_index < vec_len (fsh->free_fifos));
+ ASSERT (freelist_index < vec_len (fsh->free_fifos));
ssvm_lock_non_recursive (sh, 2);
oldheap = ssvm_push_heap (sh);
return 0;
}
+static u8 *cache_uri;
+static session_type_t cache_sst;
+static transport_endpoint_t *cache_tep;
+
int
parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep)
{
unformat_input_t _input, *input = &_input;
+ if (cache_uri && !strncmp (uri, (char *) cache_uri, vec_len (cache_uri)))
+ {
+ *sst = cache_sst;
+ *tep = *cache_tep;
+ return 0;
+ }
+
/* Make sure */
uri = (char *) format (0, "%s%c", uri, 0);
}
unformat_free (input);
+ vec_free (cache_uri);
+ cache_uri = (u8 *) uri;
+ cache_sst = *sst;
+ if (cache_tep)
+ clib_mem_free (cache_tep);
+ cache_tep = clib_mem_alloc (sizeof (*tep));
+ *cache_tep = *tep;
+
return 0;
}
session_vpp_event_queue_allocate (smm, i);
/* Preallocate sessions */
- if (num_threads == 1)
+ if (smm->preallocated_sessions)
{
- for (i = 0; i < smm->preallocated_sessions; i++)
+ if (num_threads == 1)
{
- stream_session_t *ss __attribute__ ((unused));
- pool_get_aligned (smm->sessions[0], ss, CLIB_CACHE_LINE_BYTES);
+ pool_init_fixed (smm->sessions[0], smm->preallocated_sessions);
}
-
- for (i = 0; i < smm->preallocated_sessions; i++)
- pool_put_index (smm->sessions[0], i);
- }
- else
- {
- int j;
- preallocated_sessions_per_worker = smm->preallocated_sessions /
- (num_threads - 1);
-
- for (j = 1; j < num_threads; j++)
+ else
{
- for (i = 0; i < preallocated_sessions_per_worker; i++)
+ int j;
+ preallocated_sessions_per_worker =
+ (1.1 * (f64) smm->preallocated_sessions /
+ (f64) (num_threads - 1));
+
+ for (j = 1; j < num_threads; j++)
{
- stream_session_t *ss __attribute__ ((unused));
- pool_get_aligned (smm->sessions[j], ss, CLIB_CACHE_LINE_BYTES);
+ pool_init_fixed (smm->sessions[j],
+ preallocated_sessions_per_worker);
}
- for (i = 0; i < preallocated_sessions_per_worker; i++)
- pool_put_index (smm->sessions[j], i);
}
}
{
*proto = TRANSPORT_PROTO_UDP;
}
- else if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4,
- lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port))
+ if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4,
+ lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port))
{
*is_ip4 = 1;
tuple_is_set = 1;
else
{
tc = tcp_connection_get (conn_index, vlib_get_thread_index ());
+ /* note: the connection may have already disappeared */
+ if (PREDICT_FALSE (tc == 0))
+ return;
+
ASSERT (tc->state == TCP_STATE_SYN_RCVD);
}
tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
clib_error_t *error = 0;
u32 num_threads;
- int i, thread;
+ int thread;
tcp_connection_t *tc __attribute__ ((unused));
u32 preallocated_connections_per_thread;
}
for (; thread < num_threads; thread++)
{
- for (i = 0; i < preallocated_connections_per_thread; i++)
- pool_get (tm->connections[thread], tc);
-
- for (i = 0; i < preallocated_connections_per_thread; i++)
- pool_put_index (tm->connections[thread], i);
+ if (preallocated_connections_per_thread)
+ pool_init_fixed (tm->connections[thread],
+ preallocated_connections_per_thread);
}
/*
- * Preallocate half-open connections
+ * Use a preallocated half-open connection pool?
*/
- for (i = 0; i < tm->preallocated_half_open_connections; i++)
- pool_get (tm->half_open_connections, tc);
-
- for (i = 0; i < tm->preallocated_half_open_connections; i++)
- pool_put_index (tm->half_open_connections, i);
+ if (tm->preallocated_half_open_connections)
+ pool_init_fixed (tm->half_open_connections,
+ tm->preallocated_half_open_connections);
/* Initialize per worker thread tx buffers (used for control messages) */
vec_validate (tm->tx_buffers, num_threads - 1);
always_inline int
tcp_alloc_tx_buffers (tcp_main_t * tm, u8 thread_index, u32 n_free_buffers)
{
+ u32 current_length = vec_len (tm->tx_buffers[thread_index]);
+
vec_validate (tm->tx_buffers[thread_index],
- vec_len (tm->tx_buffers[thread_index]) + n_free_buffers - 1);
+ current_length + n_free_buffers - 1);
_vec_len (tm->tx_buffers[thread_index]) =
- vlib_buffer_alloc_from_free_list (vlib_get_main (),
- tm->tx_buffers[thread_index],
- n_free_buffers,
- VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ current_length + vlib_buffer_alloc_from_free_list (vlib_get_main (),
+ tm->tx_buffers
+ [thread_index],
+ n_free_buffers,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
/* buffer shortage, report failure */
if (vec_len (tm->tx_buffers[thread_index]) == 0)
{
if (is_syn)
{
tc = tcp_half_open_connection_get (index);
+ /* Note: the connection may have transitioned to ESTABLISHED... */
+ if (PREDICT_FALSE (tc == 0))
+ return;
tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
}
else
{
tc = tcp_connection_get (index, thread_index);
+ /* Note: the connection may have been closed and pool_put */
+ if (PREDICT_FALSE (tc == 0))
+ return;
tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
}
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
- /* Send one segment */
+ /* Send one segment. Note that n_bytes may be zero due to buffer shortfall */
n_bytes = tcp_prepare_retransmit_segment (tc, 0, tc->snd_mss, &b);
- ASSERT (n_bytes);
- bi = vlib_get_buffer_index (vm, b);
+
/* TODO be less aggressive about this */
scoreboard_clear (&tc->sack_sb);
if (n_bytes == 0)
{
- clib_warning ("could not retransmit anything");
- clib_warning ("%U", format_tcp_connection, tc, 2);
-
+ if (b)
+ {
+ clib_warning ("retransmit fail: %U", format_tcp_connection, tc,
+ 2);
+ ASSERT (tc->rto_boff > 1 && tc->snd_una == tc->snd_congestion);
+ }
/* Try again eventually */
tcp_retransmit_timer_set (tc);
- ASSERT (0 || (tc->rto_boff > 1
- && tc->snd_una == tc->snd_congestion));
return;
}
+ bi = vlib_get_buffer_index (vm, b);
+
/* For first retransmit, record timestamp (Eifel detection RFC3522) */
if (tc->rto_boff == 1)
tc->snd_rxt_ts = tcp_time_now ();
tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
- return;
+ {
+ clib_warning ("tcp_get_free_buffer_index FAIL");
+ return;
+ }
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tcp_push_hdr_i (tc, b, tc->state, 1);
if ENABLE_TESTS
TESTS += test_bihash_template \
test_dlist \
- test_elog \
test_elf \
+ test_elog \
test_fifo \
test_format \
+ test_fpool \
test_hash \
test_heap \
test_longjmp \
test_zvec
endif
-TESTS += test_bihash_template
-
noinst_PROGRAMS = $(TESTS)
check_PROGRAMS = $(TESTS)
test_bihash_template_SOURCES = vppinfra/test_bihash_template.c
test_dlist_SOURCES = vppinfra/test_dlist.c
-test_elog_SOURCES = vppinfra/test_elog.c
test_elf_SOURCES = vppinfra/test_elf.c
+test_elog_SOURCES = vppinfra/test_elog.c
test_fifo_SOURCES = vppinfra/test_fifo.c
test_format_SOURCES = vppinfra/test_format.c
+test_fpool_SOURCES = vppinfra/test_fpool.c
test_hash_SOURCES = vppinfra/test_hash.c
test_heap_SOURCES = vppinfra/test_heap.c
test_longjmp_SOURCES = vppinfra/test_longjmp.c
test_mheap_SOURCES = vppinfra/test_mheap.c
test_pool_iterate_SOURCES = vppinfra/test_pool_iterate.c
test_ptclosure_SOURCES = vppinfra/test_ptclosure.c
-test_random_SOURCES = vppinfra/test_random.c
test_random_isaac_SOURCES = vppinfra/test_random_isaac.c
+test_random_SOURCES = vppinfra/test_random.c
test_serialize_SOURCES = vppinfra/test_serialize.c
test_slist_SOURCES = vppinfra/test_slist.c
test_socket_SOURCES = vppinfra/test_socket.c
# So we'll need -DDEBUG to enable ASSERTs
test_bihash_template_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_dlist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
-test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_elf_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
+test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_fifo_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_format_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
+test_fpool_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_hash_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_heap_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_longjmp_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_ptclosure_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_random_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_random_isaac_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
-test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_serialize_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_slist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
+test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_time_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_timing_wheel_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_tw_timer_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG
test_bihash_template_LDADD = libvppinfra.la
test_dlist_LDADD = libvppinfra.la
-test_elog_LDADD = libvppinfra.la
test_elf_LDADD = libvppinfra.la
+test_elog_LDADD = libvppinfra.la
test_fifo_LDADD = libvppinfra.la
test_format_LDADD = libvppinfra.la
+test_fpool_LDADD = libvppinfra.la
test_hash_LDADD = libvppinfra.la
test_heap_LDADD = libvppinfra.la
test_longjmp_LDADD = libvppinfra.la
test_mheap_LDADD = libvppinfra.la
test_pool_iterate_LDADD = libvppinfra.la
test_ptclosure_LDADD = libvppinfra.la
-test_random_LDADD = libvppinfra.la
test_random_isaac_LDADD = libvppinfra.la
+test_random_LDADD = libvppinfra.la
test_serialize_LDADD = libvppinfra.la
test_slist_LDADD = libvppinfra.la
test_socket_LDADD = libvppinfra.la
test_bihash_template_LDFLAGS = -static
test_dlist_LDFLAGS = -static
-test_elog_LDFLAGS = -static
test_elf_LDFLAGS = -static
+test_elog_LDFLAGS = -static
test_fifo_LDFLAGS = -static
test_format_LDFLAGS = -static
+test_fpool_LDFLAGS = -static
test_hash_LDFLAGS = -static
test_heap_LDFLAGS = -static
test_longjmp_LDFLAGS = -static
test_mheap_LDFLAGS = -static
test_pool_iterate_LDFLAGS = -static
test_ptclosure_LDFLAGS = -static
-test_random_LDFLAGS = -static
test_random_isaac_LDFLAGS = -static
+test_random_LDFLAGS = -static
test_serialize_LDFLAGS = -static
test_slist_LDFLAGS = -static
test_socket_LDFLAGS = -static
vppinfra/fifo.c \
vppinfra/fheap.c \
vppinfra/format.c \
+ vppinfra/pool.c \
vppinfra/graph.c \
vppinfra/hash.c \
vppinfra/heap.c \
#define BIHASH_TYPE _24_8
#define BIHASH_KVP_PER_PAGE 4
-#define BIHASH_KVP_CACHE_SIZE 3
+#define BIHASH_KVP_CACHE_SIZE 0
#ifndef __included_bihash_24_8_h__
#define __included_bihash_24_8_h__
--- /dev/null
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ Copyright (c) 2001, 2002, 2003, 2004 Eliot Dresselhaus
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <vppinfra/pool.h>
+
+/** Initialize a fixed-size, preallocated pool.
+ *
+ * Lays out, in one anonymous demand-zero mmap segment:
+ *   pool header | vector header | max_elts * elt_size payload | free-index vector
+ * All elements start on the free list. On mmap failure, *pool_ptr is set
+ * to 0 and the function returns (caller must check for a NULL pool).
+ */
+void
+_pool_init_fixed (void **pool_ptr, u32 elt_size, u32 max_elts)
+{
+  u8 *mmap_base;
+  u64 vector_size;
+  u64 free_index_size;
+  u64 total_size;
+  u64 page_size;
+  pool_header_t *fh;
+  vec_header_t *vh;
+  u8 *v;
+  u32 *fi;
+  u32 i;
+  u32 set_bits;
+
+  ASSERT (elt_size);
+  ASSERT (max_elts);
+
+  vector_size = pool_aligned_header_bytes + vec_header_bytes (0)
+    + (u64) elt_size *max_elts;
+
+  free_index_size = vec_header_bytes (0) + sizeof (u32) * max_elts;
+
+  /* Round up to a cache line boundary */
+  vector_size = (vector_size + CLIB_CACHE_LINE_BYTES - 1)
+    & ~(CLIB_CACHE_LINE_BYTES - 1);
+
+  free_index_size = (free_index_size + CLIB_CACHE_LINE_BYTES - 1)
+    & ~(CLIB_CACHE_LINE_BYTES - 1);
+
+  total_size = vector_size + free_index_size;
+
+  /* Round up to an even number of pages */
+  page_size = clib_mem_get_page_size ();
+  total_size = (total_size + page_size - 1) & ~(page_size - 1);
+
+  /* mmap demand zero memory */
+
+  mmap_base = mmap (0, total_size, PROT_READ | PROT_WRITE,
+		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+  if (mmap_base == MAP_FAILED)
+    {
+      clib_unix_warning ("mmap");
+      *pool_ptr = 0;
+      /* Must bail out here: falling through would dereference MAP_FAILED */
+      return;
+    }
+
+  /* First comes the pool header */
+  fh = (pool_header_t *) mmap_base;
+  /* Find the user vector pointer */
+  v = (u8 *) (mmap_base + pool_aligned_header_bytes);
+  /* Finally, the vector header */
+  vh = _vec_find (v);
+
+  fh->free_bitmap = 0;		/* No free elts (yet) */
+  fh->max_elts = max_elts;
+  fh->mmap_base = mmap_base;
+  fh->mmap_size = total_size;
+
+  vh->len = max_elts;
+
+  /* Build the free-index vector */
+  vh = (vec_header_t *) (v + vector_size);
+  vh->len = max_elts;
+  fi = (u32 *) (vh + 1);
+
+  fh->free_indices = fi;
+
+  /* Set the entire free bitmap */
+  clib_bitmap_alloc (fh->free_bitmap, max_elts);
+  memset (fh->free_bitmap, 0xff, vec_len (fh->free_bitmap) * sizeof (uword));
+
+  /* Clear any extraneous set bits */
+  set_bits = vec_len (fh->free_bitmap) * BITS (uword);
+
+  for (i = max_elts; i < set_bits; i++)
+    fh->free_bitmap = clib_bitmap_set (fh->free_bitmap, i, 0);
+
+  /* Create the initial free vector: highest index pops first */
+  for (i = 0; i < max_elts; i++)
+    fi[i] = (max_elts - 1) - i;
+
+  *pool_ptr = v;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
/** Vector of free indices. One element for each set bit in bitmap. */
u32 *free_indices;
+
+ /* The following fields are set for fixed-size, preallocated pools */
+
+ /** Maximum size of the pool, in elements */
+ u32 max_elts;
+
+ /** mmap segment info: base + length */
+ u8 *mmap_base;
+ u64 mmap_size;
+
} pool_header_t;
/** Align pool header so that pointers are naturally aligned. */
return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *));
}
+extern void _pool_init_fixed (void **, u32, u32);
+extern void fpool_free (void *);
+
+/** initialize a fixed-size, preallocated pool */
+#define pool_init_fixed(pool,max_elts) \
+{ \
+ _pool_init_fixed((void **)&(pool),sizeof(pool[0]),max_elts); \
+}
+
/** Validate a pool */
always_inline void
pool_validate (void *v)
do { \
uword __pool_validate_index = (i); \
vec_validate_ha ((v), __pool_validate_index, \
- pool_aligned_header_bytes, /* align */ 0); \
+ pool_aligned_header_bytes, /* align */ 0); \
pool_header_validate_index ((v), __pool_validate_index); \
} while (0)
First search free list. If nothing is free extend vector of objects.
*/
-#define pool_get_aligned(P,E,A) \
-do { \
- pool_header_t * _pool_var (p) = pool_header (P); \
- uword _pool_var (l); \
- \
- _pool_var (l) = 0; \
- if (P) \
- _pool_var (l) = vec_len (_pool_var (p)->free_indices); \
- \
- if (_pool_var (l) > 0) \
- { \
- /* Return free element from free list. */ \
+#define pool_get_aligned(P,E,A) \
+do { \
+ pool_header_t * _pool_var (p) = pool_header (P); \
+ uword _pool_var (l); \
+ \
+ _pool_var (l) = 0; \
+ if (P) \
+ _pool_var (l) = vec_len (_pool_var (p)->free_indices); \
+ \
+ if (_pool_var (l) > 0) \
+ { \
+ /* Return free element from free list. */ \
uword _pool_var (i) = _pool_var (p)->free_indices[_pool_var (l) - 1]; \
- (E) = (P) + _pool_var (i); \
- _pool_var (p)->free_bitmap = \
+ (E) = (P) + _pool_var (i); \
+ _pool_var (p)->free_bitmap = \
clib_bitmap_andnoti (_pool_var (p)->free_bitmap, _pool_var (i)); \
- _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \
- } \
- else \
- { \
- /* Nothing on free list, make a new element and return it. */ \
- P = _vec_resize (P, \
- /* length_increment */ 1, \
+ _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \
+ } \
+ else \
+ { \
+ /* fixed-size, preallocated pools cannot expand */ \
+ if ((P) && _pool_var(p)->max_elts) \
+ { \
+ clib_warning ("can't expand fixed-size pool"); \
+ os_out_of_memory(); \
+ } \
+ /* Nothing on free list, make a new element and return it. */ \
+ P = _vec_resize (P, \
+ /* length_increment */ 1, \
/* new size */ (vec_len (P) + 1) * sizeof (P[0]), \
- pool_aligned_header_bytes, \
- /* align */ (A)); \
- E = vec_end (P) - 1; \
- } \
+ pool_aligned_header_bytes, \
+ /* align */ (A)); \
+ E = vec_end (P) - 1; \
+ } \
} while (0)
/** Allocate an object E from a pool P (unspecified alignment). */
\
_pool_var (l) = 0; \
if (P) \
+ { \
+ if (_pool_var (p)->max_elts) \
+ return 0; \
_pool_var (l) = vec_len (_pool_var (p)->free_indices); \
+ } \
\
/* Free elements, certainly won't expand */ \
if (_pool_var (l) > 0) \
/* Add element to free bitmap and to free list. */ \
_pool_var (p)->free_bitmap = \
clib_bitmap_ori (_pool_var (p)->free_bitmap, _pool_var (l)); \
- vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \
+ /* Preallocated pool? */ \
+ if (_pool_var (p)->max_elts) \
+ { \
+ ASSERT(_pool_var(l) < _pool_var (p)->max_elts); \
+ _pool_var(p)->free_indices[_vec_len(_pool_var(p)->free_indices)] = \
+ _pool_var(l); \
+ _vec_len(_pool_var(p)->free_indices) += 1; \
+ } \
+ else \
+ vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \
} while (0)
/** Free pool element with given index. */
#define pool_alloc_aligned(P,N,A) \
do { \
pool_header_t * _p; \
+ \
+ if ((P)) \
+ { \
+ _p = pool_header (P); \
+ if (_p->max_elts) \
+ { \
+ clib_warning ("Can't expand fixed-size pool"); \
+ os_out_of_memory(); \
+ } \
+ } \
+ \
(P) = _vec_resize ((P), 0, (vec_len (P) + (N)) * sizeof (P[0]), \
pool_aligned_header_bytes, \
(A)); \
if (!v)
return v;
clib_bitmap_free (p->free_bitmap);
- vec_free (p->free_indices);
- vec_free_h (v, pool_aligned_header_bytes);
+
+ if (p->max_elts)
+ {
+ int rv;
+
+ rv = munmap (p->mmap_base, p->mmap_size);
+ if (rv)
+ clib_unix_warning ("munmap");
+ }
+ else
+ {
+ vec_free (p->free_indices);
+ vec_free_h (v, pool_aligned_header_bytes);
+ }
return 0;
}
--- /dev/null
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+#include <vppinfra/pool.h>
+
+/* can be a very large size */
+#define NELTS 1024
+
+int
+main (int argc, char *argv[])
+{
+  u32 *junk = 0;
+  int i;
+  u32 *tp = 0;		/* the fixed-size pool under test */
+  u32 *indices = 0;	/* remembered element indices, in allocation order */
+
+  clib_mem_init (0, 3ULL << 30);
+
+  vec_validate (indices, NELTS - 1);
+  _vec_len (indices) = 0;
+
+  pool_init_fixed (tp, NELTS);
+
+  /* Allocate every element; each stores its own allocation ordinal */
+  for (i = 0; i < NELTS; i++)
+    {
+      pool_get (tp, junk);
+      vec_add1 (indices, junk - tp);
+      *junk = i;
+    }
+
+  /* Verify every element survived and holds the expected value */
+  for (i = 0; i < NELTS; i++)
+    {
+      junk = pool_elt_at_index (tp, indices[i]);
+      ASSERT (*junk == i);
+    }
+
+  fformat (stdout, "%d pool elts before deletes\n", pool_elts (tp));
+
+  /* Free two arbitrary elements to exercise the preallocated free list */
+  pool_put_index (tp, indices[12]);
+  pool_put_index (tp, indices[43]);
+
+  fformat (stdout, "%d pool elts after deletes\n", pool_elts (tp));
+
+  pool_validate (tp);
+
+  /* pool_free should munmap the fixed-pool segment */
+  pool_free (tp);
+  return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 1
#define TW_SLOTS_PER_RING 2048
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 2
#define TW_SLOTS_PER_RING 512
#define LOG2_TW_TIMERS_PER_OBJECT 4
#define TW_SUFFIX _16t_2w_512sl
#define TW_FAST_WHEEL_BITMAP 0
-#define TW_TIMER_ALLOW_DUPLICATE_STOP 0
+#define TW_TIMER_ALLOW_DUPLICATE_STOP 1
#include <vppinfra/tw_timer_template.h>
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 3
#define TW_SLOTS_PER_RING 1024
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 1
#define TW_SLOTS_PER_RING 2048
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 3
#define TW_SLOTS_PER_RING 256
#undef TW_OVERFLOW_VECTOR
#undef TW_FAST_WHEEL_BITMAP
#undef TW_TIMER_ALLOW_DUPLICATE_STOP
+#undef TW_START_STOP_TRACE_SIZE
#define TW_TIMER_WHEELS 3
#define TW_SLOTS_PER_RING 4
*
*
*/
+#if TW_START_STOP_TRACE_SIZE > 0
+
+/** Record one start/stop/expire event in the wheel's circular trace buffer.
+ * timer_id encodings (see TW(tw_search_trace)): 0xFF = stopped,
+ * 0xFE = expired, any other value = started.
+ */
+void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id,
+			  u32 pool_index, u32 handle)
+{
+  TWT (trace) * t = &tw->traces[tw->trace_index];
+
+  t->timer_id = timer_id;
+  t->pool_index = pool_index;
+  t->handle = handle;
+
+  /* Advance the circular index; count wraps so searches know to scan the tail */
+  tw->trace_index++;
+  if (tw->trace_index == TW_START_STOP_TRACE_SIZE)
+    {
+      tw->trace_index = 0;
+      tw->trace_wrapped++;
+    }
+}
+
+/** Dump all trace entries matching the given handle, newest first.
+ * Scans backwards from the current trace index; if the buffer has
+ * wrapped, also scans the tail from the end of the buffer down to
+ * the current index.
+ */
+void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle)
+{
+  int i;			/* signed so the loops can reach slot 0 */
+  u32 start_pos;
+  TWT (trace) * t;
+  char *s = "bogus!";
+
+  /* reverse search for the supplied handle */
+
+  start_pos = tw->trace_index;
+  if (start_pos == 0)
+    start_pos = TW_START_STOP_TRACE_SIZE - 1;
+  else
+    start_pos--;
+
+  /* i >= 0 (not i > 0): slot 0 is a valid trace entry */
+  for (i = start_pos; i >= 0; i--)
+    {
+      t = &tw->traces[i];
+      if (t->handle == handle)
+	{
+	  switch (t->timer_id)
+	    {
+	    case 0xFF:
+	      s = "stopped";
+	      break;
+	    case 0xFE:
+	      s = "expired";
+	      break;
+	    default:
+	      s = "started";
+	      break;
+	    }
+	  fformat (stderr, "handle 0x%x (%d) %s at trace %d\n",
+		   handle, handle, s, i);
+	}
+    }
+  if (tw->trace_wrapped > 0)
+    {
+      /* Start at the last valid slot (SIZE - 1); the original started at
+       * SIZE, reading one element past the end of tw->traces[] */
+      for (i = TW_START_STOP_TRACE_SIZE - 1; i >= (int) tw->trace_index; i--)
+	{
+	  t = &tw->traces[i];
+	  if (t->handle == handle)
+	    {
+	      switch (t->timer_id)
+		{
+		case 0xFF:
+		  s = "stopped";
+		  break;
+		case 0xFE:
+		  s = "expired";
+		  break;
+		default:
+		  s = "started";
+		  break;
+		}
+	      fformat (stderr, "handle 0x%x (%d) %s at trace %d\n",
+		       handle, handle, s, i);
+	    }
+	}
+    }
+}
+#endif /* TW_START_STOP_TRACE_SIZE > 0 */
+
static inline u32
TW (make_internal_timer_handle) (u32 pool_index, u32 timer_id)
{
t->expiration_time = tw->current_tick + interval;
ts = &tw->overflow;
timer_addhead (tw->timers, ts->head_index, t - tw->timers);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
return t - tw->timers;
}
#endif
ts = &tw->w[TW_TIMER_RING_GLACIER][glacier_ring_offset];
timer_addhead (tw->timers, ts->head_index, t - tw->timers);
-
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
return t - tw->timers;
}
#endif
ts = &tw->w[TW_TIMER_RING_SLOW][slow_ring_offset];
timer_addhead (tw->timers, ts->head_index, t - tw->timers);
-
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
+#endif
return t - tw->timers;
}
#else
#if TW_FAST_WHEEL_BITMAP
tw->fast_slot_bitmap = clib_bitmap_set (tw->fast_slot_bitmap,
fast_ring_offset, 1);
+#endif
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers);
#endif
return t - tw->timers;
}
if (pool_is_free_index (tw->timers, handle))
return;
#endif
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, ~0, ~0, handle);
+#endif
t = pool_elt_at_index (tw->timers, handle);
tw->timer_interval = timer_interval_in_seconds;
tw->ticks_per_second = 1.0 / timer_interval_in_seconds;
tw->first_expires_tick = ~0ULL;
+
vec_validate (tw->expired_timer_handles, 0);
_vec_len (tw->expired_timer_handles) = 0;
new_glacier_ring_offset == 0))
{
vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers);
+#endif
pool_put (tw->timers, t);
}
/* Timer moves to the glacier ring */
t->fast_ring_offset == 0))
{
vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers);
+#endif
pool_put (tw->timers, t);
}
/* Timer expires during slow-wheel tick 0 */
if (PREDICT_FALSE (t->fast_ring_offset == 0))
{
vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers);
+#endif
pool_put (tw->timers, t);
}
else /* typical case */
t = pool_elt_at_index (tw->timers, next_index);
next_index = t->next;
vec_add1 (callback_vector, t->user_handle);
+#if TW_START_STOP_TRACE_SIZE > 0
+ TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers);
+#endif
pool_put (tw->timers, t);
}
{
/* The callback is optional. We return the u32 * handle vector */
if (tw->expired_timer_callback)
- {
- tw->expired_timer_callback (callback_vector);
- _vec_len (callback_vector) = 0;
- }
+ tw->expired_timer_callback (callback_vector);
tw->expired_timer_handles = callback_vector;
}
} tw_ring_index_t;
#endif /* __defined_tw_timer_wheel_slot__ */
+typedef CLIB_PACKED (struct
+ {
+ u8 timer_id;
+ u32 pool_index;
+ u32 handle;
+ }) TWT (trace);
+
typedef struct
{
/** Timer pool */
/** expired timer callback, receives a vector of handles */
void (*expired_timer_callback) (u32 * expired_timer_handles);
- /** vector of expired timers */
+  /** vector of expired timer handles */
u32 *expired_timer_handles;
/** maximum expirations */
u32 max_expirations;
+
+ /** current trace index */
+#if TW_START_STOP_TRACE_SIZE > 0
+ /* Start/stop/expire tracing */
+ u32 trace_index;
+ u32 trace_wrapped;
+ TWT (trace) traces[TW_START_STOP_TRACE_SIZE];
+#endif
+
} TWT (tw_timer_wheel);
u32 TW (tw_timer_start) (TWT (tw_timer_wheel) * tw,
u32 TW (tw_timer_first_expires_in_ticks) (TWT (tw_timer_wheel) * tw);
#endif
+#if TW_START_STOP_TRACE_SIZE > 0
+void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle);
+void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id,
+ u32 pool_index, u32 handle);
+#endif
+
/*
* fd.io coding-style-patch-verification: ON
*