typedef struct segment_manager_main_
{
segment_manager_t *segment_managers; /**< Pool of segment managers */
- clib_valloc_main_t va_allocator; /**< Virtual address allocator */
u32 seg_name_counter; /**< Counter for segment names */
/*
* If needed a writer's lock is acquired before allocating a new segment
* to avoid affecting any of the segments pool readers.
*/
-int
-segment_manager_add_segment (segment_manager_t * sm, uword segment_size)
+static inline int
+segment_manager_add_segment_inline (segment_manager_t *sm, uword segment_size,
+ u8 notify_app, u8 flags)
{
- uword baseva = (uword) ~ 0ULL, alloc_size, page_size;
- u32 rnd_margin = 128 << 10, fs_index = ~0;
segment_manager_main_t *smm = &sm_main;
segment_manager_props_t *props;
fifo_segment_t *fs;
+ u32 fs_index = ~0;
u8 *seg_name;
int rv;
* Allocate ssvm segment
*/
segment_size = segment_size ? segment_size : props->add_segment_size;
- page_size = clib_mem_get_page_size ();
- /* Protect against segment size u32 wrap */
- segment_size = clib_max (segment_size + page_size - 1, segment_size);
- segment_size = segment_size & ~(page_size - 1);
+ segment_size = round_pow2 (segment_size, clib_mem_get_page_size ());
if (props->segment_type != SSVM_SEGMENT_PRIVATE)
{
seg_name = format (0, "%d-%d%c", getpid (), smm->seg_name_counter++, 0);
- alloc_size = (uword) segment_size + rnd_margin;
- baseva = clib_valloc_alloc (&smm->va_allocator, alloc_size, 0);
- if (!baseva)
- {
- clib_warning ("out of space for segments");
- pool_put (sm->segments, fs);
- goto done;
- }
}
else
{
fs->ssvm.ssvm_size = segment_size;
fs->ssvm.name = seg_name;
- /* clib_mem_vm_map_shared consumes first page before requested_va */
- fs->ssvm.requested_va = baseva + page_size;
+ fs->ssvm.requested_va = 0;
if ((rv = ssvm_server_init (&fs->ssvm, props->segment_type)))
{
clib_warning ("svm_master_init ('%v', %u) failed", seg_name,
segment_size);
-
- if (props->segment_type != SSVM_SEGMENT_PRIVATE)
- clib_valloc_free (&smm->va_allocator, baseva);
pool_put (sm->segments, fs);
goto done;
}
/*
* Set watermarks in segment
*/
- fs->h->high_watermark = sm->high_watermark;
- fs->h->low_watermark = sm->low_watermark;
+ fs->high_watermark = sm->high_watermark;
+ fs->low_watermark = sm->low_watermark;
+ fs->flags = flags;
+ fs->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT;
fs->h->pct_first_alloc = props->pct_first_alloc;
- fs->h->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT;
+ if (notify_app)
+ {
+ app_worker_t *app_wrk;
+ u64 fs_handle;
+ fs_handle = segment_manager_segment_handle (sm, fs);
+ app_wrk = app_worker_get (sm->app_wrk_index);
+ rv = app_worker_add_segment_notify (app_wrk, fs_handle);
+ if (rv)
+ return rv;
+ }
done:
if (vlib_num_workers ())
return fs_index;
}
+/**
+ * Add a memory segment to the manager's pool.
+ *
+ * Thin wrapper over segment_manager_add_segment_inline: passes
+ * @a notify_app through and uses no fifo-segment flags (0).
+ *
+ * @param sm		segment manager to add the segment to
+ * @param segment_size	requested size (0 selects the default — see inline fn)
+ * @param notify_app	if set, the owning app worker is notified of the
+ *			new segment
+ * @return		segment index on success, negative on failure
+ */
+int
segment_manager_add_segment (segment_manager_t *sm, uword segment_size,
+			     u8 notify_app)
+{
+  return segment_manager_add_segment_inline (sm, segment_size, notify_app, 0);
+}
+
+/**
+ * Add a memory segment to the manager's pool with custom flags.
+ *
+ * Thin wrapper over segment_manager_add_segment_inline: forwards
+ * @a flags and never notifies the app (notify_app = 0). Use
+ * segment_manager_add_segment for the notifying variant.
+ *
+ * @param sm		segment manager to add the segment to
+ * @param segment_size	requested size (0 selects the default — see inline fn)
+ * @param flags		fifo-segment flags stamped on the new segment
+ * @return		segment index on success, negative on failure
+ */
+int
+segment_manager_add_segment2 (segment_manager_t *sm, uword segment_size,
+			      u8 flags)
+{
+  return segment_manager_add_segment_inline (sm, segment_size, 0, flags);
+}
+
/**
* Remove segment without lock
*/
void
segment_manager_del_segment (segment_manager_t * sm, fifo_segment_t * fs)
{
- segment_manager_main_t *smm = &sm_main;
-
if (ssvm_type (&fs->ssvm) != SSVM_SEGMENT_PRIVATE)
{
- /* clib_mem_vm_map_shared consumes first page before requested_va */
- clib_valloc_free (&smm->va_allocator,
- fs->ssvm.requested_va - clib_mem_get_page_size ());
-
if (!segment_manager_app_detached (sm))
{
app_worker_t *app_wrk;
}
}
+ fifo_segment_cleanup (fs);
ssvm_delete (&fs->ssvm);
if (CLIB_DEBUG)
/* Allocate the segments */
for (i = 0; i < approx_segment_count + 1; i++)
{
- fs_index = segment_manager_add_segment (sm, max_seg_size);
+ fs_index = segment_manager_add_segment (sm, max_seg_size, 0);
if (fs_index < 0)
{
clib_warning ("Failed to preallocate segment %d", i);
return 0;
}
- fs_index = segment_manager_add_segment (sm, first_seg_size);
+ fs_index = segment_manager_add_segment (sm, first_seg_size, 0);
if (fs_index < 0)
{
clib_warning ("Failed to allocate segment");
}
/* *INDENT-ON* */
+ pool_free (sm->segments);
clib_rwlock_writer_unlock (&sm->segments_rwlock);
clib_rwlock_free (&sm->segments_rwlock);
segment_manager_free (sm);
}
-static void
-segment_manager_free_safe (segment_manager_t * sm)
+void
+segment_manager_free_safe (segment_manager_t *sm)
{
if (!vlib_thread_is_main_w_barrier ())
{
*/
while (f)
{
- session = session_get_if_valid (f->master_session_index,
- f->master_thread_index);
- if (session)
- vec_add1 (handles, session_handle (session));
- f = f->next;
- }
+ session = session_get_if_valid (f->shr->master_session_index,
+ f->master_thread_index);
+ if (session)
+ vec_add1 (handles, session_handle (session));
+ f = f->next;
+ }
}
/* Instead of removing the segment, test when cleaning up disconnected
/* Avoid propagating notifications back to the app */
session->app_wrk_index = APP_INVALID_INDEX;
}
+ vec_free (handles);
+}
+
+/**
+ * Initiate disconnects for sessions in specified state 'owned' by a segment
+ * manager
+ *
+ * Walks every fifo of every segment, collects handles of valid sessions
+ * whose state matches any entry in @a states, then closes them once the
+ * walk completes.
+ *
+ * @param sm		segment manager whose sessions are scanned
+ * @param states	vector of session states to match against
+ */
+void
+segment_manager_del_sessions_filter (segment_manager_t *sm,
+				     session_state_t *states)
+{
+  session_handle_t *handles = 0, *handle;
+  fifo_segment_t *fs;
+  session_t *session;
+  int slice_index;
+  svm_fifo_t *f;
+
+  /* Caller is expected to have at least one segment allocated */
+  ASSERT (pool_elts (sm->segments) != 0);
+
+  /* Across all fifo segments used by the server */
+  segment_manager_foreach_segment_w_lock (
+    fs, sm, ({
+      for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
+	{
+	  f = fifo_segment_get_slice_fifo_list (fs, slice_index);
+	  while (f)
+	    {
+	      session = session_get_if_valid (f->shr->master_session_index,
+					      f->master_thread_index);
+	      if (session)
+		{
+		  session_state_t *state;
+		  /* First matching state wins; handle recorded once */
+		  vec_foreach (state, states)
+		    {
+		      if (session->session_state == *state)
+			{
+			  vec_add1 (handles, session_handle (session));
+			  break;
+			}
+		    }
+		}
+	      f = f->next;
+	    }
+	}
+    }));
+
+  /* Close collected sessions after the segment walk completes */
+  vec_foreach (handle, handles)
+    {
+      session = session_get_from_handle (*handle);
+      session_close (session);
+      /* Avoid propagating notifications back to the app */
+      session->app_wrk_index = APP_INVALID_INDEX;
+    }
+  vec_free (handles);
+}
int
segment_manager_props_t *props;
fifo_segment_t *fs = 0, *cur;
u32 sm_index, fs_index;
- u8 added_a_segment = 0;
- u64 fs_handle;
props = segment_manager_properties_get (sm);
segment_manager_segment_reader_lock (sm);
- /* *INDENT-OFF* */
pool_foreach (cur, sm->segments) {
- free_bytes = fifo_segment_available_bytes (cur);
- if (free_bytes > max_free_bytes)
- {
- max_free_bytes = free_bytes;
- fs = cur;
- }
+ if (fifo_segment_flags (cur) & FIFO_SEGMENT_F_CUSTOM_USE)
+ continue;
+ free_bytes = fifo_segment_available_bytes (cur);
+ if (free_bytes > max_free_bytes)
+ {
+ max_free_bytes = free_bytes;
+ fs = cur;
+ }
}
- /* *INDENT-ON* */
if (fs)
{
segment_manager_segment_reader_unlock (sm);
-alloc_check:
-
- if (!alloc_fail)
- {
-
- alloc_success:
-
- ASSERT (rx_fifo && tx_fifo);
- sm_index = segment_manager_index (sm);
- fs_index = segment_manager_segment_index (sm, fs);
- (*tx_fifo)->segment_manager = sm_index;
- (*rx_fifo)->segment_manager = sm_index;
- (*tx_fifo)->segment_index = fs_index;
- (*rx_fifo)->segment_index = fs_index;
-
- if (added_a_segment)
- {
- app_worker_t *app_wrk;
- fs_handle = segment_manager_segment_handle (sm, fs);
- app_wrk = app_worker_get (sm->app_wrk_index);
- rv = app_worker_add_segment_notify (app_wrk, fs_handle);
- }
- /* Drop the lock after app is notified */
- segment_manager_segment_reader_unlock (sm);
- return rv;
- }
-
/*
* Allocation failed, see if we can add a new segment
*/
if (props->add_segment)
{
- if (added_a_segment)
- {
- clib_warning ("Added a segment, still can't allocate a fifo");
- segment_manager_segment_reader_unlock (sm);
- return SESSION_E_SEG_NO_SPACE2;
- }
- if ((new_fs_index = segment_manager_add_segment (sm, 0)) < 0)
+ if ((new_fs_index = segment_manager_add_segment (sm, 0, 1)) < 0)
{
clib_warning ("Failed to add new segment");
return SESSION_E_SEG_CREATE;
props->rx_fifo_size,
props->tx_fifo_size,
rx_fifo, tx_fifo);
- added_a_segment = 1;
- goto alloc_check;
+ if (alloc_fail)
+ {
+ clib_warning ("Added a segment, still can't allocate a fifo");
+ segment_manager_segment_reader_unlock (sm);
+ return SESSION_E_SEG_NO_SPACE2;
+ }
}
else
{
SESSION_DBG ("Can't add new seg and no space to allocate fifos!");
return SESSION_E_SEG_NO_SPACE;
}
+
+alloc_success:
+ ASSERT (rx_fifo && tx_fifo);
+
+ sm_index = segment_manager_index (sm);
+ fs_index = segment_manager_segment_index (sm, fs);
+ (*tx_fifo)->segment_manager = sm_index;
+ (*rx_fifo)->segment_manager = sm_index;
+ (*tx_fifo)->segment_index = fs_index;
+ (*rx_fifo)->segment_index = fs_index;
+
+ /* Drop the lock after app is notified */
+ segment_manager_segment_reader_unlock (sm);
+
+ return rv;
}
void
}
+/**
+ * Detach fifo from its owner segment.
+ *
+ * NOTE(review): signature changed to take svm_fifo_t ** — presumably
+ * fifo_segment_detach_fifo may update the fifo pointer; confirm against
+ * the fifo_segment API.
+ */
void
-segment_manager_detach_fifo (segment_manager_t * sm, svm_fifo_t * f)
+segment_manager_detach_fifo (segment_manager_t *sm, svm_fifo_t **f)
{
  fifo_segment_t *fs;

-  fs = segment_manager_get_segment_w_lock (sm, f->segment_index);
+  fs = segment_manager_get_segment_w_lock (sm, (*f)->segment_index);
  fifo_segment_detach_fifo (fs, f);
  segment_manager_segment_reader_unlock (sm);
}
+/**
+ * Attach fifo to a segment and bind it to session @a s.
+ *
+ * Stamps the session index (via the fifo's shared header) and thread
+ * index on the fifo after attach, outside the segment reader lock.
+ * NOTE(review): signature changed to take svm_fifo_t ** — presumably
+ * fifo_segment_attach_fifo may update the fifo pointer; confirm against
+ * the fifo_segment API.
+ */
void
-segment_manager_attach_fifo (segment_manager_t * sm, svm_fifo_t * f,
-			     session_t * s)
+segment_manager_attach_fifo (segment_manager_t *sm, svm_fifo_t **f,
+			     session_t *s)
{
  fifo_segment_t *fs;

-  fs = segment_manager_get_segment_w_lock (sm, f->segment_index);
+  fs = segment_manager_get_segment_w_lock (sm, (*f)->segment_index);
  fifo_segment_attach_fifo (fs, f, s->thread_index);
  segment_manager_segment_reader_unlock (sm);

-  f->master_session_index = s->session_index;
-  f->master_thread_index = s->thread_index;
+  (*f)->shr->master_session_index = s->session_index;
+  (*f)->master_thread_index = s->thread_index;
}
u32
u32 fifo_evt_size, session_evt_size = 256, notif_q_size;
svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
svm_msg_q_t *q;
- void *oldheap;
fifo_evt_size = sizeof (session_event_t);
notif_q_size = clib_max (16, props->evt_q_size >> 4);
cfg->q_nitems = props->evt_q_size;
cfg->ring_cfgs = rc;
- oldheap = ssvm_push_heap (segment->ssvm.sh);
- q = svm_msg_q_alloc (cfg);
- fifo_segment_update_free_bytes (segment);
- ssvm_pop_heap (oldheap);
+ q = fifo_segment_msg_q_alloc (segment, 0, cfg);
if (props->use_mq_eventfd)
{
- if (svm_msg_q_alloc_producer_eventfd (q))
+ if (svm_msg_q_alloc_eventfd (q))
clib_warning ("failed to alloc eventfd");
}
return q;
* Init segment vm address allocator
*/
void
-segment_manager_main_init (segment_manager_main_init_args_t * a)
+segment_manager_main_init (void)
{
segment_manager_main_t *sm = &sm_main;
- clib_valloc_chunk_t _ip, *ip = &_ip;
-
- ip->baseva = a->baseva;
- ip->size = a->size;
-
- clib_valloc_init (&sm->va_allocator, ip, 1 /* lock */ );
sm->default_fifo_size = 1 << 12;
sm->default_segment_size = 1 << 20;
if (!sm)
{
if (verbose)
- vlib_cli_output (vm, "%-40s%-20s%-15s%-10s", "Connection", "App",
- "API Client", "SegManager");
+ vlib_cli_output (vm, "%-" SESSION_CLI_ID_LEN "s%-20s%-15s%-10s",
+ "Connection", "App", "API Client", "SegManager");
else
- vlib_cli_output (vm, "%-40s%-20s", "Connection", "App");
+ vlib_cli_output (vm, "%-" SESSION_CLI_ID_LEN "s%-20s", "Connection",
+ "App");
return;
}
u32 session_index, thread_index;
session_t *session;
- session_index = f->master_session_index;
- thread_index = f->master_thread_index;
+ session_index = f->shr->master_session_index;
+ thread_index = f->master_thread_index;
- session = session_get (session_index, thread_index);
- str = format (0, "%U", format_session, session, verbose);
+ session = session_get (session_index, thread_index);
+ str = format (0, "%U", format_session, session, verbose);
- if (verbose)
- s = format (s, "%-40v%-20v%-15u%-10u", str, app_name,
- app_wrk->api_client_index, app_wrk->connects_seg_manager);
- else
- s = format (s, "%-40v%-20v", str, app_name);
+ if (verbose)
+ s = format (s, "%-" SESSION_CLI_ID_LEN "v%-20v%-15u%-10u", str,
+ app_name, app_wrk->api_client_index,
+ app_wrk->connects_seg_manager);
+ else
+ s = format (s, "%-" SESSION_CLI_ID_LEN "v%-20v", str, app_name);
- vlib_cli_output (vm, "%v", s);
- vec_reset_length (s);
- vec_free (str);
+ vlib_cli_output (vm, "%v", s);
+ vec_reset_length (s);
+ vec_free (str);
- f = f->next;
- }
- vec_free (s);
+ f = f->next;
+ }
+ vec_free (s);
}
}
/* *INDENT-ON* */