int rv;
if (!svm_fifo_set_event (s->rx_fifo))
return;
- if ((rv =
- app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo->master_session_index,
- SESSION_IO_EVT_RX, SVM_Q_WAIT)))
+ if ((rv = app_send_io_evt_to_vpp (s->vpp_evt_q,
+ s->rx_fifo->shr->master_session_index,
+ SESSION_IO_EVT_RX, SVM_Q_WAIT)))
ECHO_FAIL (ECHO_FAIL_SEND_IO_EVT, "app_send_io_evt_to_vpp errored %d",
rv);
svm_fifo_clear_deq_ntf (s->rx_fifo);
connect_to_vpp (char *name)
{
echo_main_t *em = &echo_main;
- api_main_t *am = &api_main;
+ api_main_t *am = vlibapi_get_main ();
if (em->use_sock_api)
{
fformat (stdout, " \"end_evt_missing\": \"%s\",\n",
end_evt_missing ? "True" : "False");
fformat (stdout, " \"rx_data\": %lld,\n", em->stats.rx_total);
- fformat (stdout, " \"tx_rx\": %lld,\n", em->stats.tx_total);
+ fformat (stdout, " \"tx_data\": %lld,\n", em->stats.tx_total);
+ fformat (stdout, " \"rx_bits_per_second\": %.1f,\n",
+ em->stats.rx_total * 8 / deltat);
+ fformat (stdout, " \"tx_bits_per_second\": %.1f,\n",
+ em->stats.tx_total * 8 / deltat);
fformat (stdout, " \"closing\": {\n");
fformat (stdout, " \"reset\": { \"q\": %d, \"s\": %d },\n",
em->stats.reset_count.q, em->stats.reset_count.s);
- fformat (stdout, " \"close\": { \"q\": %d, \"s\": %d },\n",
+ fformat (stdout, " \"recv evt\": { \"q\": %d, \"s\": %d },\n",
em->stats.close_count.q, em->stats.close_count.s);
- fformat (stdout, " \"active\": { \"q\": %d, \"s\": %d },\n",
+ fformat (stdout, " \"send evt\": { \"q\": %d, \"s\": %d },\n",
em->stats.active_count.q, em->stats.active_count.s);
- fformat (stdout, " \"clean\": { \"q\": %d, \"s\": %d }\n",
+ fformat (stdout, " \"clean\": { \"q\": %d, \"s\": %d },\n",
em->stats.clean_count.q, em->stats.clean_count.s);
- fformat (stdout, " }\n");
+ fformat (stdout, " \"accepted\": { \"q\": %d, \"s\": %d },\n",
+ em->stats.accepted_count.q, em->stats.accepted_count.s);
+ fformat (stdout, " \"connected\": { \"q\": %d, \"s\": %d }\n",
+ em->stats.connected_count.q, em->stats.connected_count.s);
+ fformat (stdout, " },\n");
fformat (stdout, " \"results\": {\n");
- fformat (stdout, " \"has_failed\": \"%d\"\n", em->has_failed);
+ fformat (stdout, " \"has_failed\": \"%d\",\n", em->has_failed);
fformat (stdout, " \"fail_descr\": \"%v\"\n", em->fail_descr);
fformat (stdout, " }\n");
fformat (stdout, "}\n");
em->stats.active_count.s, em->stats.active_count.q);
fformat (stdout, "Discarded %d streams (and %d Quic conn)\n",
em->stats.clean_count.s, em->stats.clean_count.q);
+ fformat (stdout, "--------------------\n");
+ fformat (stdout, "Got accept on %d streams (and %d Quic conn)\n",
+ em->stats.accepted_count.s, em->stats.accepted_count.q);
+ fformat (stdout, "Got connected on %d streams (and %d Quic conn)\n",
+ em->stats.connected_count.s, em->stats.connected_count.q);
if (em->has_failed)
fformat (stdout, "\nFailure Return Status: %d\n%v", em->has_failed,
em->fail_descr);
echo_update_count_on_session_close (echo_main_t * em, echo_session_t * s)
{
- ECHO_LOG (1, "[%lu/%lu] -> %U -> [%lu/%lu]",
+ ECHO_LOG (2, "[%lu/%lu] -> %U -> [%lu/%lu]",
s->bytes_received, s->bytes_received + s->bytes_to_receive,
echo_format_session, s, s->bytes_sent,
s->bytes_sent + s->bytes_to_send);
- clib_atomic_fetch_add (&em->stats.tx_total, s->bytes_sent);
- clib_atomic_fetch_add (&em->stats.rx_total, s->bytes_received);
- if (PREDICT_FALSE (em->stats.rx_total == em->stats.rx_expected))
+ if (PREDICT_FALSE
+ ((em->stats.rx_total == em->stats.rx_expected)
+ && (em->stats.tx_total == em->stats.tx_expected)))
echo_notify_event (em, ECHO_EVT_LAST_BYTE);
}
u32 *session_indexes = 0, *session_index;
/* *INDENT-OFF* */
- pool_foreach (s, em->sessions,
- ({
+ pool_foreach (s, em->sessions)
+ {
if (s->session_state == ECHO_SESSION_STATE_CLOSED)
- vec_add1 (session_indexes, s->session_index);}
- ));
+ vec_add1 (session_indexes, s->session_index);
+ }
/* *INDENT-ON* */
vec_foreach (session_index, session_indexes)
{
expected = (s->bytes_received + i) & 0xff;
if (rx_buf[i] == expected || em->max_test_msg > 0)
continue;
- ECHO_LOG (0, "Session 0x%lx byte %lld was 0x%x expected 0x%x",
+ ECHO_LOG (1, "Session 0x%lx byte %lld was 0x%x expected 0x%x",
s->vpp_session_handle, s->bytes_received + i, rx_buf[i],
expected);
em->max_test_msg--;
if (em->max_test_msg == 0)
- ECHO_LOG (0, "Too many errors, hiding next ones");
+ ECHO_LOG (1, "Too many errors, hiding next ones");
if (em->test_return_packets == RETURN_PACKETS_ASSERT)
ECHO_FAIL (ECHO_FAIL_TEST_BYTES_ERR, "test-bytes errored");
}
s->bytes_received += n_read;
s->bytes_to_receive -= n_read;
+ clib_atomic_fetch_add (&em->stats.rx_total, n_read);
return n_read;
}
{
int n_sent;
int bytes_this_chunk = clib_min (s->bytes_to_send, len - offset);
+ echo_main_t *em = &echo_main;
+
if (!bytes_this_chunk)
return 0;
n_sent = app_send ((app_session_t *) s, tx_buf + offset,
return 0;
s->bytes_to_send -= n_sent;
s->bytes_sent += n_sent;
+ clib_atomic_fetch_add (&em->stats.tx_total, n_sent);
return n_sent;
}
/* if parent has died, terminate gracefully */
if (s->listener_index == SESSION_INVALID_INDEX)
{
- ECHO_LOG (2, "%U: listener_index == SESSION_INVALID_INDEX",
+ ECHO_LOG (3, "%U: listener_index == SESSION_INVALID_INDEX",
echo_format_session, s);
return;
}
return;
}
- ECHO_LOG (2, "%U died, close child %U", echo_format_session, ls,
+ ECHO_LOG (3, "%U died, close child %U", echo_format_session, ls,
echo_format_session, s);
echo_update_count_on_session_close (em, s);
em->proto_cb_vft->cleanup_cb (s, 1 /* parent_died */ );
if (em->send_stream_disconnects == ECHO_CLOSE_F_ACTIVE)
{
echo_send_rpc (em, echo_send_disconnect_session,
- (void *) s->vpp_session_handle, 0);
+ (echo_rpc_args_t *) & s->vpp_session_handle);
clib_atomic_fetch_add (&em->stats.active_count.s, 1);
}
else if (em->send_stream_disconnects == ECHO_CLOSE_F_NONE)
clib_atomic_fetch_add (&em->stats.clean_count.s, 1);
}
}
+ ECHO_LOG (3, "%U: %U", echo_format_session, s,
+ echo_format_session_state, s->session_state);
return;
}
{
if (n_sent || n_read)
s->idle_cycles = 0;
- else if (s->idle_cycles++ == 1e7)
+ else if (s->idle_cycles++ == LOG_EVERY_N_IDLE_CYCLES)
{
s->idle_cycles = 0;
- ECHO_LOG (1, "Idle client TX:%dB RX:%dB", s->bytes_to_send,
+ ECHO_LOG (2, "Idle client TX:%dB RX:%dB", s->bytes_to_send,
s->bytes_to_receive);
- ECHO_LOG (1, "Idle FIFOs TX:%dB RX:%dB",
+ ECHO_LOG (2, "Idle FIFOs TX:%dB RX:%dB",
svm_fifo_max_dequeue (s->tx_fifo),
svm_fifo_max_dequeue (s->rx_fifo));
- ECHO_LOG (1, "Session 0x%lx state %U", s->vpp_session_handle,
+ ECHO_LOG (2, "Session 0x%lx state %U", s->vpp_session_handle,
echo_format_session_state, s->session_state);
}
}
u32 idx = (u64) arg;
if (n * idx >= N)
{
- ECHO_LOG (1, "Thread %u exiting, no sessions to care for", idx);
+ ECHO_LOG (2, "Thread %u exiting, no sessions to care for", idx);
pthread_exit (0);
}
u32 thread_n_sessions = clib_min (n, N - n * idx);
echo_check_closed_listener (em, s);
break;
case ECHO_SESSION_STATE_CLOSING:
- ECHO_LOG (2, "%U: %U", echo_format_session, s,
+ ECHO_LOG (3, "%U: %U", echo_format_session, s,
echo_format_session_state, s->session_state);
echo_update_count_on_session_close (em, s);
em->proto_cb_vft->cleanup_cb (s, 0 /* parent_died */ );
break;
case ECHO_SESSION_STATE_CLOSED:
- ECHO_LOG (2, "%U: %U", echo_format_session, s,
+ ECHO_LOG (3, "%U: %U", echo_format_session, s,
echo_format_session_state, s->session_state);
n_closed_sessions++;
break;
if (n_closed_sessions == thread_n_sessions)
break;
}
- ECHO_LOG (1, "Mission accomplished!");
+ ECHO_LOG (2, "Mission accomplished!");
pthread_exit (0);
}
+/* Handle vpp's reply to an unlisten request: look up the listen
+ * session by its vpp handle, clean it up, and once the last listener
+ * is gone flip the app to STATE_DISCONNECTED. */
static void
-session_unlisten_handler (session_unlisten_msg_t * mp)
+session_unlisten_handler (session_unlisten_reply_msg_t * mp)
{
-  echo_session_t *listen_session;
+  echo_session_t *ls;
  echo_main_t *em = &echo_main;
-  listen_session = pool_elt_at_index (em->sessions, em->listen_session_index);
-  em->proto_cb_vft->cleanup_cb (listen_session, 0 /* parent_died */ );
-  listen_session->session_state = ECHO_SESSION_STATE_CLOSED;
-  em->state = STATE_DISCONNECTED;
+
+  ls = echo_get_session_from_handle (em, mp->handle);
+  if (!ls)
+    return;
+  em->proto_cb_vft->cleanup_cb (ls, 0 /* parent_died */ );
+  ls->session_state = ECHO_SESSION_STATE_CLOSED;
+  /* multiple listeners may exist (nuris); only disconnect after the
+   * last unbind reply has been processed */
+  if (--em->listen_session_cnt == 0)
+    em->state = STATE_DISCONNECTED;
}
static void
clib_net_to_host_u32 (mp->retval));
return;
}
- ECHO_LOG (0, "listening on %U:%u", format_ip46_address, mp->lcl_ip,
+ ECHO_LOG (1, "listening on %U:%u", format_ip46_address, mp->lcl_ip,
mp->lcl_is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (mp->lcl_port));
listen_session->session_type = ECHO_SESSION_TYPE_LISTEN;
listen_session->vpp_session_handle = mp->handle;
echo_session_handle_add_del (em, mp->handle, listen_session->session_index);
- em->state = STATE_LISTEN;
- em->listen_session_index = listen_session->session_index;
+ vec_add1 (em->listen_session_indexes, listen_session->session_index);
+ if (++em->listen_session_cnt == em->n_uris)
+ em->state = STATE_LISTEN;
if (em->proto_cb_vft->bound_uri_cb)
em->proto_cb_vft->bound_uri_cb (mp, listen_session);
}
{
app_session_evt_t _app_evt, *app_evt = &_app_evt;
session_accepted_reply_msg_t *rmp;
- svm_fifo_t *rx_fifo, *tx_fifo;
echo_main_t *em = &echo_main;
echo_session_t *session, *ls;
"Unknown listener handle 0x%lx", mp->listener_handle);
return;
}
- if (wait_for_segment_allocation (mp->segment_handle))
+
+ /* Allocate local session and set it up */
+ session = echo_session_new (em);
+
+ if (echo_attach_session (mp->segment_handle, mp->server_rx_fifo,
+ mp->server_tx_fifo, mp->vpp_event_queue_address,
+ session))
{
ECHO_FAIL (ECHO_FAIL_ACCEPTED_WAIT_FOR_SEG_ALLOC,
"accepted wait_for_segment_allocation errored");
return;
}
- /* Allocate local session and set it up */
- session = echo_session_new (em);
session->vpp_session_handle = mp->handle;
- rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
- rx_fifo->client_session_index = session->session_index;
- tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *);
- tx_fifo->client_session_index = session->session_index;
-
- session->rx_fifo = rx_fifo;
- session->tx_fifo = tx_fifo;
-
/* session->transport needed by app_send_dgram */
clib_memcpy_fast (&session->transport.rmt_ip, &mp->rmt.ip,
sizeof (ip46_address_t));
session->transport.lcl_port = em->uri_elts.port;
session->vpp_session_handle = mp->handle;
- session->start = clib_time_now (&em->clib_time);
- session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address,
- svm_msg_q_t *);
session->listener_index = ls->session_index;
+ session->start = clib_time_now (&em->clib_time);
/* Add it to lookup table */
- ECHO_LOG (1, "Accepted session 0x%lx S[%u] -> 0x%lx S[%u]",
+ ECHO_LOG (2, "Accepted session 0x%lx S[%u] -> 0x%lx S[%u]",
mp->handle, session->session_index,
mp->listener_handle, session->listener_index);
echo_session_handle_add_del (em, mp->handle, session->session_index);
echo_main_t *em = &echo_main;
echo_session_t *session;
u32 listener_index = htonl (mp->context);
- svm_fifo_t *rx_fifo, *tx_fifo;
+
+ clib_atomic_add_fetch (&em->max_sim_connects, 1);
if (mp->retval)
{
}
session = echo_session_new (em);
- if (wait_for_segment_allocation (mp->segment_handle))
+
+ if (echo_attach_session (mp->segment_handle, mp->server_rx_fifo,
+ mp->server_tx_fifo, mp->vpp_event_queue_address,
+ session))
{
ECHO_FAIL (ECHO_FAIL_CONNECTED_WAIT_FOR_SEG_ALLOC,
"connected wait_for_segment_allocation errored");
return;
}
- rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
- rx_fifo->client_session_index = session->session_index;
- tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *);
- tx_fifo->client_session_index = session->session_index;
-
- session->rx_fifo = rx_fifo;
- session->tx_fifo = tx_fifo;
session->vpp_session_handle = mp->handle;
session->start = clib_time_now (&em->clib_time);
- session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address,
- svm_msg_q_t *);
session->listener_index = listener_index;
/* session->transport needed by app_send_dgram */
clib_memcpy_fast (&session->transport.lcl_ip, &mp->lcl.ip,
echo_session_t *s;
if (!(s = echo_get_session_from_handle (em, mp->handle)))
{
- ECHO_LOG (0, "Invalid vpp_session_handle: 0x%lx", mp->handle);
+ ECHO_LOG (1, "Invalid vpp_session_handle: 0x%lx", mp->handle);
return;
}
if (s->session_state == ECHO_SESSION_STATE_CLOSED)
{
- ECHO_LOG (1, "%U: already in ECHO_SESSION_STATE_CLOSED",
+ ECHO_LOG (2, "%U: already in ECHO_SESSION_STATE_CLOSED",
echo_format_session, s);
}
else
{
- ECHO_LOG (1, "%U: passive close", echo_format_session, s);
+ ECHO_LOG (2, "%U: passive close", echo_format_session, s);
em->proto_cb_vft->disconnected_cb (mp, s);
}
app_alloc_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt,
echo_session_t *s = 0;
if (!(s = echo_get_session_from_handle (em, mp->handle)))
{
- ECHO_LOG (0, "Invalid vpp_session_handle: 0x%lx", mp->handle);
+ ECHO_LOG (1, "Invalid vpp_session_handle: 0x%lx", mp->handle);
return;
}
- ECHO_LOG (1, "%U: session reset", echo_format_session, s);
+ ECHO_LOG (2, "%U: session reset", echo_format_session, s);
em->proto_cb_vft->reset_cb (mp, s);
app_alloc_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt,
app_send_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt);
}
+/* SESSION_CTRL_EVT_APP_ADD_SEGMENT handler: map the fifo segment vpp
+ * just created into this process so its fifos become addressable.
+ * Memfd segments arrive with a file descriptor passed over the api
+ * socket; other segments are attached by name & size. On failure,
+ * any received fds are closed before returning. */
+static void
+add_segment_handler (session_app_add_segment_msg_t * mp)
+{
+  fifo_segment_main_t *sm = &echo_main.segment_main;
+  fifo_segment_create_args_t _a, *a = &_a;
+  int *fds = 0, i;
+  char *seg_name = (char *) mp->segment_name;
+  u64 segment_handle = mp->segment_handle;
+
+  if (mp->fd_flags & SESSION_FD_F_MEMFD_SEGMENT)
+    {
+      vec_validate (fds, 1);
+      /* the segment's fd is sent separately over the api socket */
+      if (vl_socket_client_recv_fd_msg (fds, 1, 5))
+	{
+	  ECHO_FAIL (ECHO_FAIL_VL_API_RECV_FD_MSG,
+		     "vl_socket_client_recv_fd_msg failed");
+	  goto failed;
+	}
+
+      if (echo_segment_attach (segment_handle, seg_name, SSVM_SEGMENT_MEMFD,
+			       fds[0]))
+	{
+	  ECHO_FAIL (ECHO_FAIL_VL_API_SVM_FIFO_SEG_ATTACH,
+		     "svm_fifo_segment_attach ('%s') "
+		     "failed on SSVM_SEGMENT_MEMFD", seg_name);
+	  goto failed;
+	}
+      vec_free (fds);
+    }
+  else
+    {
+      clib_memset (a, 0, sizeof (*a));
+      a->segment_name = seg_name;
+      a->segment_size = mp->segment_size;
+      /* Attach to the segment vpp created */
+      if (fifo_segment_attach (sm, a))
+	{
+	  ECHO_FAIL (ECHO_FAIL_VL_API_FIFO_SEG_ATTACH,
+		     "fifo_segment_attach ('%s') failed", seg_name);
+	  goto failed;
+	}
+    }
+  ECHO_LOG (2, "Mapped segment 0x%lx", segment_handle);
+  return;
+
+failed:
+  /* don't leak fds received before the failure */
+  for (i = 0; i < vec_len (fds); i++)
+    close (fds[i]);
+  vec_free (fds);
+}
+
+/* SESSION_CTRL_EVT_APP_DEL_SEGMENT handler: unmap a fifo segment vpp
+ * is about to delete. */
+static void
+del_segment_handler (session_app_del_segment_msg_t * mp)
+{
+  echo_segment_detach (mp->segment_handle);
+  ECHO_LOG (2, "Unmapped segment 0x%lx", mp->segment_handle);
+}
+
+/* SESSION_CTRL_EVT_CLEANUP handler: vpp confirmed it freed its side
+ * of the session; nothing to do locally beyond logging it. */
+static void
+cleanup_handler (session_cleanup_msg_t * mp)
+{
+  ECHO_LOG (1, "Cleanup confirmed for 0x%lx", mp->handle);
+}
+
static void
handle_mq_event (session_event_t * e)
{
case SESSION_CTRL_EVT_RESET:
return session_reset_handler ((session_reset_msg_t *) e->data);
case SESSION_CTRL_EVT_UNLISTEN_REPLY:
- return session_unlisten_handler ((session_unlisten_msg_t *) e->data);
+ return session_unlisten_handler ((session_unlisten_reply_msg_t *)
+ e->data);
+ case SESSION_CTRL_EVT_APP_ADD_SEGMENT:
+ add_segment_handler ((session_app_add_segment_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_APP_DEL_SEGMENT:
+ del_segment_handler ((session_app_del_segment_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_CLEANUP:
+ cleanup_handler ((session_cleanup_msg_t *) e->data);
+ break;
case SESSION_IO_EVT_RX:
break;
default:
- ECHO_LOG (0, "unhandled event %u", e->event_type);
+ ECHO_LOG (1, "unhandled event %u", e->event_type);
}
}
{
echo_rpc_msg_t *rpc;
svm_msg_q_msg_t msg;
- svm_msg_q_t *mq = em->rpc_msq_queue;
+ svm_msg_q_t *mq = &em->rpc_msq_queue;
while (em->state < STATE_DATA_DONE && !em->time_to_stop)
{
- svm_msg_q_lock (mq);
if (svm_msg_q_is_empty (mq) && svm_msg_q_timedwait (mq, 1))
{
- svm_msg_q_unlock (mq);
continue;
}
- svm_msg_q_sub_w_lock (mq, &msg);
+ svm_msg_q_sub_raw (mq, &msg);
rpc = svm_msg_q_msg_data (mq, &msg);
- svm_msg_q_unlock (mq);
- ((echo_rpc_t) rpc->fp) (rpc->arg, rpc->opaque);
+ ((echo_rpc_t) rpc->fp) (em, &rpc->args);
svm_msg_q_free_msg (mq, &msg);
}
}
-static inline int
-echo_mq_dequeue_batch (svm_msg_q_t * mq, svm_msg_q_msg_t * msg_vec,
-		       u32 n_max_msg)
+/* Log rx/tx throughput and per-second connect/accept rates, at most
+ * once every em->periodic_stats_delta seconds. Called from the event
+ * queue loop when periodic stats are enabled. Only deltas since the
+ * previous sample are reported; quiet counters are not printed. */
+static inline void
+echo_print_periodic_stats (echo_main_t * em)
{
-  svm_msg_q_msg_t *msg;
-  u32 n_msgs;
-  int i;
+  f64 delta, now = clib_time_now (&em->clib_time);
+  echo_stats_t _st, *st = &_st;
+  echo_stats_t *lst = &em->last_stat_sampling;
+  delta = now - em->last_stat_sampling_ts;
+  /* rate-limit: skip until a full sampling interval has elapsed */
+  if (delta < em->periodic_stats_delta)
+    return;
-  n_msgs = clib_min (svm_msg_q_size (mq), n_max_msg);
-  for (i = 0; i < n_msgs; i++)
-    {
-      vec_add2 (msg_vec, msg, 1);
-      svm_msg_q_sub_w_lock (mq, msg);
-    }
-  return n_msgs;
+  /* snapshot the live counters so rates are computed from one copy */
+  clib_memcpy_fast (st, &em->stats, sizeof (*st));
+  if (st->rx_total - lst->rx_total)
+    clib_warning ("RX: %U", echo_format_bytes_per_sec,
+		  (st->rx_total - lst->rx_total) / delta);
+  if (st->tx_total - lst->tx_total)
+    clib_warning ("TX: %U", echo_format_bytes_per_sec,
+		  (st->tx_total - lst->tx_total) / delta);
+  if (st->connected_count.q - lst->connected_count.q)
+    clib_warning ("conn: %d/s",
+		  st->connected_count.q - lst->connected_count.q);
+  if (st->accepted_count.q - lst->accepted_count.q)
+    clib_warning ("accept: %d/s",
+		  st->accepted_count.q - lst->accepted_count.q);
+
+  /* this sample becomes the baseline for the next interval */
+  clib_memcpy_fast (lst, st, sizeof (*st));
+  em->last_stat_sampling_ts = now;
}
static void *
while (em->state < STATE_DETACHED && !em->time_to_stop)
{
- svm_msg_q_lock (mq);
+ if (em->periodic_stats_delta)
+ echo_print_periodic_stats (em);
+
if (svm_msg_q_is_empty (mq) && svm_msg_q_timedwait (mq, 1))
{
- svm_msg_q_unlock (mq);
continue;
}
- echo_mq_dequeue_batch (mq, msg_vec, ~0);
- svm_msg_q_unlock (mq);
+ for (i = 0; i < svm_msg_q_size (mq); i++)
+ {
+ vec_add2 (msg_vec, msg, 1);
+ svm_msg_q_sub_raw (mq, msg);
+ }
for (i = 0; i < vec_len (msg_vec); i++)
{
pthread_exit (0);
}
+/* Compute the i-th address of a cycle of em->n_uris consecutive ips
+ * starting at src_ip. When i is a multiple of n_uris, *ip is reset to
+ * src_ip; otherwise the previous value left in *ip is incremented by
+ * one, carrying from the last address byte upward. */
+static inline void
+echo_cycle_ip (echo_main_t * em, ip46_address_t * ip, ip46_address_t * src_ip,
+	       u32 i)
+{
+  u8 *ipu8;
+  u8 l;
+  if (i % em->n_uris == 0)
+    {
+      clib_memcpy_fast (ip, src_ip, sizeof (*ip));
+      return;
+    }
+  /* last byte index: 3 for ip4, 15 for ip6 */
+  l = em->uri_elts.is_ip4 ? 3 : 15;
+  ipu8 = em->uri_elts.is_ip4 ? ip->ip4.as_u8 : ip->ip6.as_u8;
+  /* NOTE(review): carry triggers at 0xf rather than 0xff, and the
+   * increment is skipped entirely if the carry reaches byte 0 --
+   * confirm this is the intended wrap behavior */
+  while (ipu8[l] == 0xf)
+    ipu8[l--] = 0;
+  if (l)
+    ipu8[l]++;
+}
+
static void
clients_run (echo_main_t * em)
{
+ echo_connect_args_t _a, *a = &_a;
u64 i;
+
+ a->context = SESSION_INVALID_INDEX;
+ a->parent_session_handle = SESSION_INVALID_HANDLE;
+ clib_memset (&a->lcl_ip, 0, sizeof (a->lcl_ip));
+
echo_notify_event (em, ECHO_EVT_FIRST_QCONNECT);
for (i = 0; i < em->n_connects; i++)
- echo_send_connect (SESSION_INVALID_HANDLE, SESSION_INVALID_INDEX);
+ {
+ echo_cycle_ip (em, &a->ip, &em->uri_elts.ip, i);
+ if (em->lcl_ip_set)
+ echo_cycle_ip (em, &a->lcl_ip, &em->lcl_ip, i);
+ echo_send_connect (em, a);
+ }
wait_for_state_change (em, STATE_READY, 0);
- ECHO_LOG (1, "App is ready");
+ ECHO_LOG (2, "App is ready");
echo_process_rpcs (em);
}
server_run (echo_main_t * em)
{
echo_session_t *ls;
- echo_send_listen (em);
+ ip46_address_t _ip, *ip = &_ip;
+ u32 *listen_session_index;
+ u32 i;
+
+ for (i = 0; i < em->n_uris; i++)
+ {
+ echo_cycle_ip (em, ip, &em->uri_elts.ip, i);
+ echo_send_listen (em, ip);
+ }
wait_for_state_change (em, STATE_READY, 0);
- ECHO_LOG (1, "App is ready");
+ ECHO_LOG (2, "App is ready");
echo_process_rpcs (em);
/* Cleanup */
- ECHO_LOG (1, "Unbind listen port");
- ls = pool_elt_at_index (em->sessions, em->listen_session_index);
- echo_send_unbind (em, ls);
+ vec_foreach (listen_session_index, em->listen_session_indexes)
+ {
+ ECHO_LOG (2, "Unbind listen port %d", em->listen_session_cnt);
+ ls = pool_elt_at_index (em->sessions, *listen_session_index);
+ echo_send_unbind (em, ls);
+ }
if (wait_for_state_change (em, STATE_DISCONNECTED, TIMEOUT))
{
ECHO_FAIL (ECHO_FAIL_SERVER_DISCONNECT_TIMEOUT,
int i;
fprintf (stderr,
"Usage: vpp_echo [socket-name SOCKET] [client|server] [uri URI] [OPTIONS]\n"
- "Generates traffic and assert correct teardown of the QUIC hoststack\n"
+ "Generates traffic and assert correct teardown of the hoststack\n"
"\n"
" socket-name PATH Specify the binary socket path to connect to VPP\n"
" use-svm-api Use SVM API to connect to VPP\n"
" test-bytes[:assert] Check data correctness when receiving (assert fails on first error)\n"
- " fifo-size N Use N Kb fifos\n"
- " mq-size N Use N event slots for vpp_echo <-> vpp events\n"
- " rx-buf N[Kb|Mb|GB] Use N[Kb|Mb|GB] RX buffer\n"
- " tx-buf N[Kb|Mb|GB] Use N[Kb|Mb|GB] TX test buffer\n"
+ " fifo-size N[K|M|G] Use N[K|M|G] fifos\n"
+ " mq-size N Use mq with N slots for [vpp_echo->vpp] communication\n"
+ " max-sim-connects N Do not allow more than N mq events inflight\n"
+ " rx-buf N[K|M|G] Use N[Kb|Mb|GB] RX buffer\n"
+ " tx-buf N[K|M|G] Use N[Kb|Mb|GB] TX test buffer\n"
" appns NAMESPACE Use the namespace NAMESPACE\n"
" all-scope all-scope option\n"
" local-scope local-scope option\n"
" global-scope global-scope option\n"
" secret SECRET set namespace secret\n"
" chroot prefix PATH Use PATH as memory root path\n"
- " sclose=[Y|N|W] When a stream is done, pass[N] send[Y] or wait[W] for close\n"
+ " sclose=[Y|N|W] When stream is done, send[Y]|nop[N]|wait[W] for close\n"
+ " nuris N Cycle through N consecutive (src&dst) ips when creating connections\n"
+ " lcl IP Set the local ip to use as a client (use with nuris to set first src ip)\n"
"\n"
" time START:END Time between evts START & END, events being :\n"
" start - Start of the app\n"
" rx-results-diff Rx results different to pass test\n"
" tx-results-diff Tx results different to pass test\n"
" json Output global stats in json\n"
+ " stats N Output stats evry N secs\n"
" log=N Set the log level to [0: no output, 1:errors, 2:log]\n"
+ " crypto [engine] Set the crypto engine [openssl, vpp, picotls, mbedtls]\n"
"\n"
" nclients N Open N clients sending data\n"
" nthreads N Use N busy loop threads for data [in addition to main & msg queue]\n"
- " TX=1337[Kb|Mb|GB] Send 1337 [K|M|G]bytes, use TX=RX to reflect the data\n"
- " RX=1337[Kb|Mb|GB] Expect 1337 [K|M|G]bytes\n" "\n");
- for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ " TX=1337[K|M|G]|RX Send 1337 [K|M|G]bytes, use TX=RX to reflect the data\n"
+ " RX=1337[K|M|G] Expect 1337 [K|M|G]bytes\n" "\n");
+ for (i = 0; i < vec_len (em->available_proto_cb_vft); i++)
{
echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
if (vft && vft->print_usage_cb)
vft->print_usage_cb ();
}
fprintf (stderr, "\nDefault configuration is :\n"
- " server nclients 1/1 RX=64Kb TX=RX\n"
- " client nclients 1/1 RX=64Kb TX=64Kb\n");
+ " server nclients 1 [quic-streams 1] RX=64Kb TX=RX\n"
+ " client nclients 1 [quic-streams 1] RX=64Kb TX=64Kb\n");
exit (ECHO_FAIL_USAGE);
}
{
echo_main_t *em = &echo_main;
int i, rv;
- for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ for (i = 0; i < vec_len (em->available_proto_cb_vft); i++)
{
echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
if (vft && vft->process_opts_cb)
echo_set_each_proto_defaults_before_opts (echo_main_t * em)
{
int i;
- for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ for (i = 0; i < vec_len (em->available_proto_cb_vft); i++)
{
echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
if (vft && vft->set_defaults_before_opts_cb)
{
echo_main_t *em = &echo_main;
unformat_input_t _argv, *a = &_argv;
- u32 tmp;
u8 *chroot_prefix;
u8 *uri = 0;
u8 default_f_active;
+ uword tmp;
unformat_init_command_line (a, argv);
while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT)
vl_set_memory_root_path ((char *) chroot_prefix);
else if (unformat (a, "uri %s", &uri))
em->uri = format (0, "%s%c", uri, 0);
+ else if (unformat (a, "lcl %U", unformat_ip46_address, &em->lcl_ip))
+ em->lcl_ip_set = 1;
+ else if (unformat (a, "nuris %u", &em->n_uris))
+ em->n_sessions = em->n_clients + em->n_uris;
else if (unformat (a, "server"))
em->i_am_master = 1;
else if (unformat (a, "client"))
;
else if (unformat (a, "use-svm-api"))
em->use_sock_api = 0;
- else if (unformat (a, "fifo-size %d", &tmp))
- em->fifo_size = tmp << 10;
+ else if (unformat (a, "fifo-size %U", unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000ULL)
+ {
+ fprintf (stderr,
+ "ERROR: fifo-size %ld (0x%lx) too large\n", tmp, tmp);
+ print_usage_and_exit ();
+ }
+ em->fifo_size = tmp;
+ }
else if (unformat (a, "prealloc-fifos %u", &em->prealloc_fifo_pairs))
;
else
;
else if (unformat (a, "nclients %d", &em->n_clients))
{
- em->n_sessions = em->n_clients + 1;
+ em->n_sessions = em->n_clients + em->n_uris;
em->n_connects = em->n_clients;
}
else if (unformat (a, "nthreads %d", &em->n_rx_threads))
;
- else
- if (unformat
- (a, "crypto %U", echo_unformat_crypto_engine,
- &em->crypto_ctx_engine))
- ;
+ else if (unformat (a, "crypto %U", echo_unformat_crypto_engine, &tmp))
+ em->crypto_engine = tmp;
else if (unformat (a, "appns %_%v%_", &em->appns_id))
;
else if (unformat (a, "all-scope"))
em->tx_results_diff = 1;
else if (unformat (a, "json"))
em->output_json = 1;
+ else if (unformat (a, "stats %d", &em->periodic_stats_delta))
+ ;
+ else if (unformat (a, "wait-for-gdb"))
+ em->wait_for_gdb = 1;
else if (unformat (a, "log=%d", &em->log_lvl))
;
else if (unformat (a, "sclose=%U",
echo_unformat_timing_event, &em->timing.start_event,
echo_unformat_timing_event, &em->timing.end_event))
;
+ else if (unformat (a, "max-sim-connects %d", &em->max_sim_connects))
+ ;
else
print_usage_and_exit ();
}
em->bytes_to_receive == 0 ? ECHO_CLOSE_F_PASSIVE : ECHO_CLOSE_F_ACTIVE;
if (em->send_stream_disconnects == ECHO_CLOSE_F_INVALID)
em->send_stream_disconnects = default_f_active;
+
+ if (em->max_sim_connects == 0)
+ em->max_sim_connects = em->evt_q_size >> 1;
+
+ if (em->wait_for_gdb)
+ {
+ volatile u64 nop = 0;
+
+ clib_warning ("Waiting for gdb...");
+ while (em->wait_for_gdb)
+ nop++;
+ clib_warning ("Resuming execution (%llu)!", nop);
+ }
}
void
char *app_name;
u64 i;
svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
- u32 rpc_queue_size = 64 << 10;
+ u32 rpc_queue_size = 256 << 10;
em->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
clib_spinlock_init (&em->sid_vpp_handles_lock);
em->i_am_master = 1;
em->n_rx_threads = 4;
em->evt_q_size = 256;
+ em->lcl_ip_set = 0;
+ clib_memset (&em->lcl_ip, 0, sizeof (em->lcl_ip));
em->test_return_packets = RETURN_PACKETS_NOTEST;
em->timing.start_event = ECHO_EVT_FIRST_QCONNECT;
em->timing.end_event = ECHO_EVT_LAST_BYTE;
em->tx_buf_size = 1 << 20;
em->data_source = ECHO_INVALID_DATA_SOURCE;
em->uri = format (0, "%s%c", "tcp://0.0.0.0/1234", 0);
- em->crypto_ctx_engine = CRYPTO_ENGINE_NONE;
+ em->n_uris = 1;
+ em->max_sim_connects = 0;
+ em->listen_session_cnt = 0;
+ em->crypto_engine = CRYPTO_ENGINE_NONE;
echo_set_each_proto_defaults_before_opts (em);
echo_process_opts (argc, argv);
echo_process_uri (em);
cfg->n_rings = 1;
cfg->q_nitems = rpc_queue_size;
cfg->ring_cfgs = rc;
- em->rpc_msq_queue = svm_msg_q_alloc (cfg);
+ svm_msg_q_attach (&em->rpc_msq_queue, svm_msg_q_alloc (cfg));
signal (SIGINT, stop_signal);
signal (SIGQUIT, stop_signal);
goto exit_on_error;
}
- if (em->crypto_ctx_engine == CRYPTO_ENGINE_NONE)
- /* when no crypto engine specified, dont expect crypto ctx */
+ if (em->uri_elts.transport_proto != TRANSPORT_PROTO_QUIC
+ && em->uri_elts.transport_proto != TRANSPORT_PROTO_TLS)
em->state = STATE_ATTACHED;
else
{
- ECHO_LOG (1, "Adding crypto context %U", echo_format_crypto_engine,
- em->crypto_ctx_engine);
- echo_send_add_crypto_ctx (em);
+ ECHO_LOG (2, "Adding crypto context %U", echo_format_crypto_engine,
+ em->crypto_engine);
+ echo_send_add_cert_key (em);
if (wait_for_state_change (em, STATE_ATTACHED, TIMEOUT))
{
ECHO_FAIL (ECHO_FAIL_APP_ATTACH,
clients_run (em);
echo_notify_event (em, ECHO_EVT_EXIT);
echo_free_sessions (em);
+ echo_send_del_cert_key (em);
+ if (wait_for_state_change (em, STATE_CLEANED_CERT_KEY, TIMEOUT))
+ {
+ ECHO_FAIL (ECHO_FAIL_DEL_CERT_KEY, "Couldn't cleanup cert and key");
+ goto exit_on_error;
+ }
+
echo_send_detach (em);
if (wait_for_state_change (em, STATE_DETACHED, TIMEOUT))
{
vl_client_disconnect_from_vlib ();
echo_assert_test_suceeded (em);
exit_on_error:
- ECHO_LOG (0, "Test complete !\n");
+ ECHO_LOG (1, "Test complete !\n");
if (em->output_json)
print_global_json_stats (em);
else
print_global_stats (em);
vec_free (em->fail_descr);
+ vec_free (em->available_proto_cb_vft);
exit (em->has_failed);
}