return 0;
}
+/* Return nonzero if @proto requires a crypto extended config (cert/key
+ * pair) to be attached to the session endpoint before connecting. */
+static int
+echo_client_transport_needs_crypto (transport_proto_t proto)
+{
+ return proto == TRANSPORT_PROTO_TLS || proto == TRANSPORT_PROTO_DTLS ||
+ proto == TRANSPORT_PROTO_QUIC;
+}
+
clib_error_t *
echo_clients_connect (vlib_main_t * vm, u32 n_clients)
{
clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
a->api_context = i;
a->app_index = ecm->app_index;
- a->sep_ext.ckpair_index = ecm->ckpair_index;
+ if (echo_client_transport_needs_crypto (a->sep_ext.transport_proto))
+ {
+ session_endpoint_alloc_ext_cfg (&a->sep_ext,
+ TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
+ a->sep_ext.ext_cfg->crypto.ckpair_index = ecm->ckpair_index;
+ }
vlib_worker_thread_barrier_sync (vm);
- if ((rv = vnet_connect (a)))
+ rv = vnet_connect (a);
+ if (a->sep_ext.ext_cfg)
+ clib_mem_free (a->sep_ext.ext_cfg);
+ if (rv)
{
vlib_worker_thread_barrier_release (vm);
return clib_error_return (0, "connect returned: %d", rv);
echo_client_main_t *ecm = &echo_client_main;
vlib_thread_main_t *thread_main = vlib_get_thread_main ();
u64 tmp, total_bytes, appns_flags = 0, appns_secret = 0;
+ session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
char *default_uri = "tcp://6.0.1.1/1234";
+ u8 *appns_id = 0, barrier_acq_needed = 0;
+ int preallocate_sessions = 0, i, rv;
uword *event_data = 0, event_type;
f64 time_before_connects;
u32 n_clients = 1;
- int preallocate_sessions = 0;
char *transfer_type;
clib_error_t *error = 0;
- u8 *appns_id = 0;
- int i;
- session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
- int rv;
ecm->quic_streams = 1;
ecm->bytes_to_send = 8192;
ecm->no_copy = 0;
ecm->run_test = ECHO_CLIENTS_STARTING;
+ if (vlib_num_workers ())
+ {
+ /* The request came over the binary api and the inband cli handler
+ * is not mp_safe. Drop the barrier to make sure the workers are not
+ * blocked.
+ */
+ if (vlib_thread_is_main_w_barrier ())
+ {
+ barrier_acq_needed = 1;
+ vlib_worker_thread_barrier_release (vm);
+ }
+ /*
+ * There's a good chance that both the client and the server echo
+ * apps will be enabled so make sure the session queue node polls on
+ * the main thread as connections will probably be established on it.
+ */
+ vlib_node_set_state (vm, session_queue_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }
+
if (thread_main->n_vlib_mains > 1)
clib_spinlock_init (&ecm->sessions_lock);
vec_free (ecm->connect_uri);
unformat_memory_size, &tmp))
{
if (tmp >= 0x100000000ULL)
- return clib_error_return
- (0, "private segment size %lld (%llu) too large", tmp, tmp);
+ {
+ error = clib_error_return (
+ 0, "private segment size %lld (%llu) too large", tmp, tmp);
+ goto cleanup;
+ }
ecm->private_segment_size = tmp;
}
else if (unformat (input, "preallocate-fifos"))
else if (unformat (input, "tls-engine %d", &ecm->tls_engine))
;
else
- return clib_error_return (0, "failed: unknown input `%U'",
- format_unformat_error, input);
+ {
+ error = clib_error_return (0, "failed: unknown input `%U'",
+ format_unformat_error, input);
+ goto cleanup;
+ }
}
/* Store cli process node index for signalling */
if (ecm->is_init == 0)
{
if (echo_clients_init (vm))
- return clib_error_return (0, "failed init");
+ {
+ error = clib_error_return (0, "failed init");
+ goto cleanup;
+ }
}
}
if ((rv = parse_uri ((char *) ecm->connect_uri, &sep)))
- return clib_error_return (0, "Uri parse error: %d", rv);
+ {
+ error = clib_error_return (0, "Uri parse error: %d", rv);
+ goto cleanup;
+ }
ecm->transport_proto = sep.transport_proto;
ecm->is_dgram = (sep.transport_proto == TRANSPORT_PROTO_UDP);
{
vec_free (appns_id);
clib_error_report (error);
- return error;
+ goto cleanup;
}
vec_free (appns_id);
}
/* Turn on the builtin client input nodes */
for (i = 0; i < thread_main->n_vlib_mains; i++)
- vlib_node_set_state (vlib_mains[i], echo_clients_node.index,
+ vlib_node_set_state (vlib_get_main_by_index (i), echo_clients_node.index,
VLIB_NODE_STATE_POLLING);
if (preallocate_sessions)
if (error)
ec_cli_output ("test failed");
vec_free (ecm->connect_uri);
+ clib_spinlock_free (&ecm->sessions_lock);
+
+ if (barrier_acq_needed)
+ vlib_worker_thread_barrier_sync (vm);
+
return error;
}