X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ftcp%2Fbuiltin_client.c;h=b48422c0b30f130b56631a0cb65385bc65dbfdf7;hb=59b2565cd91a67ced650739f36129650830211ac;hp=744f50e7db265125bea53b5ce501a638e67045ec;hpb=6534b7aa13bc5bed15ed87f47bb766405963e9e8;p=vpp.git

diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c
index 744f50e7db2..b48422c0b30 100644
--- a/src/vnet/tcp/builtin_client.c
+++ b/src/vnet/tcp/builtin_client.c
@@ -21,29 +21,27 @@
 #include
 #include
-#include
 
 #include
 
-/* define message IDs */
-#include
-
-/* define message structures */
-#define vl_typedefs
-#include
-#undef vl_typedefs
-
-/* define generated endian-swappers */
-#define vl_endianfun
-#include
-#undef vl_endianfun
+#define TCP_BUILTIN_CLIENT_DBG (0)
 
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
-#define vl_printfun
-#include
-#undef vl_printfun
+static void
+signal_evt_to_cli_i (int *code)
+{
+  tclient_main_t *tm = &tclient_main;
+  ASSERT (vlib_get_thread_index () == 0);
+  vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, *code, 0);
+}
 
-#define TCP_BUILTIN_CLIENT_DBG (0)
+static void
+signal_evt_to_cli (int code)
+{
+  if (vlib_get_thread_index () != 0)
+    vl_api_rpc_call_main_thread (signal_evt_to_cli_i, (u8 *) & code,
+                                 sizeof (code));
+  else
+    signal_evt_to_cli_i (&code);
+}
 
 static void
 send_test_chunk (tclient_main_t * tm, session_t * s)
@@ -53,6 +51,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s)
   u32 bytes_this_chunk;
   session_fifo_event_t evt;
   static int serial_number = 0;
+  svm_fifo_t *txf;
   int rv;
 
   ASSERT (vec_len (test_data) > 0);
@@ -63,7 +62,8 @@ send_test_chunk (tclient_main_t * tm, session_t * s)
   bytes_this_chunk = bytes_this_chunk < s->bytes_to_send
     ? bytes_this_chunk : s->bytes_to_send;
 
-  rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, bytes_this_chunk,
+  txf = s->server_tx_fifo;
+  rv = svm_fifo_enqueue_nowait (txf, bytes_this_chunk,
                                 test_data + test_buf_offset);
 
   /* If we managed to enqueue data... */
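
The helper pair added in the first hunk above is the patch's main thread-safety idiom: vlib_process_signal_event() may only be invoked from the main thread, so worker threads bounce the event code through vl_api_rpc_call_main_thread(), which copies the argument bytes into an RPC message and replays the call on thread 0. A minimal sketch of the same idiom, with hypothetical names (my_cli_node_index stands in for a real process node index):

static u32 my_cli_node_index;

static void
notify_cli_on_main (int *code)
{
  /* Runs on thread 0, where signaling a process node is legal */
  ASSERT (vlib_get_thread_index () == 0);
  vlib_process_signal_event (vlib_get_main (), my_cli_node_index, *code, 0);
}

static void
notify_cli (int code)
{
  if (vlib_get_thread_index () != 0)
    /* The RPC layer copies sizeof (code) bytes, so the address of a
       stack variable is safe to hand over here */
    vl_api_rpc_call_main_thread (notify_cli_on_main, (u8 *) & code,
                                 sizeof (code));
  else
    notify_cli_on_main (&code);
}
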
@@ -93,15 +93,16 @@ send_test_chunk (tclient_main_t * tm, session_t * s)
     }
 
   /* Poke the session layer */
-  if (svm_fifo_set_event (s->server_tx_fifo))
+  if (svm_fifo_set_event (txf))
     {
       /* Fabricate TX event, send to vpp */
-      evt.fifo = s->server_tx_fifo;
+      evt.fifo = txf;
       evt.event_type = FIFO_EVENT_APP_TX;
       evt.event_id = serial_number++;
 
-      if (unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt,
-                                        0 /* do wait for mutex */ ))
+      if (unix_shared_memory_queue_add
+          (tm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt,
+           0 /* do wait for mutex */ ))
        clib_warning ("could not enqueue event");
     }
 }
@@ -112,14 +113,16 @@ receive_test_chunk (tclient_main_t * tm, session_t * s)
 {
   svm_fifo_t *rx_fifo = s->server_rx_fifo;
   int n_read, test_bytes = 0;
+  u32 my_thread_index = vlib_get_thread_index ();
 
   /* Allow enqueuing of new event */
   // svm_fifo_unset_event (rx_fifo);
 
   if (test_bytes)
     {
-      n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf),
-                                        tm->rx_buf);
+      n_read = svm_fifo_dequeue_nowait (rx_fifo,
+                                        vec_len (tm->rx_buf[my_thread_index]),
+                                        tm->rx_buf[my_thread_index]);
     }
   else
     {
@@ -151,10 +154,12 @@ receive_test_chunk (tclient_main_t * tm, session_t * s)
          int i;
          for (i = 0; i < n_read; i++)
            {
-             if (tm->rx_buf[i] != ((s->bytes_received + i) & 0xff))
+             if (tm->rx_buf[my_thread_index][i]
+                 != ((s->bytes_received + i) & 0xff))
                {
                  clib_warning ("read %d error at byte %lld, 0x%x not 0x%x",
-                               n_read, s->bytes_received + i, tm->rx_buf[i],
+                               n_read, s->bytes_received + i,
+                               tm->rx_buf[my_thread_index][i],
                                ((s->bytes_received + i) & 0xff));
                }
            }
@@ -247,7 +252,11 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
          if (s)
            {
-             stream_session_disconnect (s);
+             vnet_disconnect_args_t _a, *a = &_a;
+             a->handle = stream_session_handle (s);
+             a->app_index = tm->app_index;
+             vnet_disconnect_session (a);
+
              vec_delete (connections_this_batch, 1, i);
              i--;
              __sync_fetch_and_add (&tm->ready_connections, -1);
@@ -258,9 +267,7 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
          /* Kick the debug CLI process */
          if (tm->ready_connections == 0)
            {
-             tm->test_end_time = vlib_time_now (vm);
-             vlib_process_signal_event (vm, tm->cli_node_index,
-                                        2, 0 /* data */ );
+             signal_evt_to_cli (2);
            }
        }
     }
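
The hunks above thread-partition the tester: tm->vpp_event_queue and tm->rx_buf become per-thread vectors, and a TX notification must be posted to the event queue of the thread that owns the fifo. A sketch of that enqueue path under those assumptions (post_tx_event and its queues parameter are illustrative, not part of the patch):

static void
post_tx_event (unix_shared_memory_queue_t ** queues, svm_fifo_t * f,
               u32 serial)
{
  session_fifo_event_t evt;

  /* svm_fifo_set_event () returns 0 if an event is already pending */
  if (!svm_fifo_set_event (f))
    return;

  evt.fifo = f;
  evt.event_type = FIFO_EVENT_APP_TX;
  evt.event_id = serial;

  /* Index by the fifo's owner so the right vpp thread sees the event */
  if (unix_shared_memory_queue_add (queues[f->master_thread_index],
                                    (u8 *) & evt, 0 /* do wait for mutex */ ))
    clib_warning ("could not enqueue event");
}
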
@@ -281,87 +288,16 @@ VLIB_REGISTER_NODE (builtin_client_node) =
 };
 /* *INDENT-ON* */
 
-/* So we don't get "no handler for... " msgs */
-static void
-vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp)
-{
-  vlib_main_t *vm = vlib_get_main ();
-  tclient_main_t *tm = &tclient_main;
-  tm->my_client_index = mp->index;
-  vlib_process_signal_event (vm, tm->cli_node_index, 1 /* evt */ ,
-                             0 /* data */ );
-}
-
 static int
 create_api_loopback (tclient_main_t * tm)
 {
-  vlib_main_t *vm = vlib_get_main ();
-  vl_api_memclnt_create_t _m, *mp = &_m;
-  extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *);
   api_main_t *am = &api_main;
   vl_shmem_hdr_t *shmem_hdr;
-  uword *event_data = 0, event_type;
-  int resolved = 0;
-
-  /*
-   * Create a "loopback" API client connection
-   * Don't do things like this unless you know what you're doing...
-   */
 
   shmem_hdr = am->shmem_hdr;
   tm->vl_input_queue = shmem_hdr->vl_input_queue;
-  memset (mp, 0, sizeof (*mp));
-  mp->_vl_msg_id = VL_API_MEMCLNT_CREATE;
-  mp->context = 0xFEEDFACE;
-  mp->input_queue = pointer_to_uword (tm->vl_input_queue);
-  strncpy ((char *) mp->name, "tcp_clients_tester", sizeof (mp->name) - 1);
-
-  vl_api_memclnt_create_t_handler (mp);
-
-  /* Wait for reply */
-  vlib_process_wait_for_event_or_clock (vm, 1.0);
-  event_type = vlib_process_get_events (vm, &event_data);
-  switch (event_type)
-    {
-    case 1:
-      resolved = 1;
-      break;
-    case ~0:
-      /* timed out */
-      break;
-    default:
-      clib_warning ("unknown event_type %d", event_type);
-    }
-  if (!resolved)
-    return -1;
-  return 0;
-}
-
-#define foreach_tclient_static_api_msg 	\
-_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) 	\
-
-static clib_error_t *
-tclient_api_hookup (vlib_main_t * vm)
-{
-  vl_msg_api_msg_config_t _c, *c = &_c;
-
-  /* Hook up client-side static APIs to our handlers */
-#define _(N,n) do {                                             \
-    c->id = VL_API_##N;                                         \
-    c->name = #n;                                               \
-    c->handler = vl_api_##n##_t_handler;                        \
-    c->cleanup = vl_noop_handler;                               \
-    c->endian = vl_api_##n##_t_endian;                          \
-    c->print = vl_api_##n##_t_print;                            \
-    c->size = sizeof(vl_api_##n##_t);                           \
-    c->traced = 1; /* trace, so these msgs print */             \
-    c->replay = 0; /* don't replay client create/delete msgs */ \
-    c->message_bounce = 0; /* don't bounce this message */      \
-    vl_msg_api_config(c);} while (0);
-
-  foreach_tclient_static_api_msg;
-#undef _
-
+  tm->my_client_index =
+    vl_api_memclnt_create_internal ("tcp_test_client", tm->vl_input_queue);
   return 0;
 }
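
The create_api_loopback() rewrite above deletes roughly seventy lines of hand-rolled API plumbing: building a VL_API_MEMCLNT_CREATE message, invoking its handler directly, parking on a reply event, plus the reply handler and message-table hookup all collapse into vl_api_memclnt_create_internal(), which registers an in-process ("loopback") API client synchronously and returns its client index. What remains is roughly this shape (sketch only; my_main_t and its fields are hypothetical):

typedef struct
{
  int my_client_index;
  unix_shared_memory_queue_t *vl_input_queue;
} my_main_t;

static int
register_loopback_client (my_main_t * mm)
{
  api_main_t *am = &api_main;
  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;

  /* Use the shared-memory input queue directly; no reply wait needed */
  mm->vl_input_queue = shmem_hdr->vl_input_queue;
  mm->my_client_index =
    vl_api_memclnt_create_internal ("my_loopback_client",
                                    mm->vl_input_queue);
  return 0;
}
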
@@ -369,27 +305,30 @@ static int
 tcp_test_clients_init (vlib_main_t * vm)
 {
   tclient_main_t *tm = &tclient_main;
-  vlib_thread_main_t *thread_main = vlib_get_thread_main ();
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
   int i;
 
-  tclient_api_hookup (vm);
   if (create_api_loopback (tm))
     return -1;
 
+  num_threads = 1 /* main thread */  + vtm->n_threads;
+
   /* Init test data.  Big buffer */
   vec_validate (tm->connect_test_data, 1024 * 1024 - 1);
   for (i = 0; i < vec_len (tm->connect_test_data); i++)
     tm->connect_test_data[i] = i & 0xff;
 
-  tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
-  vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1);
+  vec_validate (tm->rx_buf, num_threads - 1);
+  for (i = 0; i < num_threads; i++)
+    vec_validate (tm->rx_buf[i], vec_len (tm->connect_test_data) - 1);
 
   tm->is_init = 1;
-  tm->vlib_main = vm;
 
-  vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains);
-  vec_validate (tm->connections_this_batch_by_thread,
-                thread_main->n_vlib_mains);
+  vec_validate (tm->connection_index_by_thread, vtm->n_vlib_mains);
+  vec_validate (tm->connections_this_batch_by_thread, vtm->n_vlib_mains);
+  vec_validate (tm->vpp_event_queue, vtm->n_vlib_mains);
+
   return 0;
 }
@@ -400,23 +339,28 @@ builtin_session_connected_callback (u32 app_index, u32 api_context,
   tclient_main_t *tm = &tclient_main;
   session_t *session;
   u32 session_index;
-  int i;
+  u8 thread_index = vlib_get_thread_index ();
 
   if (is_fail)
     {
       clib_warning ("connection %d failed!", api_context);
-      vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, -1,
-                                 0 /* data */ );
-      return -1;
+      signal_evt_to_cli (-1);
+      return 0;
     }
 
-  tm->our_event_queue = session_manager_get_vpp_event_queue (s->thread_index);
-  tm->vpp_event_queue = session_manager_get_vpp_event_queue (s->thread_index);
+  ASSERT (s->thread_index == thread_index);
+
+  if (!tm->vpp_event_queue[thread_index])
+    tm->vpp_event_queue[thread_index] =
+      session_manager_get_vpp_event_queue (thread_index);
 
   /*
    * Setup session
    */
+  clib_spinlock_lock_if_init (&tm->sessions_lock);
   pool_get (tm->sessions, session);
+  clib_spinlock_unlock_if_init (&tm->sessions_lock);
+
   memset (session, 0, sizeof (*session));
   session_index = session - tm->sessions;
   session->bytes_to_send = tm->bytes_to_send;
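
In the connected callback above, sessions are now allocated from whichever worker completed the handshake, and pool_get() may grow (and therefore move) the pool's backing memory, so concurrent allocations have to be serialized; the *_if_init variants stay no-ops in single-threaded runs because the lock is only initialized when workers exist (see the clib_spinlock_init call later in the patch). The critical section reduces to this pattern (sketch; mm and its fields are hypothetical):

static session_t *
alloc_session_safe (my_main_t * mm)
{
  session_t *s;

  /* pool_get () can realloc the pool, so it must not race with
     another worker's pool_get () on the same pool */
  clib_spinlock_lock_if_init (&mm->sessions_lock);
  pool_get (mm->sessions, s);
  clib_spinlock_unlock_if_init (&mm->sessions_lock);

  memset (s, 0, sizeof (*s));
  return s;
}
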
@@ -427,32 +371,13 @@ builtin_session_connected_callback (u32 app_index, u32 api_context,
   session->server_tx_fifo->client_session_index = session_index;
   session->vpp_session_handle = stream_session_handle (s);
 
-  /* Add it to the session lookup table */
-  hash_set (tm->session_index_by_vpp_handles, session->vpp_session_handle,
-            session_index);
-
-  if (tm->ready_connections == tm->expected_connections - 1)
-    {
-      vlib_thread_main_t *thread_main = vlib_get_thread_main ();
-      int thread_index;
-
-      thread_index = 0;
-      for (i = 0; i < pool_elts (tm->sessions); i++)
-        {
-          vec_add1 (tm->connection_index_by_thread[thread_index], i);
-          thread_index++;
-          if (thread_index == thread_main->n_vlib_mains)
-            thread_index = 0;
-        }
-    }
+  vec_add1 (tm->connection_index_by_thread[thread_index], session_index);
   __sync_fetch_and_add (&tm->ready_connections, 1);
   if (tm->ready_connections == tm->expected_connections)
     {
       tm->run_test = 1;
-      tm->test_start_time = vlib_time_now (tm->vlib_main);
       /* Signal the CLI process that the action is starting... */
-      vlib_process_signal_event (tm->vlib_main, tm->cli_node_index, 1,
-                                 0 /* data */ );
+      signal_evt_to_cli (1);
     }
 
   return 0;
@@ -580,8 +505,14 @@ clients_connect (vlib_main_t * vm, u8 * uri, u32 n_clients)
       a->mp = 0;
       vnet_connect_uri (a);
 
-      /* Crude pacing for call setups, 100k/sec */
-      vlib_process_suspend (vm, 10e-6);
+      /* Crude pacing for call setups */
+      if ((i % 4) == 0)
+        vlib_process_suspend (vm, 10e-6);
+      ASSERT (i + 1 >= tm->ready_connections);
+      while (i + 1 - tm->ready_connections > 1000)
+        {
+          vlib_process_suspend (vm, 100e-6);
+        }
     }
 }
@@ -595,8 +526,10 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
   uword *event_data = 0, event_type;
   u8 *default_connect_uri = (u8 *) "tcp://6.0.1.1/1234", *uri;
   u64 tmp, total_bytes;
-  f64 cli_timeout = 20.0, delta;
+  f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
+  f64 time_before_connects;
   u32 n_clients = 1;
+  int preallocate_sessions = 0;
   char *transfer_type;
   int i;
@@ -606,7 +539,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
   tm->connections_per_batch = 1000;
   tm->private_segment_count = 0;
   tm->private_segment_size = 0;
-
+  tm->vlib_main = vm;
+  if (thread_main->n_vlib_mains > 1)
+    clib_spinlock_init (&tm->sessions_lock);
   vec_free (tm->connect_uri);
 
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
@@ -621,7 +556,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
        ;
       else if (unformat (input, "uri %s", &tm->connect_uri))
        ;
-      else if (unformat (input, "cli-timeout %f", &cli_timeout))
+      else if (unformat (input, "test-timeout %f", &test_timeout))
+       ;
+      else if (unformat (input, "syn-timeout %f", &syn_timeout))
        ;
       else if (unformat (input, "no-return"))
        tm->no_return = 1;
@@ -630,14 +567,18 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
       else if (unformat (input, "private-segment-count %d",
                          &tm->private_segment_count))
        ;
-      else if (unformat (input, "private-segment-size %dm", &tmp))
-       tm->private_segment_size = tmp << 20;
-      else if (unformat (input, "private-segment-size %dg", &tmp))
-       tm->private_segment_size = tmp << 30;
-      else if (unformat (input, "private-segment-size %d", &tmp))
-       tm->private_segment_size = tmp;
+      else if (unformat (input, "private-segment-size %U",
+                         unformat_memory_size, &tmp))
+       {
+         if (tmp >= 0x100000000ULL)
+           return clib_error_return
+             (0, "private segment size %lld (%llu) too large", tmp, tmp);
+         tm->private_segment_size = tmp;
+       }
       else if (unformat (input, "preallocate-fifos"))
       tm->prealloc_fifos = 1;
+      else if (unformat (input, "preallocate-sessions"))
+       preallocate_sessions = 1;
      else if (unformat (input, "client-batch %d",
                         &tm->connections_per_batch))
       ;
@@ -655,6 +596,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
       return clib_error_return (0, "failed init");
     }
 
+  tm->ready_connections = 0;
   tm->expected_connections = n_clients;
   tm->rx_total = 0;
 
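
The clients_connect() hunk above replaces the fixed "100k/sec" suspend with two cooperating mechanisms: a brief yield every fourth connect, plus a window check that parks the process whenever more than 1000 connects are still unanswered, so the connect burst can never run arbitrarily far ahead of the three-way handshakes. The loop's shape, reduced to essentials (issue_connect is hypothetical):

for (i = 0; i < n_clients; i++)
  {
    issue_connect (i);          /* hypothetical: fire one vnet_connect_uri */

    if ((i % 4) == 0)
      vlib_process_suspend (vm, 10e-6);

    /* i + 1 connects have been issued so far; ready_connections counts
       completed handshakes, so the difference is the in-flight window */
    ASSERT (i + 1 >= tm->ready_connections);
    while (i + 1 - tm->ready_connections > 1000)
      vlib_process_suspend (vm, 100e-6);
  }
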
@@ -668,7 +610,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
   start_tx_pthread ();
 #endif
 
+  vlib_worker_thread_barrier_sync (vm);
   vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
+  vlib_worker_thread_barrier_release (vm);
 
   if (tm->test_client_attached == 0)
     {
@@ -684,13 +628,22 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
     vlib_node_set_state (vlib_mains[i], builtin_client_node.index,
                          VLIB_NODE_STATE_POLLING);
 
+  if (preallocate_sessions)
+    {
+      session_t *sp __attribute__ ((unused));
+      for (i = 0; i < n_clients; i++)
+        pool_get (tm->sessions, sp);
+      for (i = 0; i < n_clients; i++)
+        pool_put_index (tm->sessions, i);
+    }
+
   /* Fire off connect requests */
+  time_before_connects = vlib_time_now (vm);
   clients_connect (vm, uri, n_clients);
 
   /* Park until the sessions come up, or ten seconds elapse... */
-  vlib_process_wait_for_event_or_clock (vm, 10.0 /* timeout, seconds */ );
+  vlib_process_wait_for_event_or_clock (vm, syn_timeout);
   event_type = vlib_process_get_events (vm, &event_data);
-
   switch (event_type)
     {
     case ~0:
@@ -699,6 +652,16 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
       goto cleanup;
 
     case 1:
+      delta = vlib_time_now (vm) - time_before_connects;
+
+      if (delta != 0.0)
+        {
+          vlib_cli_output
+            (vm, "%d three-way handshakes in %.2f seconds, %.2f/sec",
+             n_clients, delta, ((f64) n_clients) / delta);
+        }
+
+      tm->test_start_time = vlib_time_now (tm->vlib_main);
       vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time);
       break;
 
@@ -708,9 +671,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
     }
 
   /* Now wait for the sessions to finish... */
-  vlib_process_wait_for_event_or_clock (vm, cli_timeout);
+  vlib_process_wait_for_event_or_clock (vm, test_timeout);
   event_type = vlib_process_get_events (vm, &event_data);
-
   switch (event_type)
     {
     case ~0:
@@ -719,6 +681,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
       goto cleanup;
 
     case 2:
+      tm->test_end_time = vlib_time_now (vm);
       vlib_cli_output (vm, "Test finished at %.6f", tm->test_end_time);
       break;
 
@@ -753,8 +716,23 @@ cleanup:
       vec_reset_length (tm->connection_index_by_thread[i]);
       vec_reset_length (tm->connections_this_batch_by_thread[i]);
     }
+  pool_free (tm->sessions);
 
+  /* Detach the application, so we can use different fifo sizes next time */
+  if (tm->test_client_attached)
+    {
+      vnet_app_detach_args_t _da, *da = &_da;
+      int rv;
+
+      da->app_index = tm->app_index;
+
+      rv = vnet_application_detach (da);
+      if (rv)
+        vlib_cli_output (vm, "WARNING: app detach failed...");
+      tm->test_client_attached = 0;
+      tm->app_index = ~0;
+    }
   return 0;
 }
@@ -762,9 +740,13 @@ cleanup:
 VLIB_CLI_COMMAND (test_clients_command, static) =
 {
   .path = "test tcp clients",
-  .short_help = "test tcp clients [nclients %d]"
-  "[iterations %d] [bytes %d] [uri tcp://6.0.1.1/1234]",
+  .short_help = "test tcp clients [nclients %d] [[m|g]bytes ] "
+  "[test-timeout