/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <inttypes.h>

#include <rte_cycles.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_pause.h>
#include <rte_ring.h>

#include "test.h"
/*
 * Ring performance test
 *
 * Measures performance of various operations using rdtsc
 *  * Empty ring dequeue
 *  * Enqueue/dequeue of bursts in 1 thread
 *  * Enqueue/dequeue of bursts in 2 threads
 */
/* Name and size of the ring used by all the tests below. */
#define RING_NAME "RING_PERF"
#define RING_SIZE 4096

/* Largest burst the tests ever enqueue/dequeue; sizes the local burst
 * buffers. Must be >= the largest entry in bulk_sizes[]. */
#define MAX_BURST 32
/*
 * the sizes to enqueue and dequeue in testing
 * (marked volatile so they won't be seen as compile-time constants)
 */
static const volatile unsigned bulk_sizes[] = { 8, 32 };

/* Pair of lcore ids used for the two-thread tests; filled in by the
 * get_two_* helpers below. */
struct lcore_pair {
	unsigned c1, c2;
};

/* Rendezvous counter so the paired enqueue/dequeue threads start their
 * timed loops together (each thread increments it and spins until it
 * reaches 2). */
static volatile unsigned lcore_count = 0;
70 /**** Functions to analyse our core mask to get cores for different tests ***/
73 get_two_hyperthreads(struct lcore_pair *lcp)
76 unsigned c1, c2, s1, s2;
77 RTE_LCORE_FOREACH(id1) {
78 /* inner loop just re-reads all id's. We could skip the first few
79 * elements, but since number of cores is small there is little point
81 RTE_LCORE_FOREACH(id2) {
84 c1 = lcore_config[id1].core_id;
85 c2 = lcore_config[id2].core_id;
86 s1 = lcore_config[id1].socket_id;
87 s2 = lcore_config[id2].socket_id;
88 if ((c1 == c2) && (s1 == s2)){
99 get_two_cores(struct lcore_pair *lcp)
102 unsigned c1, c2, s1, s2;
103 RTE_LCORE_FOREACH(id1) {
104 RTE_LCORE_FOREACH(id2) {
107 c1 = lcore_config[id1].core_id;
108 c2 = lcore_config[id2].core_id;
109 s1 = lcore_config[id1].socket_id;
110 s2 = lcore_config[id2].socket_id;
111 if ((c1 != c2) && (s1 == s2)){
122 get_two_sockets(struct lcore_pair *lcp)
126 RTE_LCORE_FOREACH(id1) {
127 RTE_LCORE_FOREACH(id2) {
130 s1 = lcore_config[id1].socket_id;
131 s2 = lcore_config[id2].socket_id;
142 /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
144 test_empty_dequeue(struct rte_ring *r)
146 const unsigned iter_shift = 26;
147 const unsigned iterations = 1<<iter_shift;
149 void *burst[MAX_BURST];
151 const uint64_t sc_start = rte_rdtsc();
152 for (i = 0; i < iterations; i++)
153 rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
154 const uint64_t sc_end = rte_rdtsc();
156 const uint64_t mc_start = rte_rdtsc();
157 for (i = 0; i < iterations; i++)
158 rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
159 const uint64_t mc_end = rte_rdtsc();
161 printf("SC empty dequeue: %.2F\n",
162 (double)(sc_end-sc_start) / iterations);
163 printf("MC empty dequeue: %.2F\n",
164 (double)(mc_end-mc_start) / iterations);
/*
 * for the separate enqueue and dequeue threads they take in one param
 * and return two. Input = burst size, output = cycle average for sp/sc & mp/mc
 */
struct thread_params {
	struct rte_ring *r;   /* ring to enqueue to / dequeue from */
	unsigned size;        /* input value, the burst size */
	double spsc, mpmc;    /* output value, the single or multi timings */
};
178 * Function that uses rdtsc to measure timing for ring enqueue. Needs pair
179 * thread running dequeue_bulk function
182 enqueue_bulk(void *p)
184 const unsigned iter_shift = 23;
185 const unsigned iterations = 1<<iter_shift;
186 struct thread_params *params = p;
187 struct rte_ring *r = params->r;
188 const unsigned size = params->size;
190 void *burst[MAX_BURST] = {0};
192 if ( __sync_add_and_fetch(&lcore_count, 1) != 2 )
193 while(lcore_count != 2)
196 const uint64_t sp_start = rte_rdtsc();
197 for (i = 0; i < iterations; i++)
198 while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
200 const uint64_t sp_end = rte_rdtsc();
202 const uint64_t mp_start = rte_rdtsc();
203 for (i = 0; i < iterations; i++)
204 while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
206 const uint64_t mp_end = rte_rdtsc();
208 params->spsc = ((double)(sp_end - sp_start))/(iterations*size);
209 params->mpmc = ((double)(mp_end - mp_start))/(iterations*size);
214 * Function that uses rdtsc to measure timing for ring dequeue. Needs pair
215 * thread running enqueue_bulk function
218 dequeue_bulk(void *p)
220 const unsigned iter_shift = 23;
221 const unsigned iterations = 1<<iter_shift;
222 struct thread_params *params = p;
223 struct rte_ring *r = params->r;
224 const unsigned size = params->size;
226 void *burst[MAX_BURST] = {0};
228 if ( __sync_add_and_fetch(&lcore_count, 1) != 2 )
229 while(lcore_count != 2)
232 const uint64_t sc_start = rte_rdtsc();
233 for (i = 0; i < iterations; i++)
234 while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
236 const uint64_t sc_end = rte_rdtsc();
238 const uint64_t mc_start = rte_rdtsc();
239 for (i = 0; i < iterations; i++)
240 while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
242 const uint64_t mc_end = rte_rdtsc();
244 params->spsc = ((double)(sc_end - sc_start))/(iterations*size);
245 params->mpmc = ((double)(mc_end - mc_start))/(iterations*size);
250 * Function that calls the enqueue and dequeue bulk functions on pairs of cores.
251 * used to measure ring perf between hyperthreads, cores and sockets.
254 run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,
255 lcore_function_t f1, lcore_function_t f2)
257 struct thread_params param1 = {0}, param2 = {0};
259 for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
261 param1.size = param2.size = bulk_sizes[i];
262 param1.r = param2.r = r;
263 if (cores->c1 == rte_get_master_lcore()) {
264 rte_eal_remote_launch(f2, ¶m2, cores->c2);
266 rte_eal_wait_lcore(cores->c2);
268 rte_eal_remote_launch(f1, ¶m1, cores->c1);
269 rte_eal_remote_launch(f2, ¶m2, cores->c2);
270 rte_eal_wait_lcore(cores->c1);
271 rte_eal_wait_lcore(cores->c2);
273 printf("SP/SC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[i],
274 param1.spsc + param2.spsc);
275 printf("MP/MC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[i],
276 param1.mpmc + param2.mpmc);
/*
 * Test function that determines how long an enqueue + dequeue of a single item
 * takes on a single lcore. Result is for comparison with the bulk enq+deq.
 */
static void
test_single_enqueue_dequeue(struct rte_ring *r)
{
	const unsigned iter_shift = 24;
	const unsigned iterations = 1<<iter_shift;
	unsigned i = 0;
	void *burst = NULL;

	const uint64_t sc_start = rte_rdtsc();
	for (i = 0; i < iterations; i++) {
		rte_ring_sp_enqueue(r, burst);
		rte_ring_sc_dequeue(r, &burst);
	}
	const uint64_t sc_end = rte_rdtsc();

	const uint64_t mc_start = rte_rdtsc();
	for (i = 0; i < iterations; i++) {
		rte_ring_mp_enqueue(r, burst);
		rte_ring_mc_dequeue(r, &burst);
	}
	const uint64_t mc_end = rte_rdtsc();

	/* >> iter_shift divides by the iteration count */
	printf("SP/SC single enq/dequeue: %"PRIu64"\n",
			(sc_end-sc_start) >> iter_shift);
	printf("MP/MC single enq/dequeue: %"PRIu64"\n",
			(mc_end-mc_start) >> iter_shift);
}
313 * Test that does both enqueue and dequeue on a core using the burst() API calls
314 * instead of the bulk() calls used in other tests. Results should be the same
315 * as for the bulk function called on a single lcore.
318 test_burst_enqueue_dequeue(struct rte_ring *r)
320 const unsigned iter_shift = 23;
321 const unsigned iterations = 1<<iter_shift;
323 void *burst[MAX_BURST] = {0};
325 for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
326 const uint64_t sc_start = rte_rdtsc();
327 for (i = 0; i < iterations; i++) {
328 rte_ring_sp_enqueue_burst(r, burst,
329 bulk_sizes[sz], NULL);
330 rte_ring_sc_dequeue_burst(r, burst,
331 bulk_sizes[sz], NULL);
333 const uint64_t sc_end = rte_rdtsc();
335 const uint64_t mc_start = rte_rdtsc();
336 for (i = 0; i < iterations; i++) {
337 rte_ring_mp_enqueue_burst(r, burst,
338 bulk_sizes[sz], NULL);
339 rte_ring_mc_dequeue_burst(r, burst,
340 bulk_sizes[sz], NULL);
342 const uint64_t mc_end = rte_rdtsc();
344 uint64_t mc_avg = ((mc_end-mc_start) >> iter_shift) / bulk_sizes[sz];
345 uint64_t sc_avg = ((sc_end-sc_start) >> iter_shift) / bulk_sizes[sz];
347 printf("SP/SC burst enq/dequeue (size: %u): %"PRIu64"\n", bulk_sizes[sz],
349 printf("MP/MC burst enq/dequeue (size: %u): %"PRIu64"\n", bulk_sizes[sz],
354 /* Times enqueue and dequeue on a single lcore */
356 test_bulk_enqueue_dequeue(struct rte_ring *r)
358 const unsigned iter_shift = 23;
359 const unsigned iterations = 1<<iter_shift;
361 void *burst[MAX_BURST] = {0};
363 for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
364 const uint64_t sc_start = rte_rdtsc();
365 for (i = 0; i < iterations; i++) {
366 rte_ring_sp_enqueue_bulk(r, burst,
367 bulk_sizes[sz], NULL);
368 rte_ring_sc_dequeue_bulk(r, burst,
369 bulk_sizes[sz], NULL);
371 const uint64_t sc_end = rte_rdtsc();
373 const uint64_t mc_start = rte_rdtsc();
374 for (i = 0; i < iterations; i++) {
375 rte_ring_mp_enqueue_bulk(r, burst,
376 bulk_sizes[sz], NULL);
377 rte_ring_mc_dequeue_bulk(r, burst,
378 bulk_sizes[sz], NULL);
380 const uint64_t mc_end = rte_rdtsc();
382 double sc_avg = ((double)(sc_end-sc_start) /
383 (iterations * bulk_sizes[sz]));
384 double mc_avg = ((double)(mc_end-mc_start) /
385 (iterations * bulk_sizes[sz]));
387 printf("SP/SC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[sz],
389 printf("MP/MC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[sz],
397 struct lcore_pair cores;
398 struct rte_ring *r = NULL;
400 r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(), 0);
404 printf("### Testing single element and burst enq/deq ###\n");
405 test_single_enqueue_dequeue(r);
406 test_burst_enqueue_dequeue(r);
408 printf("\n### Testing empty dequeue ###\n");
409 test_empty_dequeue(r);
411 printf("\n### Testing using a single lcore ###\n");
412 test_bulk_enqueue_dequeue(r);
414 if (get_two_hyperthreads(&cores) == 0) {
415 printf("\n### Testing using two hyperthreads ###\n");
416 run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
418 if (get_two_cores(&cores) == 0) {
419 printf("\n### Testing using two physical cores ###\n");
420 run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
422 if (get_two_sockets(&cores) == 0) {
423 printf("\n### Testing using two NUMA nodes ###\n");
424 run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
/* Register with the test framework as the "ring_perf_autotest" command. */
REGISTER_TEST_COMMAND(ring_perf_autotest, test_ring_perf);