New upstream version 18.02
[deb_dpdk.git] / test / test / test_func_reentrancy.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <stdarg.h>
11 #include <errno.h>
12 #include <sys/queue.h>
13
14 #include <rte_common.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
20 #include <rte_eal.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ring.h>
26 #include <rte_mempool.h>
27 #include <rte_spinlock.h>
28 #include <rte_malloc.h>
29
30 #ifdef RTE_LIBRTE_HASH
31 #include <rte_hash.h>
32 #include <rte_fbk_hash.h>
33 #include <rte_jhash.h>
34 #endif /* RTE_LIBRTE_HASH */
35
36 #ifdef RTE_LIBRTE_LPM
37 #include <rte_lpm.h>
38 #endif /* RTE_LIBRTE_LPM */
39
40 #include <rte_string_fns.h>
41
42 #include "test.h"
43
44 typedef int (*case_func_t)(void* arg);
45 typedef void (*case_clean_t)(unsigned lcore_id);
46
47 #define MAX_STRING_SIZE                     (256)
48 #define MAX_ITER_TIMES                      (16)
49 #define MAX_LPM_ITER_TIMES                  (8)
50
51 #define MEMPOOL_ELT_SIZE                    (sizeof(uint32_t))
52 #define MEMPOOL_SIZE                        (4)
53
54 #define MAX_LCORES      RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
55
56 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
57 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
58
59 #define WAIT_SYNCHRO_FOR_SLAVES()   do{ \
60         if (lcore_self != rte_get_master_lcore())                  \
61                 while (rte_atomic32_read(&synchro) == 0);        \
62 } while(0)
63
64 /*
65  * rte_eal_init only init once
66  */
67 static int
68 test_eal_init_once(__attribute__((unused)) void *arg)
69 {
70         unsigned lcore_self =  rte_lcore_id();
71
72         WAIT_SYNCHRO_FOR_SLAVES();
73
74         rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
75         if (rte_eal_init(0, NULL) != -1)
76                 return -1;
77
78         return 0;
79 }
80
81 /*
82  * ring create/lookup reentrancy test
83  */
84 static void
85 ring_clean(unsigned int lcore_id)
86 {
87         struct rte_ring *rp;
88         char ring_name[MAX_STRING_SIZE];
89         int i;
90
91         for (i = 0; i < MAX_ITER_TIMES; i++) {
92                 snprintf(ring_name, sizeof(ring_name),
93                                 "fr_test_%d_%d", lcore_id, i);
94                 rp = rte_ring_lookup(ring_name);
95                 if (rp != NULL)
96                         rte_ring_free(rp);
97         }
98 }
99
100 static int
101 ring_create_lookup(__attribute__((unused)) void *arg)
102 {
103         unsigned lcore_self = rte_lcore_id();
104         struct rte_ring * rp;
105         char ring_name[MAX_STRING_SIZE];
106         int i;
107
108         WAIT_SYNCHRO_FOR_SLAVES();
109
110         /* create the same ring simultaneously on all threads */
111         for (i = 0; i < MAX_ITER_TIMES; i++) {
112                 rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
113                 if (rp != NULL)
114                         rte_atomic32_inc(&obj_count);
115         }
116
117         /* create/lookup new ring several times */
118         for (i = 0; i < MAX_ITER_TIMES; i++) {
119                 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
120                 rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
121                 if (NULL == rp)
122                         return -1;
123                 if (rte_ring_lookup(ring_name) != rp)
124                         return -1;
125         }
126
127         /* verify all ring created successful */
128         for (i = 0; i < MAX_ITER_TIMES; i++) {
129                 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
130                 if (rte_ring_lookup(ring_name) == NULL)
131                         return -1;
132         }
133
134         return 0;
135 }
136
137 static void
138 my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
139             void *obj, unsigned i)
140 {
141         uint32_t *objnum = obj;
142         memset(obj, 0, mp->elt_size);
143         *objnum = i;
144 }
145
146 static void
147 mempool_clean(unsigned int lcore_id)
148 {
149         struct rte_mempool *mp;
150         char mempool_name[MAX_STRING_SIZE];
151         int i;
152
153         /* verify all ring created successful */
154         for (i = 0; i < MAX_ITER_TIMES; i++) {
155                 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
156                          lcore_id, i);
157                 mp = rte_mempool_lookup(mempool_name);
158                 if (mp != NULL)
159                         rte_mempool_free(mp);
160         }
161 }
162
163 static int
164 mempool_create_lookup(__attribute__((unused)) void *arg)
165 {
166         unsigned lcore_self = rte_lcore_id();
167         struct rte_mempool * mp;
168         char mempool_name[MAX_STRING_SIZE];
169         int i;
170
171         WAIT_SYNCHRO_FOR_SLAVES();
172
173         /* create the same mempool simultaneously on all threads */
174         for (i = 0; i < MAX_ITER_TIMES; i++) {
175                 mp = rte_mempool_create("fr_test_once",  MEMPOOL_SIZE,
176                                         MEMPOOL_ELT_SIZE, 0, 0,
177                                         NULL, NULL,
178                                         my_obj_init, NULL,
179                                         SOCKET_ID_ANY, 0);
180                 if (mp != NULL)
181                         rte_atomic32_inc(&obj_count);
182         }
183
184         /* create/lookup new ring several times */
185         for (i = 0; i < MAX_ITER_TIMES; i++) {
186                 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
187                 mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
188                                                 MEMPOOL_ELT_SIZE, 0, 0,
189                                                 NULL, NULL,
190                                                 my_obj_init, NULL,
191                                                 SOCKET_ID_ANY, 0);
192                 if (NULL == mp)
193                         return -1;
194                 if (rte_mempool_lookup(mempool_name) != mp)
195                         return -1;
196         }
197
198         /* verify all ring created successful */
199         for (i = 0; i < MAX_ITER_TIMES; i++) {
200                 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
201                 if (rte_mempool_lookup(mempool_name) == NULL)
202                         return -1;
203         }
204
205         return 0;
206 }
207
208 #ifdef RTE_LIBRTE_HASH
209 static void
210 hash_clean(unsigned lcore_id)
211 {
212         char hash_name[MAX_STRING_SIZE];
213         struct rte_hash *handle;
214         int i;
215
216         for (i = 0; i < MAX_ITER_TIMES; i++) {
217                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d",  lcore_id, i);
218
219                 if ((handle = rte_hash_find_existing(hash_name)) != NULL)
220                         rte_hash_free(handle);
221         }
222 }
223
224 static int
225 hash_create_free(__attribute__((unused)) void *arg)
226 {
227         unsigned lcore_self = rte_lcore_id();
228         struct rte_hash *handle;
229         char hash_name[MAX_STRING_SIZE];
230         int i;
231         struct rte_hash_parameters hash_params = {
232                 .name = NULL,
233                 .entries = 16,
234                 .key_len = 4,
235                 .hash_func = (rte_hash_function)rte_jhash_32b,
236                 .hash_func_init_val = 0,
237                 .socket_id = 0,
238         };
239
240         WAIT_SYNCHRO_FOR_SLAVES();
241
242         /* create the same hash simultaneously on all threads */
243         hash_params.name = "fr_test_once";
244         for (i = 0; i < MAX_ITER_TIMES; i++) {
245                 handle = rte_hash_create(&hash_params);
246                 if (handle != NULL)
247                         rte_atomic32_inc(&obj_count);
248         }
249
250         /* create mutiple times simultaneously */
251         for (i = 0; i < MAX_ITER_TIMES; i++) {
252                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
253                 hash_params.name = hash_name;
254
255                 handle = rte_hash_create(&hash_params);
256                 if (NULL == handle)
257                         return -1;
258
259                 /* verify correct existing and then free all */
260                 if (handle != rte_hash_find_existing(hash_name))
261                         return -1;
262
263                 rte_hash_free(handle);
264         }
265
266         /* verify free correct */
267         for (i = 0; i < MAX_ITER_TIMES; i++) {
268                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d",  lcore_self, i);
269
270                 if (NULL != rte_hash_find_existing(hash_name))
271                         return -1;
272         }
273
274         return 0;
275 }
276
277 static void
278 fbk_clean(unsigned lcore_id)
279 {
280         char fbk_name[MAX_STRING_SIZE];
281         struct rte_fbk_hash_table *handle;
282         int i;
283
284         for (i = 0; i < MAX_ITER_TIMES; i++) {
285                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d",  lcore_id, i);
286
287                 if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
288                         rte_fbk_hash_free(handle);
289         }
290 }
291
292 static int
293 fbk_create_free(__attribute__((unused)) void *arg)
294 {
295         unsigned lcore_self = rte_lcore_id();
296         struct rte_fbk_hash_table *handle;
297         char fbk_name[MAX_STRING_SIZE];
298         int i;
299         struct rte_fbk_hash_params fbk_params = {
300                 .name = NULL,
301                 .entries = 4,
302                 .entries_per_bucket = 4,
303                 .socket_id = 0,
304                 .hash_func = rte_jhash_1word,
305                 .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
306         };
307
308         WAIT_SYNCHRO_FOR_SLAVES();
309
310         /* create the same fbk hash table simultaneously on all threads */
311         fbk_params.name = "fr_test_once";
312         for (i = 0; i < MAX_ITER_TIMES; i++) {
313                 handle = rte_fbk_hash_create(&fbk_params);
314                 if (handle != NULL)
315                         rte_atomic32_inc(&obj_count);
316         }
317
318         /* create mutiple fbk tables simultaneously */
319         for (i = 0; i < MAX_ITER_TIMES; i++) {
320                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
321                 fbk_params.name = fbk_name;
322
323                 handle = rte_fbk_hash_create(&fbk_params);
324                 if (NULL == handle)
325                         return -1;
326
327                 /* verify correct existing and then free all */
328                 if (handle != rte_fbk_hash_find_existing(fbk_name))
329                         return -1;
330
331                 rte_fbk_hash_free(handle);
332         }
333
334         /* verify free correct */
335         for (i = 0; i < MAX_ITER_TIMES; i++) {
336                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d",  lcore_self, i);
337
338                 if (NULL != rte_fbk_hash_find_existing(fbk_name))
339                         return -1;
340         }
341
342         return 0;
343 }
344 #endif /* RTE_LIBRTE_HASH */
345
346 #ifdef RTE_LIBRTE_LPM
347 static void
348 lpm_clean(unsigned int lcore_id)
349 {
350         char lpm_name[MAX_STRING_SIZE];
351         struct rte_lpm *lpm;
352         int i;
353
354         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
355                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d",  lcore_id, i);
356
357                 if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
358                         rte_lpm_free(lpm);
359         }
360 }
361
362 static int
363 lpm_create_free(__attribute__((unused)) void *arg)
364 {
365         unsigned lcore_self = rte_lcore_id();
366         struct rte_lpm *lpm;
367         struct rte_lpm_config config;
368
369         config.max_rules = 4;
370         config.number_tbl8s = 256;
371         config.flags = 0;
372         char lpm_name[MAX_STRING_SIZE];
373         int i;
374
375         WAIT_SYNCHRO_FOR_SLAVES();
376
377         /* create the same lpm simultaneously on all threads */
378         for (i = 0; i < MAX_ITER_TIMES; i++) {
379                 lpm = rte_lpm_create("fr_test_once",  SOCKET_ID_ANY, &config);
380                 if (lpm != NULL)
381                         rte_atomic32_inc(&obj_count);
382         }
383
384         /* create mutiple fbk tables simultaneously */
385         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
386                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
387                 lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
388                 if (NULL == lpm)
389                         return -1;
390
391                 /* verify correct existing and then free all */
392                 if (lpm != rte_lpm_find_existing(lpm_name))
393                         return -1;
394
395                 rte_lpm_free(lpm);
396         }
397
398         /* verify free correct */
399         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
400                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d",  lcore_self, i);
401                 if (NULL != rte_lpm_find_existing(lpm_name))
402                         return -1;
403         }
404
405         return 0;
406 }
407 #endif /* RTE_LIBRTE_LPM */
408
/* One reentrancy test case: an entry point launched concurrently on every
 * participating lcore, its argument, and an optional per-lcore cleanup. */
struct test_case{
	case_func_t    func;                   /* test body run on each lcore */
	void*          arg;                    /* opaque argument passed to func */
	case_clean_t   clean;                  /* frees the objects an lcore created */
	char           name[MAX_STRING_SIZE];  /* label printed in result messages */
};
415
/* All test cases in the test suite; hash/fbk and lpm cases are compiled in
 * only when the corresponding libraries are enabled in the build. */
struct test_case test_cases[] = {
	/* EAL init is a special case: no clean hook, no named object. */
	{ test_eal_init_once,     NULL,  NULL,         "eal init once" },
	{ ring_create_lookup,     NULL,  ring_clean,   "ring create/lookup" },
	{ mempool_create_lookup,  NULL,  mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
	{ hash_create_free,       NULL,  hash_clean,   "hash create/free" },
	{ fbk_create_free,        NULL,  fbk_clean,    "fbk create/free" },
#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
	{ lpm_create_free,        NULL,  lpm_clean,    "lpm create/free" },
#endif /* RTE_LIBRTE_LPM */
};
430
431 /**
432  * launch test case in two separate thread
433  */
434 static int
435 launch_test(struct test_case *pt_case)
436 {
437         int ret = 0;
438         unsigned lcore_id;
439         unsigned cores_save = rte_lcore_count();
440         unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
441         unsigned count;
442
443         if (pt_case->func == NULL)
444                 return -1;
445
446         rte_atomic32_set(&obj_count, 0);
447         rte_atomic32_set(&synchro, 0);
448
449         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
450                 if (cores == 1)
451                         break;
452                 cores--;
453                 rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
454         }
455
456         rte_atomic32_set(&synchro, 1);
457
458         if (pt_case->func(pt_case->arg) < 0)
459                 ret = -1;
460
461         cores = cores_save;
462         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
463                 if (cores == 1)
464                         break;
465                 cores--;
466                 if (rte_eal_wait_lcore(lcore_id) < 0)
467                         ret = -1;
468
469                 if (pt_case->clean != NULL)
470                         pt_case->clean(lcore_id);
471         }
472
473         count = rte_atomic32_read(&obj_count);
474         if (count != 1) {
475                 printf("%s: common object allocated %d times (should be 1)\n",
476                         pt_case->name, count);
477                 ret = -1;
478         }
479
480         return ret;
481 }
482
483 /**
484  * Main entry of func_reentrancy test
485  */
486 static int
487 test_func_reentrancy(void)
488 {
489         uint32_t case_id;
490         struct test_case *pt_case = NULL;
491
492         if (rte_lcore_count() <= 1) {
493                 printf("Not enough lcore for testing\n");
494                 return -1;
495         }
496         else if (rte_lcore_count() > MAX_LCORES)
497                 printf("Too many lcores, some cores will be disabled\n");
498
499         for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id ++) {
500                 pt_case = &test_cases[case_id];
501                 if (pt_case->func == NULL)
502                         continue;
503
504                 if (launch_test(pt_case) < 0) {
505                         printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
506                         return -1;
507                 }
508                 printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
509         }
510
511         return 0;
512 }
513
514 REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);