lib/librte_eal/common/rte_service.c (upstream version 18.08)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
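
/*
 * EAL service cores implementation: services register below as
 * components, are mapped onto service lcores, and are polled from
 * rte_service_runner_func() on each service core.
 */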

#include <stdio.h>
#include <unistd.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <dirent.h>

#include <rte_compat.h>
#include <rte_service.h>
#include "include/rte_service_component.h"

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_memory.h>
#include <rte_malloc.h>

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)
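
/* Flag semantics: SERVICE_F_REGISTERED marks a slot as in use,
 * SERVICE_F_STATS_ENABLED gates the cycle/call accounting, and
 * SERVICE_F_START_CHECK makes rte_service_runstate_get() additionally
 * require at least one mapped lcore.
 */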

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* atomic lock that when set indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_atomic32_t execute_lock;

        /* API set/get-able variables */
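        /* app_runstate is set by the application via
         * rte_service_runstate_set(); comp_runstate is set by the component
         * via rte_service_component_runstate_set(). Both must be
         * RUNSTATE_RUNNING for the service to be run.
         */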
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        rte_atomic32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
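        /* per-lcore flag, set while the service is runnable on that lcore;
         * consumed by rte_service_may_be_active()
         */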
        uint8_t active_on_lcore[RTE_MAX_LCORE];
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
        /* bitmask of service IDs mapped to run on this core */
        uint64_t service_mask;
        uint8_t runstate; /* running or stopped */
        uint8_t is_service_core; /* set if core is currently a service core */

        uint64_t loops;
        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t rte_service_init(void)
{
        if (rte_service_library_initialized) {
                printf("service library init() called, init flag %d\n",
                        rte_service_library_initialized);
                return -EALREADY;
        }

        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
                        sizeof(struct rte_service_spec_impl),
                        RTE_CACHE_LINE_SIZE);
        if (!rte_services) {
                printf("error allocating rte services array\n");
                goto fail_mem;
        }

        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
        if (!lcore_states) {
                printf("error allocating core states array\n");
                goto fail_mem;
        }

        int i;
        struct rte_config *cfg = rte_eal_get_configuration();
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role == ROLE_SERVICE) {
                        if ((unsigned int)i == cfg->master_lcore)
                                continue;
                        rte_service_lcore_add(i);
                }
        }

        rte_service_library_initialized = 1;
        return 0;
fail_mem:
        if (rte_services)
                rte_free(rte_services);
        if (lcore_states)
                rte_free(lcore_states);
        return -ENOMEM;
}

void
rte_service_finalize(void)
{
        if (!rte_service_library_initialized)
                return;

        if (rte_services)
                rte_free(rte_services);

        if (lcore_states)
                rte_free(lcore_states);

        rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_smp_wmb();
        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}
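
/*
 * Illustrative registration sketch (not part of the library);
 * my_service_run and my_ctx are hypothetical application symbols:
 *
 *	static int32_t my_service_run(void *args);
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_service",
 *		.callback = my_service_run,
 *		.callback_userdata = &my_ctx,
 *	};
 *	uint32_t id;
 *	int32_t ret = rte_service_component_register(&spec, &id);
 */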

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;
        rte_smp_wmb();

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->comp_runstate = RUNSTATE_RUNNING;
        else
                s->comp_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (runstate)
                s->app_runstate = RUNSTATE_RUNNING;
        else
                s->app_runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        rte_smp_rmb();

        int check_disabled = !(s->internal_flags & SERVICE_F_START_CHECK);
        int lcore_mapped = (rte_atomic32_read(&s->num_mapped_cores) > 0);

        return (s->app_runstate == RUNSTATE_RUNNING) &&
                (s->comp_runstate == RUNSTATE_RUNNING) &&
                (check_disabled | lcore_mapped);
}

static inline void
rte_service_runner_do_callback(struct rte_service_spec_impl *s,
                               struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}

static inline int32_t
service_run(uint32_t i, int lcore, struct core_state *cs, uint64_t service_mask)
{
        if (!service_valid(i))
                return -EINVAL;
        struct rte_service_spec_impl *s = &rte_services[i];
        if (s->comp_runstate != RUNSTATE_RUNNING ||
                        s->app_runstate != RUNSTATE_RUNNING ||
                        !(service_mask & (UINT64_C(1) << i))) {
                s->active_on_lcore[lcore] = 0;
                return -ENOEXEC;
        }

        s->active_on_lcore[lcore] = 1;

        /* check if cmpset is needed: if the service is MT safe, or if
         * <= 1 core is mapped, atomic ops are not required.
         */
        const int use_atomics = (service_mt_safe(s) == 0) &&
                                (rte_atomic32_read(&s->num_mapped_cores) > 1);
        if (use_atomics) {
                if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
                        return -EBUSY;

                rte_service_runner_do_callback(s, cs, i);
                rte_atomic32_clear(&s->execute_lock);
        } else
                rte_service_runner_do_callback(s, cs, i);

        return 0;
}

int32_t __rte_experimental
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                if (rte_services[id].active_on_lcore[ids[i]])
                        return 1;
        }

        return 0;
}

int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
                uint32_t serialize_mt_unsafe)
{
        /* run service on calling core, using all-ones as the service mask */
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s = &rte_services[id];

        /* Atomically add this core to the mapped cores first, then examine if
         * we can run the service. This avoids a race condition between
         * checking the value, and atomically adding to the mapped count.
         */
        if (serialize_mt_unsafe)
                rte_atomic32_inc(&s->num_mapped_cores);

        if (service_mt_safe(s) == 0 &&
                        rte_atomic32_read(&s->num_mapped_cores) > 1) {
                if (serialize_mt_unsafe)
                        rte_atomic32_dec(&s->num_mapped_cores);
                return -EBUSY;
        }

        int ret = service_run(id, rte_lcore_id(), cs, UINT64_MAX);

        if (serialize_mt_unsafe)
                rte_atomic32_dec(&s->num_mapped_cores);

        return ret;
}
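
/*
 * Illustrative use from an application lcore (sketch): with
 * serialize_mt_unsafe set, concurrent callers of an MT-unsafe service
 * receive -EBUSY instead of racing on the callback:
 *
 *	int32_t ret = rte_service_run_iter_on_app_lcore(id, 1);
 */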

static int32_t
rte_service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        while (lcore_states[lcore].runstate == RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        /* return value ignored as no change to code flow */
                        service_run(i, lcore, cs, service_mask);
                }

                cs->loops++;

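                /* the read barrier ensures the next iteration re-reads the
                 * runstate and service_mask published by other cores
                 */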
                rte_smp_rmb();
        }

        lcore_config[lcore].state = WAIT;

        return 0;
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* do 1:1 core mapping here, with each service getting
                 * assigned a single core by default. With more services than
                 * service cores, services are multiplexed onto the available
                 * cores; with the same number of services as cores the
                 * mapping stays 1:1.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
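
/*
 * Illustrative manual setup (sketch), roughly what an application does
 * instead of the defaults above; assumes valid service_id and lcore_id:
 *
 *	rte_service_lcore_add(lcore_id);
 *	rte_service_map_lcore_set(service_id, lcore_id, 1);
 *	rte_service_runstate_set(service_id, 1);
 *	rte_service_lcore_start(lcore_id);
 */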

static int32_t
service_update(struct rte_service_spec *service, uint32_t lcore,
                uint32_t *set, uint32_t *enabled)
{
        uint32_t i;
        int32_t sid = -1;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if ((struct rte_service_spec *)&rte_services[i] == service &&
                                service_valid(i)) {
                        sid = i;
                        break;
                }
        }

        if (sid == -1 || lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (!lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        rte_atomic32_inc(&rte_services[sid].num_mapped_cores);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        rte_atomic32_dec(&rte_services[sid].num_mapped_cores);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t on = enabled > 0;
        return service_update(&s->spec, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        uint32_t enabled;
        int ret = service_update(&s->spec, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        lcore_states[i].runstate = RUNSTATE_STOPPED;
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                rte_atomic32_set(&rte_services[i].num_mapped_cores, 0);

        rte_smp_wmb();

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        rte_smp_wmb();

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate != RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        if (cs->runstate == RUNSTATE_RUNNING)
                return -EALREADY;

        /* set the core to run state first, and then launch; otherwise the
         * runner would return immediately, as it is the runstate that keeps
         * it in the service poll loop
         */
        lcore_states[lcore].runstate = RUNSTATE_RUNNING;

        int ret = rte_eal_remote_launch(rte_service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        uint64_t service_mask = lcore_states[lcore].service_mask;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        rte_atomic32_read(&rte_services[i].num_mapped_cores));

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        lcore_states[lcore].runstate = RUNSTATE_STOPPED;

        return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

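        /* note: the internal counters are 64-bit; they are truncated to fit
         * the 32-bit attr_value out-parameter here
         */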
        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}

int32_t __rte_experimental
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
                     uint64_t all_cycles, uint32_t reset)
{
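        /* f may be NULL when this is called only to reset the statistics
         * (see rte_service_attr_reset_all()); the reset path returns before
         * anything is printed
         */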
        /* avoid divide by zero */
        if (all_cycles == 0)
                all_cycles = 1;

        int calls = 1;
        if (s->calls != 0)
                calls = s->calls;

        if (reset) {
                s->cycles_spent = 0;
                s->calls = 0;
                return;
        }

        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        int reset = 1;
        rte_service_dump_one(NULL, s, 0, reset);
        return 0;
}

int32_t __rte_experimental
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02" PRIu32 "\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
                if (reset)
                        cs->calls_per_service[i] = 0;
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        uint64_t total_cycles = 0;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                total_cycles += rte_services[i].cycles_spent;
        }

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                uint32_t reset = 0;
                rte_service_dump_one(f, s, total_cycles, reset);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                uint32_t reset = 0;
                rte_service_dump_one(f, &rte_services[i], total_cycles, reset);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                uint32_t reset = 0;
                service_dump_calls_per_lcore(f, i, reset);
        }

        return 0;
}