1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_malloc.h>
10
11 #include "rte_eth_softnic_internals.h"
12 #include "rte_eth_softnic.h"
13
14 #define BYTES_IN_MBPS           (1000 * 1000 / 8)
15 #define SUBPORT_TC_PERIOD       10
16 #define PIPE_TC_PERIOD          40
17
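/* Validate and normalize the soft TM parameters against the hard device
 * rate (in Mbps): pick a default rate when none is given, round nb_queues
 * and each qsize up to a power of two, and check the enq/deq burst sizes.
 */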
18 int
19 tm_params_check(struct pmd_params *params, uint32_t hard_rate)
20 {
21         uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
22         uint32_t i;
23
24         /* rate */
25         if (params->soft.tm.rate) {
26                 if (params->soft.tm.rate > hard_rate_bytes_per_sec)
27                         return -EINVAL;
28         } else {
29                 params->soft.tm.rate =
30                         (hard_rate_bytes_per_sec > UINT32_MAX) ?
31                                 UINT32_MAX : hard_rate_bytes_per_sec;
32         }
33
34         /* nb_queues */
35         if (params->soft.tm.nb_queues == 0)
36                 return -EINVAL;
37
38         if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
39                 params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
40
41         params->soft.tm.nb_queues =
42                 rte_align32pow2(params->soft.tm.nb_queues);
43
44         /* qsize */
45         for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
46                 if (params->soft.tm.qsize[i] == 0)
47                         return -EINVAL;
48
49                 params->soft.tm.qsize[i] =
50                         rte_align32pow2(params->soft.tm.qsize[i]);
51         }
52
53         /* enq_bsz, deq_bsz */
54         if (params->soft.tm.enq_bsz == 0 ||
55                 params->soft.tm.deq_bsz == 0 ||
56                 params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
57                 return -EINVAL;
58
59         return 0;
60 }
61
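/* Reset the TM hierarchy state and initialize its object lists. */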
62 static void
63 tm_hierarchy_init(struct pmd_internals *p)
64 {
65         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
66
67         /* Initialize shaper profile list */
68         TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
69
70         /* Initialize shared shaper list */
71         TAILQ_INIT(&p->soft.tm.h.shared_shapers);
72
73         /* Initialize wred profile list */
74         TAILQ_INIT(&p->soft.tm.h.wred_profiles);
75
76         /* Initialize TM node list */
77         TAILQ_INIT(&p->soft.tm.h.nodes);
78 }
79
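/* Free all hierarchy objects (nodes, WRED profiles, shared shapers,
 * shaper profiles) and clear the hierarchy state.
 */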
80 static void
81 tm_hierarchy_uninit(struct pmd_internals *p)
82 {
83         /* Remove all nodes */
84         for ( ; ; ) {
85                 struct tm_node *tm_node;
86
87                 tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);
88                 if (tm_node == NULL)
89                         break;
90
91                 TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node);
92                 free(tm_node);
93         }
94
95         /* Remove all WRED profiles */
96         for ( ; ; ) {
97                 struct tm_wred_profile *wred_profile;
98
99                 wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles);
100                 if (wred_profile == NULL)
101                         break;
102
103                 TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node);
104                 free(wred_profile);
105         }
106
107         /* Remove all shared shapers */
108         for ( ; ; ) {
109                 struct tm_shared_shaper *shared_shaper;
110
111                 shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers);
112                 if (shared_shaper == NULL)
113                         break;
114
115                 TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node);
116                 free(shared_shaper);
117         }
118
119         /* Remove all shaper profiles */
120         for ( ; ; ) {
121                 struct tm_shaper_profile *shaper_profile;
122
123                 shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles);
124                 if (shaper_profile == NULL)
125                         break;
126
127                 TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles,
128                         shaper_profile, node);
129                 free(shaper_profile);
130         }
131
132         memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
133 }
134
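/* Allocate the enqueue/dequeue mbuf staging arrays on the given NUMA node
 * and initialize an empty TM hierarchy.
 */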
135 int
136 tm_init(struct pmd_internals *p,
137         struct pmd_params *params,
138         int numa_node)
139 {
140         uint32_t enq_bsz = params->soft.tm.enq_bsz;
141         uint32_t deq_bsz = params->soft.tm.deq_bsz;
142
143         p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
144                 2 * enq_bsz * sizeof(struct rte_mbuf *),
145                 0,
146                 numa_node);
147
148         if (p->soft.tm.pkts_enq == NULL)
149                 return -ENOMEM;
150
151         p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
152                 deq_bsz * sizeof(struct rte_mbuf *),
153                 0,
154                 numa_node);
155
156         if (p->soft.tm.pkts_deq == NULL) {
157                 rte_free(p->soft.tm.pkts_enq);
158                 return -ENOMEM;
159         }
160
161         tm_hierarchy_init(p);
162
163         return 0;
164 }
165
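/* Release the TM hierarchy and the enqueue/dequeue staging arrays. */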
166 void
167 tm_free(struct pmd_internals *p)
168 {
169         tm_hierarchy_uninit(p);
170         rte_free(p->soft.tm.pkts_enq);
171         rte_free(p->soft.tm.pkts_deq);
172 }
173
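/* Instantiate the rte_sched port from the frozen hierarchy (fails if the
 * hierarchy has not been committed yet): configure the port, then each
 * subport, then each pipe that has a profile assigned.
 */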
174 int
175 tm_start(struct pmd_internals *p)
176 {
177         struct tm_params *t = &p->soft.tm.params;
178         uint32_t n_subports, subport_id;
179         int status;
180
181         /* Is hierarchy frozen? */
182         if (p->soft.tm.hierarchy_frozen == 0)
183                 return -1;
184
185         /* Port */
186         p->soft.tm.sched = rte_sched_port_config(&t->port_params);
187         if (p->soft.tm.sched == NULL)
188                 return -1;
189
190         /* Subport */
191         n_subports = t->port_params.n_subports_per_port;
192         for (subport_id = 0; subport_id < n_subports; subport_id++) {
193                 uint32_t n_pipes_per_subport =
194                         t->port_params.n_pipes_per_subport;
195                 uint32_t pipe_id;
196
197                 status = rte_sched_subport_config(p->soft.tm.sched,
198                         subport_id,
199                         &t->subport_params[subport_id]);
200                 if (status) {
201                         rte_sched_port_free(p->soft.tm.sched);
202                         return -1;
203                 }
204
205                 /* Pipe */
206                 n_pipes_per_subport = t->port_params.n_pipes_per_subport;
207                 for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
208                         int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
209                                 pipe_id;
210                         int profile_id = t->pipe_to_profile[pos];
211
212                         if (profile_id < 0)
213                                 continue;
214
215                         status = rte_sched_pipe_config(p->soft.tm.sched,
216                                 subport_id,
217                                 pipe_id,
218                                 profile_id);
219                         if (status) {
220                                 rte_sched_port_free(p->soft.tm.sched);
221                                 return -1;
222                         }
223                 }
224         }
225
226         return 0;
227 }
228
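/* Free the rte_sched port and unfreeze the hierarchy. */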
229 void
230 tm_stop(struct pmd_internals *p)
231 {
232         if (p->soft.tm.sched)
233                 rte_sched_port_free(p->soft.tm.sched);
234
235         /* Unfreeze hierarchy */
236         p->soft.tm.hierarchy_frozen = 0;
237 }
238
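/* Lookup helpers: linear search of the hierarchy object lists by ID. */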
239 static struct tm_shaper_profile *
240 tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id)
241 {
242         struct pmd_internals *p = dev->data->dev_private;
243         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
244         struct tm_shaper_profile *sp;
245
246         TAILQ_FOREACH(sp, spl, node)
247                 if (shaper_profile_id == sp->shaper_profile_id)
248                         return sp;
249
250         return NULL;
251 }
252
253 static struct tm_shared_shaper *
254 tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id)
255 {
256         struct pmd_internals *p = dev->data->dev_private;
257         struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers;
258         struct tm_shared_shaper *ss;
259
260         TAILQ_FOREACH(ss, ssl, node)
261                 if (shared_shaper_id == ss->shared_shaper_id)
262                         return ss;
263
264         return NULL;
265 }
266
267 static struct tm_wred_profile *
268 tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id)
269 {
270         struct pmd_internals *p = dev->data->dev_private;
271         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
272         struct tm_wred_profile *wp;
273
274         TAILQ_FOREACH(wp, wpl, node)
275                 if (wred_profile_id == wp->wred_profile_id)
276                         return wp;
277
278         return NULL;
279 }
280
281 static struct tm_node *
282 tm_node_search(struct rte_eth_dev *dev, uint32_t node_id)
283 {
284         struct pmd_internals *p = dev->data->dev_private;
285         struct tm_node_list *nl = &p->soft.tm.h.nodes;
286         struct tm_node *n;
287
288         TAILQ_FOREACH(n, nl, node)
289                 if (n->node_id == node_id)
290                         return n;
291
292         return NULL;
293 }
294
295 static struct tm_node *
296 tm_root_node_present(struct rte_eth_dev *dev)
297 {
298         struct pmd_internals *p = dev->data->dev_private;
299         struct tm_node_list *nl = &p->soft.tm.h.nodes;
300         struct tm_node *n;
301
302         TAILQ_FOREACH(n, nl, node)
303                 if (n->parent_node_id == RTE_TM_NODE_ID_NULL)
304                         return n;
305
306         return NULL;
307 }
308
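/* Map hierarchy nodes to rte_sched indices: subport, pipe and queue IDs
 * are derived from the node's position among its siblings, while the TC ID
 * is taken directly from the node priority.
 */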
309 static uint32_t
310 tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node)
311 {
312         struct pmd_internals *p = dev->data->dev_private;
313         struct tm_node_list *nl = &p->soft.tm.h.nodes;
314         struct tm_node *ns;
315         uint32_t subport_id;
316
317         subport_id = 0;
318         TAILQ_FOREACH(ns, nl, node) {
319                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
320                         continue;
321
322                 if (ns->node_id == subport_node->node_id)
323                         return subport_id;
324
325                 subport_id++;
326         }
327
328         return UINT32_MAX;
329 }
330
331 static uint32_t
332 tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node)
333 {
334         struct pmd_internals *p = dev->data->dev_private;
335         struct tm_node_list *nl = &p->soft.tm.h.nodes;
336         struct tm_node *np;
337         uint32_t pipe_id;
338
339         pipe_id = 0;
340         TAILQ_FOREACH(np, nl, node) {
341                 if (np->level != TM_NODE_LEVEL_PIPE ||
342                         np->parent_node_id != pipe_node->parent_node_id)
343                         continue;
344
345                 if (np->node_id == pipe_node->node_id)
346                         return pipe_id;
347
348                 pipe_id++;
349         }
350
351         return UINT32_MAX;
352 }
353
354 static uint32_t
355 tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node)
356 {
357         return tc_node->priority;
358 }
359
360 static uint32_t
361 tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node)
362 {
363         struct pmd_internals *p = dev->data->dev_private;
364         struct tm_node_list *nl = &p->soft.tm.h.nodes;
365         struct tm_node *nq;
366         uint32_t queue_id;
367
368         queue_id = 0;
369         TAILQ_FOREACH(nq, nl, node) {
370                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
371                         nq->parent_node_id != queue_node->parent_node_id)
372                         continue;
373
374                 if (nq->node_id == queue_node->node_id)
375                         return queue_id;
376
377                 queue_id++;
378         }
379
380         return UINT32_MAX;
381 }
382
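/* Upper bound on the number of nodes per level, derived from nb_queues:
 * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS queues per TC,
 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE TCs per pipe, and at most as many
 * subports as pipes.
 */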
383 static uint32_t
384 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
385 {
386         struct pmd_internals *p = dev->data->dev_private;
387         uint32_t n_queues_max = p->params.soft.tm.nb_queues;
388         uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
389         uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
390         uint32_t n_subports_max = n_pipes_max;
391         uint32_t n_root_max = 1;
392
393         switch (level) {
394         case TM_NODE_LEVEL_PORT:
395                 return n_root_max;
396         case TM_NODE_LEVEL_SUBPORT:
397                 return n_subports_max;
398         case TM_NODE_LEVEL_PIPE:
399                 return n_pipes_max;
400         case TM_NODE_LEVEL_TC:
401                 return n_tc_max;
402         case TM_NODE_LEVEL_QUEUE:
403         default:
404                 return n_queues_max;
405         }
406 }
407
408 /* Traffic manager node type get */
409 static int
410 pmd_tm_node_type_get(struct rte_eth_dev *dev,
411         uint32_t node_id,
412         int *is_leaf,
413         struct rte_tm_error *error)
414 {
415         struct pmd_internals *p = dev->data->dev_private;
416
417         if (is_leaf == NULL)
418                 return -rte_tm_error_set(error,
419                    EINVAL,
420                    RTE_TM_ERROR_TYPE_UNSPECIFIED,
421                    NULL,
422                    rte_strerror(EINVAL));
423
424         if (node_id == RTE_TM_NODE_ID_NULL ||
425                 (tm_node_search(dev, node_id) == NULL))
426                 return -rte_tm_error_set(error,
427                    EINVAL,
428                    RTE_TM_ERROR_TYPE_NODE_ID,
429                    NULL,
430                    rte_strerror(EINVAL));
431
432         *is_leaf = node_id < p->params.soft.tm.nb_queues;
433
434         return 0;
435 }
436
437 #ifdef RTE_SCHED_RED
438 #define WRED_SUPPORTED                                          1
439 #else
440 #define WRED_SUPPORTED                                          0
441 #endif
442
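/* Statistics counters supported per node; queue (leaf) nodes additionally
 * report the current queue occupancy.
 */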
443 #define STATS_MASK_DEFAULT                                      \
444         (RTE_TM_STATS_N_PKTS |                                  \
445         RTE_TM_STATS_N_BYTES |                                  \
446         RTE_TM_STATS_N_PKTS_GREEN_DROPPED |                     \
447         RTE_TM_STATS_N_BYTES_GREEN_DROPPED)
448
449 #define STATS_MASK_QUEUE                                                \
450         (STATS_MASK_DEFAULT |                                   \
451         RTE_TM_STATS_N_PKTS_QUEUED)
452
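/* Baseline TM capabilities; the fields that depend on the configured
 * number of queues are overwritten in pmd_tm_capabilities_get().
 */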
453 static const struct rte_tm_capabilities tm_cap = {
454         .n_nodes_max = UINT32_MAX,
455         .n_levels_max = TM_NODE_LEVEL_MAX,
456
457         .non_leaf_nodes_identical = 0,
458         .leaf_nodes_identical = 1,
459
460         .shaper_n_max = UINT32_MAX,
461         .shaper_private_n_max = UINT32_MAX,
462         .shaper_private_dual_rate_n_max = 0,
463         .shaper_private_rate_min = 1,
464         .shaper_private_rate_max = UINT32_MAX,
465
466         .shaper_shared_n_max = UINT32_MAX,
467         .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX,
468         .shaper_shared_n_shapers_per_node_max = 1,
469         .shaper_shared_dual_rate_n_max = 0,
470         .shaper_shared_rate_min = 1,
471         .shaper_shared_rate_max = UINT32_MAX,
472
473         .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
474         .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS,
475
476         .sched_n_children_max = UINT32_MAX,
477         .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
478         .sched_wfq_n_children_per_group_max = UINT32_MAX,
479         .sched_wfq_n_groups_max = 1,
480         .sched_wfq_weight_max = UINT32_MAX,
481
482         .cman_head_drop_supported = 0,
483         .cman_wred_context_n_max = 0,
484         .cman_wred_context_private_n_max = 0,
485         .cman_wred_context_shared_n_max = 0,
486         .cman_wred_context_shared_n_nodes_per_context_max = 0,
487         .cman_wred_context_shared_n_contexts_per_node_max = 0,
488
489         .mark_vlan_dei_supported = {0, 0, 0},
490         .mark_ip_ecn_tcp_supported = {0, 0, 0},
491         .mark_ip_ecn_sctp_supported = {0, 0, 0},
492         .mark_ip_dscp_supported = {0, 0, 0},
493
494         .dynamic_update_mask = 0,
495
496         .stats_mask = STATS_MASK_QUEUE,
497 };
498
499 /* Traffic manager capabilities get */
500 static int
501 pmd_tm_capabilities_get(struct rte_eth_dev *dev,
502         struct rte_tm_capabilities *cap,
503         struct rte_tm_error *error)
504 {
505         if (cap == NULL)
506                 return -rte_tm_error_set(error,
507                    EINVAL,
508                    RTE_TM_ERROR_TYPE_CAPABILITIES,
509                    NULL,
510                    rte_strerror(EINVAL));
511
512         memcpy(cap, &tm_cap, sizeof(*cap));
513
514         cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
515                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
516                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
517                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) +
518                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
519
520         cap->shaper_private_n_max =
521                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) +
522                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) +
523                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) +
524                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC);
525
526         cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE *
527                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT);
528
529         cap->shaper_n_max = cap->shaper_private_n_max +
530                 cap->shaper_shared_n_max;
531
532         cap->shaper_shared_n_nodes_per_shaper_max =
533                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE);
534
535         cap->sched_n_children_max = RTE_MAX(
536                 tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE),
537                 (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE);
538
539         cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
540
541         if (WRED_SUPPORTED)
542                 cap->cman_wred_context_private_n_max =
543                         tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE);
544
545         cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max +
546                 cap->cman_wred_context_shared_n_max;
547
548         return 0;
549 }
550
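/* Per-level capability templates, adjusted at run time in
 * pmd_tm_level_capabilities_get().
 */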
551 static const struct rte_tm_level_capabilities tm_level_cap[] = {
552         [TM_NODE_LEVEL_PORT] = {
553                 .n_nodes_max = 1,
554                 .n_nodes_nonleaf_max = 1,
555                 .n_nodes_leaf_max = 0,
556                 .non_leaf_nodes_identical = 1,
557                 .leaf_nodes_identical = 0,
558
559                 {.nonleaf = {
560                         .shaper_private_supported = 1,
561                         .shaper_private_dual_rate_supported = 0,
562                         .shaper_private_rate_min = 1,
563                         .shaper_private_rate_max = UINT32_MAX,
564                         .shaper_shared_n_max = 0,
565
566                         .sched_n_children_max = UINT32_MAX,
567                         .sched_sp_n_priorities_max = 1,
568                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
569                         .sched_wfq_n_groups_max = 1,
570                         .sched_wfq_weight_max = 1,
571
572                         .stats_mask = STATS_MASK_DEFAULT,
573                 } },
574         },
575
576         [TM_NODE_LEVEL_SUBPORT] = {
577                 .n_nodes_max = UINT32_MAX,
578                 .n_nodes_nonleaf_max = UINT32_MAX,
579                 .n_nodes_leaf_max = 0,
580                 .non_leaf_nodes_identical = 1,
581                 .leaf_nodes_identical = 0,
582
583                 {.nonleaf = {
584                         .shaper_private_supported = 1,
585                         .shaper_private_dual_rate_supported = 0,
586                         .shaper_private_rate_min = 1,
587                         .shaper_private_rate_max = UINT32_MAX,
588                         .shaper_shared_n_max = 0,
589
590                         .sched_n_children_max = UINT32_MAX,
591                         .sched_sp_n_priorities_max = 1,
592                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
593                         .sched_wfq_n_groups_max = 1,
594 #ifdef RTE_SCHED_SUBPORT_TC_OV
595                         .sched_wfq_weight_max = UINT32_MAX,
596 #else
597                         .sched_wfq_weight_max = 1,
598 #endif
599                         .stats_mask = STATS_MASK_DEFAULT,
600                 } },
601         },
602
603         [TM_NODE_LEVEL_PIPE] = {
604                 .n_nodes_max = UINT32_MAX,
605                 .n_nodes_nonleaf_max = UINT32_MAX,
606                 .n_nodes_leaf_max = 0,
607                 .non_leaf_nodes_identical = 1,
608                 .leaf_nodes_identical = 0,
609
610                 {.nonleaf = {
611                         .shaper_private_supported = 1,
612                         .shaper_private_dual_rate_supported = 0,
613                         .shaper_private_rate_min = 1,
614                         .shaper_private_rate_max = UINT32_MAX,
615                         .shaper_shared_n_max = 0,
616
617                         .sched_n_children_max =
618                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
619                         .sched_sp_n_priorities_max =
620                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
621                         .sched_wfq_n_children_per_group_max = 1,
622                         .sched_wfq_n_groups_max = 0,
623                         .sched_wfq_weight_max = 1,
624
625                         .stats_mask = STATS_MASK_DEFAULT,
626                 } },
627         },
628
629         [TM_NODE_LEVEL_TC] = {
630                 .n_nodes_max = UINT32_MAX,
631                 .n_nodes_nonleaf_max = UINT32_MAX,
632                 .n_nodes_leaf_max = 0,
633                 .non_leaf_nodes_identical = 1,
634                 .leaf_nodes_identical = 0,
635
636                 {.nonleaf = {
637                         .shaper_private_supported = 1,
638                         .shaper_private_dual_rate_supported = 0,
639                         .shaper_private_rate_min = 1,
640                         .shaper_private_rate_max = UINT32_MAX,
641                         .shaper_shared_n_max = 1,
642
643                         .sched_n_children_max =
644                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
645                         .sched_sp_n_priorities_max = 1,
646                         .sched_wfq_n_children_per_group_max =
647                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
648                         .sched_wfq_n_groups_max = 1,
649                         .sched_wfq_weight_max = UINT32_MAX,
650
651                         .stats_mask = STATS_MASK_DEFAULT,
652                 } },
653         },
654
655         [TM_NODE_LEVEL_QUEUE] = {
656                 .n_nodes_max = UINT32_MAX,
657                 .n_nodes_nonleaf_max = 0,
658                 .n_nodes_leaf_max = UINT32_MAX,
659                 .non_leaf_nodes_identical = 0,
660                 .leaf_nodes_identical = 1,
661
662                 {.leaf = {
663                         .shaper_private_supported = 0,
664                         .shaper_private_dual_rate_supported = 0,
665                         .shaper_private_rate_min = 0,
666                         .shaper_private_rate_max = 0,
667                         .shaper_shared_n_max = 0,
668
669                         .cman_head_drop_supported = 0,
670                         .cman_wred_context_private_supported = WRED_SUPPORTED,
671                         .cman_wred_context_shared_n_max = 0,
672
673                         .stats_mask = STATS_MASK_QUEUE,
674                 } },
675         },
676 };
677
678 /* Traffic manager level capabilities get */
679 static int
680 pmd_tm_level_capabilities_get(struct rte_eth_dev *dev,
681         uint32_t level_id,
682         struct rte_tm_level_capabilities *cap,
683         struct rte_tm_error *error)
684 {
685         if (cap == NULL)
686                 return -rte_tm_error_set(error,
687                    EINVAL,
688                    RTE_TM_ERROR_TYPE_CAPABILITIES,
689                    NULL,
690                    rte_strerror(EINVAL));
691
692         if (level_id >= TM_NODE_LEVEL_MAX)
693                 return -rte_tm_error_set(error,
694                    EINVAL,
695                    RTE_TM_ERROR_TYPE_LEVEL_ID,
696                    NULL,
697                    rte_strerror(EINVAL));
698
699         memcpy(cap, &tm_level_cap[level_id], sizeof(*cap));
700
701         switch (level_id) {
702         case TM_NODE_LEVEL_PORT:
703                 cap->nonleaf.sched_n_children_max =
704                         tm_level_get_max_nodes(dev,
705                                 TM_NODE_LEVEL_SUBPORT);
706                 cap->nonleaf.sched_wfq_n_children_per_group_max =
707                         cap->nonleaf.sched_n_children_max;
708                 break;
709
710         case TM_NODE_LEVEL_SUBPORT:
711                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
712                         TM_NODE_LEVEL_SUBPORT);
713                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
714                 cap->nonleaf.sched_n_children_max =
715                         tm_level_get_max_nodes(dev,
716                                 TM_NODE_LEVEL_PIPE);
717                 cap->nonleaf.sched_wfq_n_children_per_group_max =
718                         cap->nonleaf.sched_n_children_max;
719                 break;
720
721         case TM_NODE_LEVEL_PIPE:
722                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
723                         TM_NODE_LEVEL_PIPE);
724                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
725                 break;
726
727         case TM_NODE_LEVEL_TC:
728                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
729                         TM_NODE_LEVEL_TC);
730                 cap->n_nodes_nonleaf_max = cap->n_nodes_max;
731                 break;
732
733         case TM_NODE_LEVEL_QUEUE:
734         default:
735                 cap->n_nodes_max = tm_level_get_max_nodes(dev,
736                         TM_NODE_LEVEL_QUEUE);
737                 cap->n_nodes_leaf_max = cap->n_nodes_max;
738                 break;
739         }
740
741         return 0;
742 }
743
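/* Per-level node capability templates, adjusted at run time in
 * pmd_tm_node_capabilities_get().
 */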
744 static const struct rte_tm_node_capabilities tm_node_cap[] = {
745         [TM_NODE_LEVEL_PORT] = {
746                 .shaper_private_supported = 1,
747                 .shaper_private_dual_rate_supported = 0,
748                 .shaper_private_rate_min = 1,
749                 .shaper_private_rate_max = UINT32_MAX,
750                 .shaper_shared_n_max = 0,
751
752                 {.nonleaf = {
753                         .sched_n_children_max = UINT32_MAX,
754                         .sched_sp_n_priorities_max = 1,
755                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
756                         .sched_wfq_n_groups_max = 1,
757                         .sched_wfq_weight_max = 1,
758                 } },
759
760                 .stats_mask = STATS_MASK_DEFAULT,
761         },
762
763         [TM_NODE_LEVEL_SUBPORT] = {
764                 .shaper_private_supported = 1,
765                 .shaper_private_dual_rate_supported = 0,
766                 .shaper_private_rate_min = 1,
767                 .shaper_private_rate_max = UINT32_MAX,
768                 .shaper_shared_n_max = 0,
769
770                 {.nonleaf = {
771                         .sched_n_children_max = UINT32_MAX,
772                         .sched_sp_n_priorities_max = 1,
773                         .sched_wfq_n_children_per_group_max = UINT32_MAX,
774                         .sched_wfq_n_groups_max = 1,
775                         .sched_wfq_weight_max = UINT32_MAX,
776                 } },
777
778                 .stats_mask = STATS_MASK_DEFAULT,
779         },
780
781         [TM_NODE_LEVEL_PIPE] = {
782                 .shaper_private_supported = 1,
783                 .shaper_private_dual_rate_supported = 0,
784                 .shaper_private_rate_min = 1,
785                 .shaper_private_rate_max = UINT32_MAX,
786                 .shaper_shared_n_max = 0,
787
788                 {.nonleaf = {
789                         .sched_n_children_max =
790                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
791                         .sched_sp_n_priorities_max =
792                                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
793                         .sched_wfq_n_children_per_group_max = 1,
794                         .sched_wfq_n_groups_max = 0,
795                         .sched_wfq_weight_max = 1,
796                 } },
797
798                 .stats_mask = STATS_MASK_DEFAULT,
799         },
800
801         [TM_NODE_LEVEL_TC] = {
802                 .shaper_private_supported = 1,
803                 .shaper_private_dual_rate_supported = 0,
804                 .shaper_private_rate_min = 1,
805                 .shaper_private_rate_max = UINT32_MAX,
806                 .shaper_shared_n_max = 1,
807
808                 {.nonleaf = {
809                         .sched_n_children_max =
810                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
811                         .sched_sp_n_priorities_max = 1,
812                         .sched_wfq_n_children_per_group_max =
813                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
814                         .sched_wfq_n_groups_max = 1,
815                         .sched_wfq_weight_max = UINT32_MAX,
816                 } },
817
818                 .stats_mask = STATS_MASK_DEFAULT,
819         },
820
821         [TM_NODE_LEVEL_QUEUE] = {
822                 .shaper_private_supported = 0,
823                 .shaper_private_dual_rate_supported = 0,
824                 .shaper_private_rate_min = 0,
825                 .shaper_private_rate_max = 0,
826                 .shaper_shared_n_max = 0,
827
828
829                 {.leaf = {
830                         .cman_head_drop_supported = 0,
831                         .cman_wred_context_private_supported = WRED_SUPPORTED,
832                         .cman_wred_context_shared_n_max = 0,
833                 } },
834
835                 .stats_mask = STATS_MASK_QUEUE,
836         },
837 };
838
839 /* Traffic manager node capabilities get */
840 static int
841 pmd_tm_node_capabilities_get(struct rte_eth_dev *dev,
842         uint32_t node_id,
843         struct rte_tm_node_capabilities *cap,
844         struct rte_tm_error *error)
845 {
846         struct tm_node *tm_node;
847
848         if (cap == NULL)
849                 return -rte_tm_error_set(error,
850                    EINVAL,
851                    RTE_TM_ERROR_TYPE_CAPABILITIES,
852                    NULL,
853                    rte_strerror(EINVAL));
854
855         tm_node = tm_node_search(dev, node_id);
856         if (tm_node == NULL)
857                 return -rte_tm_error_set(error,
858                    EINVAL,
859                    RTE_TM_ERROR_TYPE_NODE_ID,
860                    NULL,
861                    rte_strerror(EINVAL));
862
863         memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap));
864
865         switch (tm_node->level) {
866         case TM_NODE_LEVEL_PORT:
867                 cap->nonleaf.sched_n_children_max =
868                         tm_level_get_max_nodes(dev,
869                                 TM_NODE_LEVEL_SUBPORT);
870                 cap->nonleaf.sched_wfq_n_children_per_group_max =
871                         cap->nonleaf.sched_n_children_max;
872                 break;
873
874         case TM_NODE_LEVEL_SUBPORT:
875                 cap->nonleaf.sched_n_children_max =
876                         tm_level_get_max_nodes(dev,
877                                 TM_NODE_LEVEL_PIPE);
878                 cap->nonleaf.sched_wfq_n_children_per_group_max =
879                         cap->nonleaf.sched_n_children_max;
880                 break;
881
882         case TM_NODE_LEVEL_PIPE:
883         case TM_NODE_LEVEL_TC:
884         case TM_NODE_LEVEL_QUEUE:
885         default:
886                 break;
887         }
888
889         return 0;
890 }
891
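/* Validate a shaper profile: single rate only (peak, no committed rate),
 * peak rate and burst size must fit in 32 bits, and the packet length
 * adjustment must equal the Ethernet framing overhead plus FCS (24 bytes).
 */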
892 static int
893 shaper_profile_check(struct rte_eth_dev *dev,
894         uint32_t shaper_profile_id,
895         struct rte_tm_shaper_params *profile,
896         struct rte_tm_error *error)
897 {
898         struct tm_shaper_profile *sp;
899
900         /* Shaper profile ID must not be NONE. */
901         if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE)
902                 return -rte_tm_error_set(error,
903                         EINVAL,
904                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
905                         NULL,
906                         rte_strerror(EINVAL));
907
908         /* Shaper profile must not exist. */
909         sp = tm_shaper_profile_search(dev, shaper_profile_id);
910         if (sp)
911                 return -rte_tm_error_set(error,
912                         EEXIST,
913                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
914                         NULL,
915                         rte_strerror(EEXIST));
916
917         /* Profile must not be NULL. */
918         if (profile == NULL)
919                 return -rte_tm_error_set(error,
920                         EINVAL,
921                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
922                         NULL,
923                         rte_strerror(EINVAL));
924
925         /* Peak rate: non-zero, 32-bit */
926         if (profile->peak.rate == 0 ||
927                 profile->peak.rate >= UINT32_MAX)
928                 return -rte_tm_error_set(error,
929                         EINVAL,
930                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE,
931                         NULL,
932                         rte_strerror(EINVAL));
933
934         /* Peak size: non-zero, 32-bit */
935         if (profile->peak.size == 0 ||
936                 profile->peak.size >= UINT32_MAX)
937                 return -rte_tm_error_set(error,
938                         EINVAL,
939                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE,
940                         NULL,
941                         rte_strerror(EINVAL));
942
943         /* Dual-rate profiles are not supported. */
944         if (profile->committed.rate != 0)
945                 return -rte_tm_error_set(error,
946                         EINVAL,
947                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE,
948                         NULL,
949                         rte_strerror(EINVAL));
950
951         /* Packet length adjust: 24 bytes */
952         if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS)
953                 return -rte_tm_error_set(error,
954                         EINVAL,
955                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN,
956                         NULL,
957                         rte_strerror(EINVAL));
958
959         return 0;
960 }
961
962 /* Traffic manager shaper profile add */
963 static int
964 pmd_tm_shaper_profile_add(struct rte_eth_dev *dev,
965         uint32_t shaper_profile_id,
966         struct rte_tm_shaper_params *profile,
967         struct rte_tm_error *error)
968 {
969         struct pmd_internals *p = dev->data->dev_private;
970         struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles;
971         struct tm_shaper_profile *sp;
972         int status;
973
974         /* Check input params */
975         status = shaper_profile_check(dev, shaper_profile_id, profile, error);
976         if (status)
977                 return status;
978
979         /* Memory allocation */
980         sp = calloc(1, sizeof(struct tm_shaper_profile));
981         if (sp == NULL)
982                 return -rte_tm_error_set(error,
983                         ENOMEM,
984                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
985                         NULL,
986                         rte_strerror(ENOMEM));
987
988         /* Fill in */
989         sp->shaper_profile_id = shaper_profile_id;
990         memcpy(&sp->params, profile, sizeof(sp->params));
991
992         /* Add to list */
993         TAILQ_INSERT_TAIL(spl, sp, node);
994         p->soft.tm.h.n_shaper_profiles++;
995
996         return 0;
997 }
998
999 /* Traffic manager shaper profile delete */
1000 static int
1001 pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev,
1002         uint32_t shaper_profile_id,
1003         struct rte_tm_error *error)
1004 {
1005         struct pmd_internals *p = dev->data->dev_private;
1006         struct tm_shaper_profile *sp;
1007
1008         /* Check existing */
1009         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1010         if (sp == NULL)
1011                 return -rte_tm_error_set(error,
1012                         EINVAL,
1013                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1014                         NULL,
1015                         rte_strerror(EINVAL));
1016
1017         /* Check unused */
1018         if (sp->n_users)
1019                 return -rte_tm_error_set(error,
1020                         EBUSY,
1021                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1022                         NULL,
1023                         rte_strerror(EBUSY));
1024
1025         /* Remove from list */
1026         TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node);
1027         p->soft.tm.h.n_shaper_profiles--;
1028         free(sp);
1029
1030         return 0;
1031 }
1032
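/* Find a TC node that references the given shared shaper (used to locate
 * the subport TC whose rate the shaper controls).
 */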
1033 static struct tm_node *
1034 tm_shared_shaper_get_tc(struct rte_eth_dev *dev,
1035         struct tm_shared_shaper *ss)
1036 {
1037         struct pmd_internals *p = dev->data->dev_private;
1038         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1039         struct tm_node *n;
1040
1041         /* Subport: each TC uses shared shaper  */
1042         TAILQ_FOREACH(n, nl, node) {
1043                 if (n->level != TM_NODE_LEVEL_TC ||
1044                         n->params.n_shared_shapers == 0 ||
1045                         n->params.shared_shaper_id[0] != ss->shared_shaper_id)
1046                         continue;
1047
1048                 return n;
1049         }
1050
1051         return NULL;
1052 }
1053
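/* Runtime update of a subport TC rate: rebuild the subport configuration
 * with the new peak rate, apply it to the running scheduler, then move the
 * shared shaper to the new profile and adjust the profile user counts.
 */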
1054 static int
1055 update_subport_tc_rate(struct rte_eth_dev *dev,
1056         struct tm_node *nt,
1057         struct tm_shared_shaper *ss,
1058         struct tm_shaper_profile *sp_new)
1059 {
1060         struct pmd_internals *p = dev->data->dev_private;
1061         uint32_t tc_id = tm_node_tc_id(dev, nt);
1062
1063         struct tm_node *np = nt->parent_node;
1064
1065         struct tm_node *ns = np->parent_node;
1066         uint32_t subport_id = tm_node_subport_id(dev, ns);
1067
1068         struct rte_sched_subport_params subport_params;
1069
1070         struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev,
1071                 ss->shaper_profile_id);
1072
1073         /* Derive new subport configuration. */
1074         memcpy(&subport_params,
1075                 &p->soft.tm.params.subport_params[subport_id],
1076                 sizeof(subport_params));
1077         subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
1078
1079         /* Update the subport configuration. */
1080         if (rte_sched_subport_config(p->soft.tm.sched,
1081                 subport_id, &subport_params))
1082                 return -1;
1083
1084         /* Commit changes. */
1085         sp_old->n_users--;
1086
1087         ss->shaper_profile_id = sp_new->shaper_profile_id;
1088         sp_new->n_users++;
1089
1090         memcpy(&p->soft.tm.params.subport_params[subport_id],
1091                 &subport_params,
1092                 sizeof(subport_params));
1093
1094         return 0;
1095 }
1096
1097 /* Traffic manager shared shaper add/update */
1098 static int
1099 pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev,
1100         uint32_t shared_shaper_id,
1101         uint32_t shaper_profile_id,
1102         struct rte_tm_error *error)
1103 {
1104         struct pmd_internals *p = dev->data->dev_private;
1105         struct tm_shared_shaper *ss;
1106         struct tm_shaper_profile *sp;
1107         struct tm_node *nt;
1108
1109         /* Shaper profile must be valid. */
1110         sp = tm_shaper_profile_search(dev, shaper_profile_id);
1111         if (sp == NULL)
1112                 return -rte_tm_error_set(error,
1113                         EINVAL,
1114                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID,
1115                         NULL,
1116                         rte_strerror(EINVAL));
1117
1118         /**
1119          * Add new shared shaper
1120          */
1121         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1122         if (ss == NULL) {
1123                 struct tm_shared_shaper_list *ssl =
1124                         &p->soft.tm.h.shared_shapers;
1125
1126                 /* Hierarchy must not be frozen */
1127                 if (p->soft.tm.hierarchy_frozen)
1128                         return -rte_tm_error_set(error,
1129                                 EBUSY,
1130                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1131                                 NULL,
1132                                 rte_strerror(EBUSY));
1133
1134                 /* Memory allocation */
1135                 ss = calloc(1, sizeof(struct tm_shared_shaper));
1136                 if (ss == NULL)
1137                         return -rte_tm_error_set(error,
1138                                 ENOMEM,
1139                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
1140                                 NULL,
1141                                 rte_strerror(ENOMEM));
1142
1143                 /* Fill in */
1144                 ss->shared_shaper_id = shared_shaper_id;
1145                 ss->shaper_profile_id = shaper_profile_id;
1146
1147                 /* Add to list */
1148                 TAILQ_INSERT_TAIL(ssl, ss, node);
1149                 p->soft.tm.h.n_shared_shapers++;
1150
1151                 return 0;
1152         }
1153
1154         /**
1155          * Update existing shared shaper
1156          */
1157         /* Hierarchy must be frozen (run-time update) */
1158         if (p->soft.tm.hierarchy_frozen == 0)
1159                 return -rte_tm_error_set(error,
1160                         EBUSY,
1161                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1162                         NULL,
1163                         rte_strerror(EBUSY));
1164
1165
1166         /* Propagate change. */
1167         nt = tm_shared_shaper_get_tc(dev, ss);
1168         if (update_subport_tc_rate(dev, nt, ss, sp))
1169                 return -rte_tm_error_set(error,
1170                         EINVAL,
1171                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1172                         NULL,
1173                         rte_strerror(EINVAL));
1174
1175         return 0;
1176 }
1177
1178 /* Traffic manager shared shaper delete */
1179 static int
1180 pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev,
1181         uint32_t shared_shaper_id,
1182         struct rte_tm_error *error)
1183 {
1184         struct pmd_internals *p = dev->data->dev_private;
1185         struct tm_shared_shaper *ss;
1186
1187         /* Check existing */
1188         ss = tm_shared_shaper_search(dev, shared_shaper_id);
1189         if (ss == NULL)
1190                 return -rte_tm_error_set(error,
1191                         EINVAL,
1192                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1193                         NULL,
1194                         rte_strerror(EINVAL));
1195
1196         /* Check unused */
1197         if (ss->n_users)
1198                 return -rte_tm_error_set(error,
1199                         EBUSY,
1200                         RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID,
1201                         NULL,
1202                         rte_strerror(EBUSY));
1203
1204         /* Remove from list */
1205         TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node);
1206         p->soft.tm.h.n_shared_shapers--;
1207         free(ss);
1208
1209         return 0;
1210 }
1211
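/* Validate a WRED profile: ID must not be NONE, the profile must not
 * already exist, and for each color min_th <= max_th with max_th > 0.
 */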
1212 static int
1213 wred_profile_check(struct rte_eth_dev *dev,
1214         uint32_t wred_profile_id,
1215         struct rte_tm_wred_params *profile,
1216         struct rte_tm_error *error)
1217 {
1218         struct tm_wred_profile *wp;
1219         enum rte_tm_color color;
1220
1221         /* WRED profile ID must not be NONE. */
1222         if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE)
1223                 return -rte_tm_error_set(error,
1224                         EINVAL,
1225                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1226                         NULL,
1227                         rte_strerror(EINVAL));
1228
1229         /* WRED profile must not exist. */
1230         wp = tm_wred_profile_search(dev, wred_profile_id);
1231         if (wp)
1232                 return -rte_tm_error_set(error,
1233                         EEXIST,
1234                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1235                         NULL,
1236                         rte_strerror(EEXIST));
1237
1238         /* Profile must not be NULL. */
1239         if (profile == NULL)
1240                 return -rte_tm_error_set(error,
1241                         EINVAL,
1242                         RTE_TM_ERROR_TYPE_WRED_PROFILE,
1243                         NULL,
1244                         rte_strerror(EINVAL));
1245
1246         /* min_th <= max_th, max_th > 0  */
1247         for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
1248                 uint16_t min_th = profile->red_params[color].min_th;
1249                 uint16_t max_th = profile->red_params[color].max_th;
1250
1251                 if (min_th > max_th || max_th == 0)
1252                         return -rte_tm_error_set(error,
1253                                 EINVAL,
1254                                 RTE_TM_ERROR_TYPE_WRED_PROFILE,
1255                                 NULL,
1256                                 rte_strerror(EINVAL));
1257         }
1258
1259         return 0;
1260 }
1261
1262 /* Traffic manager WRED profile add */
1263 static int
1264 pmd_tm_wred_profile_add(struct rte_eth_dev *dev,
1265         uint32_t wred_profile_id,
1266         struct rte_tm_wred_params *profile,
1267         struct rte_tm_error *error)
1268 {
1269         struct pmd_internals *p = dev->data->dev_private;
1270         struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles;
1271         struct tm_wred_profile *wp;
1272         int status;
1273
1274         /* Check input params */
1275         status = wred_profile_check(dev, wred_profile_id, profile, error);
1276         if (status)
1277                 return status;
1278
1279         /* Memory allocation */
1280         wp = calloc(1, sizeof(struct tm_wred_profile));
1281         if (wp == NULL)
1282                 return -rte_tm_error_set(error,
1283                         ENOMEM,
1284                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1285                         NULL,
1286                         rte_strerror(ENOMEM));
1287
1288         /* Fill in */
1289         wp->wred_profile_id = wred_profile_id;
1290         memcpy(&wp->params, profile, sizeof(wp->params));
1291
1292         /* Add to list */
1293         TAILQ_INSERT_TAIL(wpl, wp, node);
1294         p->soft.tm.h.n_wred_profiles++;
1295
1296         return 0;
1297 }
1298
1299 /* Traffic manager WRED profile delete */
1300 static int
1301 pmd_tm_wred_profile_delete(struct rte_eth_dev *dev,
1302         uint32_t wred_profile_id,
1303         struct rte_tm_error *error)
1304 {
1305         struct pmd_internals *p = dev->data->dev_private;
1306         struct tm_wred_profile *wp;
1307
1308         /* Check existing */
1309         wp = tm_wred_profile_search(dev, wred_profile_id);
1310         if (wp == NULL)
1311                 return -rte_tm_error_set(error,
1312                         EINVAL,
1313                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1314                         NULL,
1315                         rte_strerror(EINVAL));
1316
1317         /* Check unused */
1318         if (wp->n_users)
1319                 return -rte_tm_error_set(error,
1320                         EBUSY,
1321                         RTE_TM_ERROR_TYPE_WRED_PROFILE_ID,
1322                         NULL,
1323                         rte_strerror(EBUSY));
1324
1325         /* Remove from list */
1326         TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node);
1327         p->soft.tm.h.n_wred_profiles--;
1328         free(wp);
1329
1330         return 0;
1331 }
1332
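/* Per-level checks for node add: each level has its own constraints on
 * priority, weight, private shaper, shared shapers, number of SP
 * priorities and supported stats counters.
 */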
1333 static int
1334 node_add_check_port(struct rte_eth_dev *dev,
1335         uint32_t node_id,
1336         uint32_t parent_node_id __rte_unused,
1337         uint32_t priority,
1338         uint32_t weight,
1339         uint32_t level_id __rte_unused,
1340         struct rte_tm_node_params *params,
1341         struct rte_tm_error *error)
1342 {
1343         struct pmd_internals *p = dev->data->dev_private;
1344         struct tm_shaper_profile *sp = tm_shaper_profile_search(dev,
1345                 params->shaper_profile_id);
1346
1347         /* node type: non-leaf */
1348         if (node_id < p->params.soft.tm.nb_queues)
1349                 return -rte_tm_error_set(error,
1350                         EINVAL,
1351                         RTE_TM_ERROR_TYPE_NODE_ID,
1352                         NULL,
1353                         rte_strerror(EINVAL));
1354
1355         /* Priority must be 0 */
1356         if (priority != 0)
1357                 return -rte_tm_error_set(error,
1358                         EINVAL,
1359                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1360                         NULL,
1361                         rte_strerror(EINVAL));
1362
1363         /* Weight must be 1 */
1364         if (weight != 1)
1365                 return -rte_tm_error_set(error,
1366                         EINVAL,
1367                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1368                         NULL,
1369                         rte_strerror(EINVAL));
1370
1371         /* Shaper must be valid.
1372          * Shaper profile peak rate must fit the configured port rate.
1373          */
1374         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1375                 sp == NULL ||
1376                 sp->params.peak.rate > p->params.soft.tm.rate)
1377                 return -rte_tm_error_set(error,
1378                         EINVAL,
1379                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1380                         NULL,
1381                         rte_strerror(EINVAL));
1382
1383         /* No shared shapers */
1384         if (params->n_shared_shapers != 0)
1385                 return -rte_tm_error_set(error,
1386                         EINVAL,
1387                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1388                         NULL,
1389                         rte_strerror(EINVAL));
1390
1391         /* Number of SP priorities must be 1 */
1392         if (params->nonleaf.n_sp_priorities != 1)
1393                 return -rte_tm_error_set(error,
1394                         EINVAL,
1395                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1396                         NULL,
1397                         rte_strerror(EINVAL));
1398
1399         /* Stats */
1400         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1401                 return -rte_tm_error_set(error,
1402                         EINVAL,
1403                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1404                         NULL,
1405                         rte_strerror(EINVAL));
1406
1407         return 0;
1408 }
1409
1410 static int
1411 node_add_check_subport(struct rte_eth_dev *dev,
1412         uint32_t node_id,
1413         uint32_t parent_node_id __rte_unused,
1414         uint32_t priority,
1415         uint32_t weight,
1416         uint32_t level_id __rte_unused,
1417         struct rte_tm_node_params *params,
1418         struct rte_tm_error *error)
1419 {
1420         struct pmd_internals *p = dev->data->dev_private;
1421
1422         /* node type: non-leaf */
1423         if (node_id < p->params.soft.tm.nb_queues)
1424                 return -rte_tm_error_set(error,
1425                         EINVAL,
1426                         RTE_TM_ERROR_TYPE_NODE_ID,
1427                         NULL,
1428                         rte_strerror(EINVAL));
1429
1430         /* Priority must be 0 */
1431         if (priority != 0)
1432                 return -rte_tm_error_set(error,
1433                         EINVAL,
1434                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1435                         NULL,
1436                         rte_strerror(EINVAL));
1437
1438         /* Weight must be 1 */
1439         if (weight != 1)
1440                 return -rte_tm_error_set(error,
1441                         EINVAL,
1442                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1443                         NULL,
1444                         rte_strerror(EINVAL));
1445
1446         /* Shaper must be valid */
1447         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1448                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1449                 return -rte_tm_error_set(error,
1450                         EINVAL,
1451                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1452                         NULL,
1453                         rte_strerror(EINVAL));
1454
1455         /* No shared shapers */
1456         if (params->n_shared_shapers != 0)
1457                 return -rte_tm_error_set(error,
1458                         EINVAL,
1459                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1460                         NULL,
1461                         rte_strerror(EINVAL));
1462
1463         /* Number of SP priorities must be 1 */
1464         if (params->nonleaf.n_sp_priorities != 1)
1465                 return -rte_tm_error_set(error,
1466                         EINVAL,
1467                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1468                         NULL,
1469                         rte_strerror(EINVAL));
1470
1471         /* Stats */
1472         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1473                 return -rte_tm_error_set(error,
1474                         EINVAL,
1475                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1476                         NULL,
1477                         rte_strerror(EINVAL));
1478
1479         return 0;
1480 }
1481
1482 static int
1483 node_add_check_pipe(struct rte_eth_dev *dev,
1484         uint32_t node_id,
1485         uint32_t parent_node_id __rte_unused,
1486         uint32_t priority,
1487         uint32_t weight __rte_unused,
1488         uint32_t level_id __rte_unused,
1489         struct rte_tm_node_params *params,
1490         struct rte_tm_error *error)
1491 {
1492         struct pmd_internals *p = dev->data->dev_private;
1493
1494         /* node type: non-leaf */
1495         if (node_id < p->params.soft.tm.nb_queues)
1496                 return -rte_tm_error_set(error,
1497                         EINVAL,
1498                         RTE_TM_ERROR_TYPE_NODE_ID,
1499                         NULL,
1500                         rte_strerror(EINVAL));
1501
1502         /* Priority must be 0 */
1503         if (priority != 0)
1504                 return -rte_tm_error_set(error,
1505                         EINVAL,
1506                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1507                         NULL,
1508                         rte_strerror(EINVAL));
1509
1510         /* Shaper must be valid */
1511         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1512                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1513                 return -rte_tm_error_set(error,
1514                         EINVAL,
1515                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1516                         NULL,
1517                         rte_strerror(EINVAL));
1518
1519         /* No shared shapers */
1520         if (params->n_shared_shapers != 0)
1521                 return -rte_tm_error_set(error,
1522                         EINVAL,
1523                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1524                         NULL,
1525                         rte_strerror(EINVAL));
1526
1527         /* Number of SP priorities must be 4 */
1528         if (params->nonleaf.n_sp_priorities !=
1529                 RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1530                 return -rte_tm_error_set(error,
1531                         EINVAL,
1532                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1533                         NULL,
1534                         rte_strerror(EINVAL));
1535
1536         /* WFQ mode must be byte mode */
1537         if (params->nonleaf.wfq_weight_mode != NULL &&
1538                 (params->nonleaf.wfq_weight_mode[0] == 0 ||
1539                 params->nonleaf.wfq_weight_mode[1] == 0 ||
1540                 params->nonleaf.wfq_weight_mode[2] == 0 ||
1541                 params->nonleaf.wfq_weight_mode[3] == 0))
1542                 return -rte_tm_error_set(error,
1543                         EINVAL,
1544                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
1545                         NULL,
1546                         rte_strerror(EINVAL));
1547
1548         /* Stats */
1549         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1550                 return -rte_tm_error_set(error,
1551                         EINVAL,
1552                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1553                         NULL,
1554                         rte_strerror(EINVAL));
1555
1556         return 0;
1557 }
1558
1559 static int
1560 node_add_check_tc(struct rte_eth_dev *dev,
1561         uint32_t node_id,
1562         uint32_t parent_node_id __rte_unused,
1563         uint32_t priority __rte_unused,
1564         uint32_t weight,
1565         uint32_t level_id __rte_unused,
1566         struct rte_tm_node_params *params,
1567         struct rte_tm_error *error)
1568 {
1569         struct pmd_internals *p = dev->data->dev_private;
1570
1571         /* node type: non-leaf */
1572         if (node_id < p->params.soft.tm.nb_queues)
1573                 return -rte_tm_error_set(error,
1574                         EINVAL,
1575                         RTE_TM_ERROR_TYPE_NODE_ID,
1576                         NULL,
1577                         rte_strerror(EINVAL));
1578
1579         /* Weight must be 1 */
1580         if (weight != 1)
1581                 return -rte_tm_error_set(error,
1582                         EINVAL,
1583                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1584                         NULL,
1585                         rte_strerror(EINVAL));
1586
1587         /* Shaper must be valid */
1588         if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
1589                 (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
1590                 return -rte_tm_error_set(error,
1591                         EINVAL,
1592                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1593                         NULL,
1594                         rte_strerror(EINVAL));
1595
1596         /* Single valid shared shaper */
1597         if (params->n_shared_shapers > 1)
1598                 return -rte_tm_error_set(error,
1599                         EINVAL,
1600                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1601                         NULL,
1602                         rte_strerror(EINVAL));
1603
1604         if (params->n_shared_shapers == 1 &&
1605                 (params->shared_shaper_id == NULL ||
1606                 (!tm_shared_shaper_search(dev, params->shared_shaper_id[0]))))
1607                 return -rte_tm_error_set(error,
1608                         EINVAL,
1609                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID,
1610                         NULL,
1611                         rte_strerror(EINVAL));
1612
1613         /* Number of SP priorities must be 1 */
1614         if (params->nonleaf.n_sp_priorities != 1)
1615                 return -rte_tm_error_set(error,
1616                         EINVAL,
1617                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
1618                         NULL,
1619                         rte_strerror(EINVAL));
1620
1621         /* Stats */
1622         if (params->stats_mask & ~STATS_MASK_DEFAULT)
1623                 return -rte_tm_error_set(error,
1624                         EINVAL,
1625                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1626                         NULL,
1627                         rte_strerror(EINVAL));
1628
1629         return 0;
1630 }
1631
1632 static int
1633 node_add_check_queue(struct rte_eth_dev *dev,
1634         uint32_t node_id,
1635         uint32_t parent_node_id __rte_unused,
1636         uint32_t priority,
1637         uint32_t weight __rte_unused,
1638         uint32_t level_id __rte_unused,
1639         struct rte_tm_node_params *params,
1640         struct rte_tm_error *error)
1641 {
1642         struct pmd_internals *p = dev->data->dev_private;
1643
1644         /* node type: leaf */
1645         if (node_id >= p->params.soft.tm.nb_queues)
1646                 return -rte_tm_error_set(error,
1647                         EINVAL,
1648                         RTE_TM_ERROR_TYPE_NODE_ID,
1649                         NULL,
1650                         rte_strerror(EINVAL));
1651
1652         /* Priority must be 0 */
1653         if (priority != 0)
1654                 return -rte_tm_error_set(error,
1655                         EINVAL,
1656                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1657                         NULL,
1658                         rte_strerror(EINVAL));
1659
1660         /* No shaper */
1661         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1662                 return -rte_tm_error_set(error,
1663                         EINVAL,
1664                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
1665                         NULL,
1666                         rte_strerror(EINVAL));
1667
1668         /* No shared shapers */
1669         if (params->n_shared_shapers != 0)
1670                 return -rte_tm_error_set(error,
1671                         EINVAL,
1672                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
1673                         NULL,
1674                         rte_strerror(EINVAL));
1675
1676         /* Congestion management must not be head drop */
1677         if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP)
1678                 return -rte_tm_error_set(error,
1679                         EINVAL,
1680                         RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN,
1681                         NULL,
1682                         rte_strerror(EINVAL));
1683
1684         /* Congestion management set to WRED */
1685         if (params->leaf.cman == RTE_TM_CMAN_WRED) {
1686                 uint32_t wred_profile_id = params->leaf.wred.wred_profile_id;
1687                 struct tm_wred_profile *wp = tm_wred_profile_search(dev,
1688                         wred_profile_id);
1689
1690                 /* WRED profile (for private WRED context) must be valid */
1691                 if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE ||
1692                         wp == NULL)
1693                         return -rte_tm_error_set(error,
1694                                 EINVAL,
1695                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID,
1696                                 NULL,
1697                                 rte_strerror(EINVAL));
1698
1699                 /* No shared WRED contexts */
1700                 if (params->leaf.wred.n_shared_wred_contexts != 0)
1701                         return -rte_tm_error_set(error,
1702                                 EINVAL,
1703                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS,
1704                                 NULL,
1705                                 rte_strerror(EINVAL));
1706         }
1707
1708         /* Stats */
1709         if (params->stats_mask & ~STATS_MASK_QUEUE)
1710                 return -rte_tm_error_set(error,
1711                         EINVAL,
1712                         RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
1713                         NULL,
1714                         rte_strerror(EINVAL));
1715
1716         return 0;
1717 }
1718
1719 static int
1720 node_add_check(struct rte_eth_dev *dev,
1721         uint32_t node_id,
1722         uint32_t parent_node_id,
1723         uint32_t priority,
1724         uint32_t weight,
1725         uint32_t level_id,
1726         struct rte_tm_node_params *params,
1727         struct rte_tm_error *error)
1728 {
1729         struct tm_node *pn;
1730         uint32_t level;
1731         int status;
1732
1733         /* node_id, parent_node_id:
1734          *    -node_id must not be RTE_TM_NODE_ID_NULL
1735          *    -node_id must not be in use
1736          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1737          *        -root node must not exist
1738          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1739          *        -parent_node_id must be valid
1740          */
1741         if (node_id == RTE_TM_NODE_ID_NULL)
1742                 return -rte_tm_error_set(error,
1743                         EINVAL,
1744                         RTE_TM_ERROR_TYPE_NODE_ID,
1745                         NULL,
1746                         rte_strerror(EINVAL));
1747
1748         if (tm_node_search(dev, node_id))
1749                 return -rte_tm_error_set(error,
1750                         EEXIST,
1751                         RTE_TM_ERROR_TYPE_NODE_ID,
1752                         NULL,
1753                         rte_strerror(EEXIST));
1754
1755         if (parent_node_id == RTE_TM_NODE_ID_NULL) {
1756                 pn = NULL;
1757                 if (tm_root_node_present(dev))
1758                         return -rte_tm_error_set(error,
1759                                 EEXIST,
1760                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1761                                 NULL,
1762                                 rte_strerror(EEXIST));
1763         } else {
1764                 pn = tm_node_search(dev, parent_node_id);
1765                 if (pn == NULL)
1766                         return -rte_tm_error_set(error,
1767                                 EINVAL,
1768                                 RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
1769                                 NULL,
1770                                 rte_strerror(EINVAL));
1771         }
1772
1773         /* priority: must be 0 .. 3 */
1774         if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
1775                 return -rte_tm_error_set(error,
1776                         EINVAL,
1777                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
1778                         NULL,
1779                         rte_strerror(EINVAL));
1780
1781         /* weight: must be 1 .. 255 */
1782         if (weight == 0 || weight > UINT8_MAX)
1783                 return -rte_tm_error_set(error,
1784                         EINVAL,
1785                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
1786                         NULL,
1787                         rte_strerror(EINVAL));
1788
1789         /* level_id: if valid, then
1790          *    -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
1791          *        -level_id must be zero
1792          *    -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
1793          *        -level_id must be parent level ID plus one
1794          */
1795         level = (pn == NULL) ? 0 : pn->level + 1;
1796         if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
1797                 return -rte_tm_error_set(error,
1798                         EINVAL,
1799                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1800                         NULL,
1801                         rte_strerror(EINVAL));
1802
1803         /* params: must not be NULL */
1804         if (params == NULL)
1805                 return -rte_tm_error_set(error,
1806                         EINVAL,
1807                         RTE_TM_ERROR_TYPE_NODE_PARAMS,
1808                         NULL,
1809                         rte_strerror(EINVAL));
1810
1811         /* params: per level checks */
1812         switch (level) {
1813         case TM_NODE_LEVEL_PORT:
1814                 status = node_add_check_port(dev, node_id,
1815                         parent_node_id, priority, weight, level_id,
1816                         params, error);
1817                 if (status)
1818                         return status;
1819                 break;
1820
1821         case TM_NODE_LEVEL_SUBPORT:
1822                 status = node_add_check_subport(dev, node_id,
1823                         parent_node_id, priority, weight, level_id,
1824                         params, error);
1825                 if (status)
1826                         return status;
1827                 break;
1828
1829         case TM_NODE_LEVEL_PIPE:
1830                 status = node_add_check_pipe(dev, node_id,
1831                         parent_node_id, priority, weight, level_id,
1832                         params, error);
1833                 if (status)
1834                         return status;
1835                 break;
1836
1837         case TM_NODE_LEVEL_TC:
1838                 status = node_add_check_tc(dev, node_id,
1839                         parent_node_id, priority, weight, level_id,
1840                         params, error);
1841                 if (status)
1842                         return status;
1843                 break;
1844
1845         case TM_NODE_LEVEL_QUEUE:
1846                 status = node_add_check_queue(dev, node_id,
1847                         parent_node_id, priority, weight, level_id,
1848                         params, error);
1849                 if (status)
1850                         return status;
1851                 break;
1852
1853         default:
1854                 return -rte_tm_error_set(error,
1855                         EINVAL,
1856                         RTE_TM_ERROR_TYPE_LEVEL_ID,
1857                         NULL,
1858                         rte_strerror(EINVAL));
1859         }
1860
1861         return 0;
1862 }
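
/*
 * Example (illustrative only; IDs and variable names are hypothetical):
 * leaf (queue) nodes must use node IDs in 0 .. nb_queues - 1, while every
 * non-leaf node (port, subport, pipe, TC) must use an ID >= nb_queues.
 * Assuming nb_queues = 4096, the top of the tree could be built through
 * the generic TM API as follows:
 *
 *    struct rte_tm_node_params np;
 *    struct rte_tm_error err;
 *
 *    memset(&np, 0, sizeof(np));
 *    np.shaper_profile_id = port_shaper_id;
 *    np.nonleaf.n_sp_priorities = 1;
 *
 *    rte_tm_node_add(port_id, 1000000, RTE_TM_NODE_ID_NULL, 0, 1,
 *        RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);  /+ port (root) node +/
 *    rte_tm_node_add(port_id, 900000, 1000000, 0, 1,
 *        RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);  /+ subport 0 +/
 *
 * Pipe, TC and queue nodes follow the per-level rules checked above
 * (e.g. n_sp_priorities = 4 for pipes, one TC per priority 0..3, no
 * shaper profile on queues).
 */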
1863
1864 /* Traffic manager node add */
1865 static int
1866 pmd_tm_node_add(struct rte_eth_dev *dev,
1867         uint32_t node_id,
1868         uint32_t parent_node_id,
1869         uint32_t priority,
1870         uint32_t weight,
1871         uint32_t level_id,
1872         struct rte_tm_node_params *params,
1873         struct rte_tm_error *error)
1874 {
1875         struct pmd_internals *p = dev->data->dev_private;
1876         struct tm_node_list *nl = &p->soft.tm.h.nodes;
1877         struct tm_node *n;
1878         uint32_t i;
1879         int status;
1880
1881         /* Checks */
1882         if (p->soft.tm.hierarchy_frozen)
1883                 return -rte_tm_error_set(error,
1884                         EBUSY,
1885                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1886                         NULL,
1887                         rte_strerror(EBUSY));
1888
1889         status = node_add_check(dev, node_id, parent_node_id, priority, weight,
1890                 level_id, params, error);
1891         if (status)
1892                 return status;
1893
1894         /* Memory allocation */
1895         n = calloc(1, sizeof(struct tm_node));
1896         if (n == NULL)
1897                 return -rte_tm_error_set(error,
1898                         ENOMEM,
1899                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1900                         NULL,
1901                         rte_strerror(ENOMEM));
1902
1903         /* Fill in */
1904         n->node_id = node_id;
1905         n->parent_node_id = parent_node_id;
1906         n->priority = priority;
1907         n->weight = weight;
1908
1909         if (parent_node_id != RTE_TM_NODE_ID_NULL) {
1910                 n->parent_node = tm_node_search(dev, parent_node_id);
1911                 n->level = n->parent_node->level + 1;
1912         }
1913
1914         if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE)
1915                 n->shaper_profile = tm_shaper_profile_search(dev,
1916                         params->shaper_profile_id);
1917
1918         if (n->level == TM_NODE_LEVEL_QUEUE &&
1919                 params->leaf.cman == RTE_TM_CMAN_WRED)
1920                 n->wred_profile = tm_wred_profile_search(dev,
1921                         params->leaf.wred.wred_profile_id);
1922
1923         memcpy(&n->params, params, sizeof(n->params));
1924
1925         /* Add to list */
1926         TAILQ_INSERT_TAIL(nl, n, node);
1927         p->soft.tm.h.n_nodes++;
1928
1929         /* Update dependencies */
1930         if (n->parent_node)
1931                 n->parent_node->n_children++;
1932
1933         if (n->shaper_profile)
1934                 n->shaper_profile->n_users++;
1935
1936         for (i = 0; i < params->n_shared_shapers; i++) {
1937                 struct tm_shared_shaper *ss;
1938
1939                 ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]);
1940                 ss->n_users++;
1941         }
1942
1943         if (n->wred_profile)
1944                 n->wred_profile->n_users++;
1945
1946         p->soft.tm.h.n_tm_nodes[n->level]++;
1947
1948         return 0;
1949 }
1950
1951 /* Traffic manager node delete */
1952 static int
1953 pmd_tm_node_delete(struct rte_eth_dev *dev,
1954         uint32_t node_id,
1955         struct rte_tm_error *error)
1956 {
1957         struct pmd_internals *p = dev->data->dev_private;
1958         struct tm_node *n;
1959         uint32_t i;
1960
1961         /* Check hierarchy changes are currently allowed */
1962         if (p->soft.tm.hierarchy_frozen)
1963                 return -rte_tm_error_set(error,
1964                         EBUSY,
1965                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
1966                         NULL,
1967                         rte_strerror(EBUSY));
1968
1969         /* Check existing */
1970         n = tm_node_search(dev, node_id);
1971         if (n == NULL)
1972                 return -rte_tm_error_set(error,
1973                         EINVAL,
1974                         RTE_TM_ERROR_TYPE_NODE_ID,
1975                         NULL,
1976                         rte_strerror(EINVAL));
1977
1978         /* Check unused */
1979         if (n->n_children)
1980                 return -rte_tm_error_set(error,
1981                         EBUSY,
1982                         RTE_TM_ERROR_TYPE_NODE_ID,
1983                         NULL,
1984                         rte_strerror(EBUSY));
1985
1986         /* Update dependencies */
1987         p->soft.tm.h.n_tm_nodes[n->level]--;
1988
1989         if (n->wred_profile)
1990                 n->wred_profile->n_users--;
1991
1992         for (i = 0; i < n->params.n_shared_shapers; i++) {
1993                 struct tm_shared_shaper *ss;
1994
1995                 ss = tm_shared_shaper_search(dev,
1996                                 n->params.shared_shaper_id[i]);
1997                 ss->n_users--;
1998         }
1999
2000         if (n->shaper_profile)
2001                 n->shaper_profile->n_users--;
2002
2003         if (n->parent_node)
2004                 n->parent_node->n_children--;
2005
2006         /* Remove from list */
2007         TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node);
2008         p->soft.tm.h.n_nodes--;
2009         free(n);
2010
2011         return 0;
2012 }
2013
2014
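/*
 * Flatten the TC and queue sub-tree of pipe node np into an rte_sched
 * pipe profile. Queue WRR weights are stored at index
 * (TC priority * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue position),
 * e.g. with 4 queues per TC, the second queue (position 1) of TC
 * priority 2 lands in wrr_weights[2 * 4 + 1] = wrr_weights[9].
 */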
2015 static void
2016 pipe_profile_build(struct rte_eth_dev *dev,
2017         struct tm_node *np,
2018         struct rte_sched_pipe_params *pp)
2019 {
2020         struct pmd_internals *p = dev->data->dev_private;
2021         struct tm_hierarchy *h = &p->soft.tm.h;
2022         struct tm_node_list *nl = &h->nodes;
2023         struct tm_node *nt, *nq;
2024
2025         memset(pp, 0, sizeof(*pp));
2026
2027         /* Pipe */
2028         pp->tb_rate = np->shaper_profile->params.peak.rate;
2029         pp->tb_size = np->shaper_profile->params.peak.size;
2030
2031         /* Traffic Class (TC) */
2032         pp->tc_period = PIPE_TC_PERIOD;
2033
2034 #ifdef RTE_SCHED_SUBPORT_TC_OV
2035         pp->tc_ov_weight = np->weight;
2036 #endif
2037
2038         TAILQ_FOREACH(nt, nl, node) {
2039                 uint32_t queue_id = 0;
2040
2041                 if (nt->level != TM_NODE_LEVEL_TC ||
2042                         nt->parent_node_id != np->node_id)
2043                         continue;
2044
2045                 pp->tc_rate[nt->priority] =
2046                         nt->shaper_profile->params.peak.rate;
2047
2048                 /* Queue */
2049                 TAILQ_FOREACH(nq, nl, node) {
2050                         uint32_t pipe_queue_id;
2051
2052                         if (nq->level != TM_NODE_LEVEL_QUEUE ||
2053                                 nq->parent_node_id != nt->node_id)
2054                                 continue;
2055
2056                         pipe_queue_id = nt->priority *
2057                                 RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2058                         pp->wrr_weights[pipe_queue_id] = nq->weight;
2059
2060                         queue_id++;
2061                 }
2062         }
2063 }
2064
2065 static int
2066 pipe_profile_free_exists(struct rte_eth_dev *dev,
2067         uint32_t *pipe_profile_id)
2068 {
2069         struct pmd_internals *p = dev->data->dev_private;
2070         struct tm_params *t = &p->soft.tm.params;
2071
2072         if (t->n_pipe_profiles < RTE_SCHED_PIPE_PROFILES_PER_PORT) {
2073                 *pipe_profile_id = t->n_pipe_profiles;
2074                 return 1;
2075         }
2076
2077         return 0;
2078 }
2079
2080 static int
2081 pipe_profile_exists(struct rte_eth_dev *dev,
2082         struct rte_sched_pipe_params *pp,
2083         uint32_t *pipe_profile_id)
2084 {
2085         struct pmd_internals *p = dev->data->dev_private;
2086         struct tm_params *t = &p->soft.tm.params;
2087         uint32_t i;
2088
2089         for (i = 0; i < t->n_pipe_profiles; i++)
2090                 if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) {
2091                         if (pipe_profile_id)
2092                                 *pipe_profile_id = i;
2093                         return 1;
2094                 }
2095
2096         return 0;
2097 }
2098
2099 static void
2100 pipe_profile_install(struct rte_eth_dev *dev,
2101         struct rte_sched_pipe_params *pp,
2102         uint32_t pipe_profile_id)
2103 {
2104         struct pmd_internals *p = dev->data->dev_private;
2105         struct tm_params *t = &p->soft.tm.params;
2106
2107         memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp));
2108         t->n_pipe_profiles++;
2109 }
2110
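/*
 * Record which pipe profile a given pipe uses. pipe_to_profile[] is
 * indexed by the flat pipe position subport_id * n_pipes_per_subport +
 * pipe_id; with, say, 1024 pipes per subport, pipe 3 of subport 1 maps
 * to entry 1 * 1024 + 3 = 1027. pipe_profile_get() below reads the same
 * mapping back.
 */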
2111 static void
2112 pipe_profile_mark(struct rte_eth_dev *dev,
2113         uint32_t subport_id,
2114         uint32_t pipe_id,
2115         uint32_t pipe_profile_id)
2116 {
2117         struct pmd_internals *p = dev->data->dev_private;
2118         struct tm_hierarchy *h = &p->soft.tm.h;
2119         struct tm_params *t = &p->soft.tm.params;
2120         uint32_t n_pipes_per_subport, pos;
2121
2122         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2123                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2124         pos = subport_id * n_pipes_per_subport + pipe_id;
2125
2126         t->pipe_to_profile[pos] = pipe_profile_id;
2127 }
2128
2129 static struct rte_sched_pipe_params *
2130 pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np)
2131 {
2132         struct pmd_internals *p = dev->data->dev_private;
2133         struct tm_hierarchy *h = &p->soft.tm.h;
2134         struct tm_params *t = &p->soft.tm.params;
2135         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2136                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2137
2138         uint32_t subport_id = tm_node_subport_id(dev, np->parent_node);
2139         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2140
2141         uint32_t pos = subport_id * n_pipes_per_subport + pipe_id;
2142         uint32_t pipe_profile_id = t->pipe_to_profile[pos];
2143
2144         return &t->pipe_profiles[pipe_profile_id];
2145 }
2146
2147 static int
2148 pipe_profiles_generate(struct rte_eth_dev *dev)
2149 {
2150         struct pmd_internals *p = dev->data->dev_private;
2151         struct tm_hierarchy *h = &p->soft.tm.h;
2152         struct tm_node_list *nl = &h->nodes;
2153         struct tm_node *ns, *np;
2154         uint32_t subport_id;
2155
2156         /* Objective: Fill in the following fields in struct tm_params:
2157          *    - pipe_profiles
2158          *    - n_pipe_profiles
2159          *    - pipe_to_profile
2160          */
2161
2162         subport_id = 0;
2163         TAILQ_FOREACH(ns, nl, node) {
2164                 uint32_t pipe_id;
2165
2166                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2167                         continue;
2168
2169                 pipe_id = 0;
2170                 TAILQ_FOREACH(np, nl, node) {
2171                         struct rte_sched_pipe_params pp;
2172                         uint32_t pos;
2173
2174                         if (np->level != TM_NODE_LEVEL_PIPE ||
2175                                 np->parent_node_id != ns->node_id)
2176                                 continue;
2177
2178                         pipe_profile_build(dev, np, &pp);
2179
2180                         if (!pipe_profile_exists(dev, &pp, &pos)) {
2181                                 if (!pipe_profile_free_exists(dev, &pos))
2182                                         return -1;
2183
2184                                 pipe_profile_install(dev, &pp, pos);
2185                         }
2186
2187                         pipe_profile_mark(dev, subport_id, pipe_id, pos);
2188
2189                         pipe_id++;
2190                 }
2191
2192                 subport_id++;
2193         }
2194
2195         return 0;
2196 }
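
/*
 * Example (illustrative only): pipes with identical sub-trees share a
 * single rte_sched pipe profile. If a subport holds 1024 pipes that all
 * use the same shapers and queue weights, pipe_profiles_generate() emits
 * one profile and points all 1024 pipe_to_profile[] entries at it; each
 * distinct pipe configuration consumes one of the
 * RTE_SCHED_PIPE_PROFILES_PER_PORT slots.
 */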
2197
2198 static struct tm_wred_profile *
2199 tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id)
2200 {
2201         struct pmd_internals *p = dev->data->dev_private;
2202         struct tm_hierarchy *h = &p->soft.tm.h;
2203         struct tm_node_list *nl = &h->nodes;
2204         struct tm_node *nq;
2205
2206         TAILQ_FOREACH(nq, nl, node) {
2207                 if (nq->level != TM_NODE_LEVEL_QUEUE ||
2208                         nq->parent_node->priority != tc_id)
2209                         continue;
2210
2211                 return nq->wred_profile;
2212         }
2213
2214         return NULL;
2215 }
2216
2217 #ifdef RTE_SCHED_RED
2218
2219 static void
2220 wred_profiles_set(struct rte_eth_dev *dev)
2221 {
2222         struct pmd_internals *p = dev->data->dev_private;
2223         struct rte_sched_port_params *pp = &p->soft.tm.params.port_params;
2224         uint32_t tc_id;
2225         enum rte_tm_color color;
2226
2227         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++)
2228                 for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
2229                         struct rte_red_params *dst =
2230                                 &pp->red_params[tc_id][color];
2231                         struct tm_wred_profile *src_wp =
2232                                 tm_tc_wred_profile_get(dev, tc_id);
2233                         struct rte_tm_red_params *src =
2234                                 &src_wp->params.red_params[color];
2235
2236                         memcpy(dst, src, sizeof(*dst));
2237                 }
2238 }
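
/*
 * Note: the memcpy() above assumes that struct rte_tm_red_params and
 * struct rte_red_params carry the same fields (min_th, max_th, maxp_inv,
 * wq_log2) with the same layout in this DPDK version.
 */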
2239
2240 #else
2241
2242 #define wred_profiles_set(dev)
2243
2244 #endif
2245
2246 static struct tm_shared_shaper *
2247 tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node)
2248 {
2249         return (tc_node->params.n_shared_shapers) ?
2250                 tm_shared_shaper_search(dev,
2251                         tc_node->params.shared_shaper_id[0]) :
2252                 NULL;
2253 }
2254
2255 static struct tm_shared_shaper *
2256 tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev,
2257         struct tm_node *subport_node,
2258         uint32_t tc_id)
2259 {
2260         struct pmd_internals *p = dev->data->dev_private;
2261         struct tm_node_list *nl = &p->soft.tm.h.nodes;
2262         struct tm_node *n;
2263
2264         TAILQ_FOREACH(n, nl, node) {
2265                 if (n->level != TM_NODE_LEVEL_TC ||
2266                         n->parent_node->parent_node_id !=
2267                                 subport_node->node_id ||
2268                         n->priority != tc_id)
2269                         continue;
2270
2271                 return tm_tc_shared_shaper_get(dev, n);
2272         }
2273
2274         return NULL;
2275 }
2276
2277 static int
2278 hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error)
2279 {
2280         struct pmd_internals *p = dev->data->dev_private;
2281         struct tm_hierarchy *h = &p->soft.tm.h;
2282         struct tm_node_list *nl = &h->nodes;
2283         struct tm_shared_shaper_list *ssl = &h->shared_shapers;
2284         struct tm_wred_profile_list *wpl = &h->wred_profiles;
2285         struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq;
2286         struct tm_shared_shaper *ss;
2287
2288         uint32_t n_pipes_per_subport;
2289
2290         /* Root node exists. */
2291         if (nr == NULL)
2292                 return -rte_tm_error_set(error,
2293                         EINVAL,
2294                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2295                         NULL,
2296                         rte_strerror(EINVAL));
2297
2298         /* There is at least one subport, max is not exceeded. */
2299         if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS)
2300                 return -rte_tm_error_set(error,
2301                         EINVAL,
2302                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2303                         NULL,
2304                         rte_strerror(EINVAL));
2305
2306         /* There is at least one pipe. */
2307         if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0)
2308                 return -rte_tm_error_set(error,
2309                         EINVAL,
2310                         RTE_TM_ERROR_TYPE_LEVEL_ID,
2311                         NULL,
2312                         rte_strerror(EINVAL));
2313
2314         /* Number of pipes is the same for all subports. Maximum number of pipes
2315          * per subport is not exceeded.
2316          */
2317         n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2318                 h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
2319
2320         if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT)
2321                 return -rte_tm_error_set(error,
2322                         EINVAL,
2323                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2324                         NULL,
2325                         rte_strerror(EINVAL));
2326
2327         TAILQ_FOREACH(ns, nl, node) {
2328                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2329                         continue;
2330
2331                 if (ns->n_children != n_pipes_per_subport)
2332                         return -rte_tm_error_set(error,
2333                                 EINVAL,
2334                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2335                                 NULL,
2336                                 rte_strerror(EINVAL));
2337         }
2338
2339         /* Each pipe has exactly 4 TCs, with exactly one TC for each priority */
2340         TAILQ_FOREACH(np, nl, node) {
2341                 uint32_t mask = 0, mask_expected =
2342                         RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
2343                                 uint32_t);
2344
2345                 if (np->level != TM_NODE_LEVEL_PIPE)
2346                         continue;
2347
2348                 if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
2349                         return -rte_tm_error_set(error,
2350                                 EINVAL,
2351                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2352                                 NULL,
2353                                 rte_strerror(EINVAL));
2354
2355                 TAILQ_FOREACH(nt, nl, node) {
2356                         if (nt->level != TM_NODE_LEVEL_TC ||
2357                                 nt->parent_node_id != np->node_id)
2358                                 continue;
2359
2360                         mask |= 1 << nt->priority;
2361                 }
2362
2363                 if (mask != mask_expected)
2364                         return -rte_tm_error_set(error,
2365                                 EINVAL,
2366                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2367                                 NULL,
2368                                 rte_strerror(EINVAL));
2369         }
2370
2371         /* Each TC has exactly 4 packet queues. */
2372         TAILQ_FOREACH(nt, nl, node) {
2373                 if (nt->level != TM_NODE_LEVEL_TC)
2374                         continue;
2375
2376                 if (nt->n_children != RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
2377                         return -rte_tm_error_set(error,
2378                                 EINVAL,
2379                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2380                                 NULL,
2381                                 rte_strerror(EINVAL));
2382         }
2383
2384         /**
2385          * Shared shapers:
2386          *    -For each TC #i, all pipes in the same subport use the same
2387          *     shared shaper (or no shared shaper) for their TC#i.
2388          *    -Each shared shaper needs to have at least one user. All its
2389          *     users have to be TC nodes with the same priority and the same
2390          *     subport.
2391          */
2392         TAILQ_FOREACH(ns, nl, node) {
2393                 struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2394                 uint32_t id;
2395
2396                 if (ns->level != TM_NODE_LEVEL_SUBPORT)
2397                         continue;
2398
2399                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++)
2400                         s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id);
2401
2402                 TAILQ_FOREACH(nt, nl, node) {
2403                         struct tm_shared_shaper *subport_ss, *tc_ss;
2404
2405                         if (nt->level != TM_NODE_LEVEL_TC ||
2406                                 nt->parent_node->parent_node_id !=
2407                                         ns->node_id)
2408                                 continue;
2409
2410                         subport_ss = s[nt->priority];
2411                         tc_ss = tm_tc_shared_shaper_get(dev, nt);
2412
2413                         if (subport_ss == NULL && tc_ss == NULL)
2414                                 continue;
2415
2416                         if ((subport_ss == NULL && tc_ss != NULL) ||
2417                                 (subport_ss != NULL && tc_ss == NULL) ||
2418                                 subport_ss->shared_shaper_id !=
2419                                         tc_ss->shared_shaper_id)
2420                                 return -rte_tm_error_set(error,
2421                                         EINVAL,
2422                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2423                                         NULL,
2424                                         rte_strerror(EINVAL));
2425                 }
2426         }
2427
2428         TAILQ_FOREACH(ss, ssl, node) {
2429                 struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss);
2430                 uint32_t n_users = 0;
2431
2432                 if (nt_any != NULL)
2433                         TAILQ_FOREACH(nt, nl, node) {
2434                                 if (nt->level != TM_NODE_LEVEL_TC ||
2435                                         nt->priority != nt_any->priority ||
2436                                         nt->parent_node->parent_node_id !=
2437                                         nt_any->parent_node->parent_node_id)
2438                                         continue;
2439
2440                                 n_users++;
2441                         }
2442
2443                 if (ss->n_users == 0 || ss->n_users != n_users)
2444                         return -rte_tm_error_set(error,
2445                                 EINVAL,
2446                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2447                                 NULL,
2448                                 rte_strerror(EINVAL));
2449         }
2450
2451         /* Not too many pipe profiles. */
2452         if (pipe_profiles_generate(dev))
2453                 return -rte_tm_error_set(error,
2454                         EINVAL,
2455                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2456                         NULL,
2457                         rte_strerror(EINVAL));
2458
2459         /**
2460          * WRED (when used, i.e. at least one WRED profile defined):
2461          *    -Each WRED profile must have at least one user.
2462          *    -All leaf nodes must have their private WRED context enabled.
2463          *    -For each TC #i, all leaf nodes must use the same WRED profile
2464          *     for their private WRED context.
2465          */
2466         if (h->n_wred_profiles) {
2467                 struct tm_wred_profile *wp;
2468                 struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2469                 uint32_t id;
2470
2471                 TAILQ_FOREACH(wp, wpl, node)
2472                         if (wp->n_users == 0)
2473                                 return -rte_tm_error_set(error,
2474                                         EINVAL,
2475                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2476                                         NULL,
2477                                         rte_strerror(EINVAL));
2478
2479                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
2480                         w[id] = tm_tc_wred_profile_get(dev, id);
2481
2482                         if (w[id] == NULL)
2483                                 return -rte_tm_error_set(error,
2484                                         EINVAL,
2485                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2486                                         NULL,
2487                                         rte_strerror(EINVAL));
2488                 }
2489
2490                 TAILQ_FOREACH(nq, nl, node) {
2491                         uint32_t id;
2492
2493                         if (nq->level != TM_NODE_LEVEL_QUEUE)
2494                                 continue;
2495
2496                         id = nq->parent_node->priority;
2497
2498                         if (nq->wred_profile == NULL ||
2499                                 nq->wred_profile->wred_profile_id !=
2500                                         w[id]->wred_profile_id)
2501                                 return -rte_tm_error_set(error,
2502                                         EINVAL,
2503                                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2504                                         NULL,
2505                                         rte_strerror(EINVAL));
2506                 }
2507         }
2508
2509         return 0;
2510 }
2511
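/*
 * Translate the frozen TM hierarchy into the rte_sched port and subport
 * blueprints used when the scheduler is instantiated. For each subport,
 * tc_rate[i] comes from the shared shaper attached to TC #i when one
 * exists, and falls back to the subport's own shaper otherwise.
 */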
2512 static void
2513 hierarchy_blueprints_create(struct rte_eth_dev *dev)
2514 {
2515         struct pmd_internals *p = dev->data->dev_private;
2516         struct tm_params *t = &p->soft.tm.params;
2517         struct tm_hierarchy *h = &p->soft.tm.h;
2518
2519         struct tm_node_list *nl = &h->nodes;
2520         struct tm_node *root = tm_root_node_present(dev), *n;
2521
2522         uint32_t subport_id;
2523
2524         t->port_params = (struct rte_sched_port_params) {
2525                 .name = dev->data->name,
2526                 .socket = dev->data->numa_node,
2527                 .rate = root->shaper_profile->params.peak.rate,
2528                 .mtu = dev->data->mtu,
2529                 .frame_overhead =
2530                         root->shaper_profile->params.pkt_length_adjust,
2531                 .n_subports_per_port = root->n_children,
2532                 .n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
2533                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
2534                 .qsize = {p->params.soft.tm.qsize[0],
2535                         p->params.soft.tm.qsize[1],
2536                         p->params.soft.tm.qsize[2],
2537                         p->params.soft.tm.qsize[3],
2538                 },
2539                 .pipe_profiles = t->pipe_profiles,
2540                 .n_pipe_profiles = t->n_pipe_profiles,
2541         };
2542
2543         wred_profiles_set(dev);
2544
2545         subport_id = 0;
2546         TAILQ_FOREACH(n, nl, node) {
2547                 uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
2548                 uint32_t i;
2549
2550                 if (n->level != TM_NODE_LEVEL_SUBPORT)
2551                         continue;
2552
2553                 for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
2554                         struct tm_shared_shaper *ss;
2555                         struct tm_shaper_profile *sp;
2556
2557                         ss = tm_subport_tc_shared_shaper_get(dev, n, i);
2558                         sp = (ss) ? tm_shaper_profile_search(dev,
2559                                 ss->shaper_profile_id) :
2560                                 n->shaper_profile;
2561                         tc_rate[i] = sp->params.peak.rate;
2562                 }
2563
2564                 t->subport_params[subport_id] =
2565                         (struct rte_sched_subport_params) {
2566                                 .tb_rate = n->shaper_profile->params.peak.rate,
2567                                 .tb_size = n->shaper_profile->params.peak.size,
2568
2569                                 .tc_rate = {tc_rate[0],
2570                                         tc_rate[1],
2571                                         tc_rate[2],
2572                                         tc_rate[3],
2573                         },
2574                         .tc_period = SUBPORT_TC_PERIOD,
2575                 };
2576
2577                 subport_id++;
2578         }
2579 }
2580
2581 /* Traffic manager hierarchy commit */
2582 static int
2583 pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
2584         int clear_on_fail,
2585         struct rte_tm_error *error)
2586 {
2587         struct pmd_internals *p = dev->data->dev_private;
2588         int status;
2589
2590         /* Checks */
2591         if (p->soft.tm.hierarchy_frozen)
2592                 return -rte_tm_error_set(error,
2593                         EBUSY,
2594                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2595                         NULL,
2596                         rte_strerror(EBUSY));
2597
2598         status = hierarchy_commit_check(dev, error);
2599         if (status) {
2600                 if (clear_on_fail) {
2601                         tm_hierarchy_uninit(p);
2602                         tm_hierarchy_init(p);
2603                 }
2604
2605                 return status;
2606         }
2607
2608         /* Create blueprints */
2609         hierarchy_blueprints_create(dev);
2610
2611         /* Freeze hierarchy */
2612         p->soft.tm.hierarchy_frozen = 1;
2613
2614         return 0;
2615 }
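
/*
 * Example (illustrative only): once the tree has been built with
 * rte_tm_node_add(), the application commits it through the generic API,
 * typically requesting clear-on-fail:
 *
 *    struct rte_tm_error err;
 *
 *    if (rte_tm_hierarchy_commit(port_id, 1, &err))
 *        printf("TM commit failed: %s\n", err.message ? err.message : "");
 *
 * After a successful commit the hierarchy is frozen and subsequent node
 * add/delete calls on this device fail with EBUSY.
 */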
2616
2617 #ifdef RTE_SCHED_SUBPORT_TC_OV
2618
2619 static int
2620 update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
2621 {
2622         struct pmd_internals *p = dev->data->dev_private;
2623         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2624
2625         struct tm_node *ns = np->parent_node;
2626         uint32_t subport_id = tm_node_subport_id(dev, ns);
2627
2628         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2629         struct rte_sched_pipe_params profile1;
2630         uint32_t pipe_profile_id;
2631
2632         /* Derive new pipe profile. */
2633         memcpy(&profile1, profile0, sizeof(profile1));
2634         profile1.tc_ov_weight = (uint8_t)weight;
2635
2636         /* Since the implementation does not allow adding pipe profiles after
2637          * port configuration, the pipe configuration can be successfully
2638          * updated only if the new profile is also part of the existing set of
2639          * pipe profiles.
2640          */
2641         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2642                 return -1;
2643
2644         /* Update the pipe profile used by the current pipe. */
2645         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2646                 (int32_t)pipe_profile_id))
2647                 return -1;
2648
2649         /* Commit changes. */
2650         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2651         np->weight = weight;
2652
2653         return 0;
2654 }
2655
2656 #endif
2657
2658 static int
2659 update_queue_weight(struct rte_eth_dev *dev,
2660         struct tm_node *nq, uint32_t weight)
2661 {
2662         struct pmd_internals *p = dev->data->dev_private;
2663         uint32_t queue_id = tm_node_queue_id(dev, nq);
2664
2665         struct tm_node *nt = nq->parent_node;
2666         uint32_t tc_id = tm_node_tc_id(dev, nt);
2667
2668         struct tm_node *np = nt->parent_node;
2669         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2670
2671         struct tm_node *ns = np->parent_node;
2672         uint32_t subport_id = tm_node_subport_id(dev, ns);
2673
2674         uint32_t pipe_queue_id =
2675                 tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + queue_id;
2676
2677         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2678         struct rte_sched_pipe_params profile1;
2679         uint32_t pipe_profile_id;
2680
2681         /* Derive new pipe profile. */
2682         memcpy(&profile1, profile0, sizeof(profile1));
2683         profile1.wrr_weights[pipe_queue_id] = (uint8_t)weight;
2684
2685         /* Since the implementation does not allow adding pipe profiles after
2686          * port configuration, the pipe configuration can be successfully
2687          * updated only if the new profile is also part of the existing set
2688          * of pipe profiles.
2689          */
2690         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2691                 return -1;
2692
2693         /* Update the pipe profile used by the current pipe. */
2694         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2695                 (int32_t)pipe_profile_id))
2696                 return -1;
2697
2698         /* Commit changes. */
2699         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2700         nq->weight = weight;
2701
2702         return 0;
2703 }
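
/*
 * Note: as with update_pipe_weight() above, a run-time weight change can
 * only select among the pipe profiles generated at hierarchy commit time.
 * An application that intends to toggle a queue between, say, weight 1
 * and weight 2 must therefore commit a hierarchy that already contains at
 * least one pipe built with each weight, so that both derived profiles
 * exist.
 */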
2704
2705 /* Traffic manager node parent update */
2706 static int
2707 pmd_tm_node_parent_update(struct rte_eth_dev *dev,
2708         uint32_t node_id,
2709         uint32_t parent_node_id,
2710         uint32_t priority,
2711         uint32_t weight,
2712         struct rte_tm_error *error)
2713 {
2714         struct tm_node *n;
2715
2716         /* Port must be started and TM used. */
2717         if (dev->data->dev_started == 0 || tm_used(dev) == 0)
2718                 return -rte_tm_error_set(error,
2719                         EBUSY,
2720                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2721                         NULL,
2722                         rte_strerror(EBUSY));
2723
2724         /* Node must be valid */
2725         n = tm_node_search(dev, node_id);
2726         if (n == NULL)
2727                 return -rte_tm_error_set(error,
2728                         EINVAL,
2729                         RTE_TM_ERROR_TYPE_NODE_ID,
2730                         NULL,
2731                         rte_strerror(EINVAL));
2732
2733         /* Parent node must be the same */
2734         if (n->parent_node_id != parent_node_id)
2735                 return -rte_tm_error_set(error,
2736                         EINVAL,
2737                         RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
2738                         NULL,
2739                         rte_strerror(EINVAL));
2740
2741         /* Priority must be the same */
2742         if (n->priority != priority)
2743                 return -rte_tm_error_set(error,
2744                         EINVAL,
2745                         RTE_TM_ERROR_TYPE_NODE_PRIORITY,
2746                         NULL,
2747                         rte_strerror(EINVAL));
2748
2749         /* weight: must be 1 .. 255 */
2750         if (weight == 0 || weight > UINT8_MAX)
2751                 return -rte_tm_error_set(error,
2752                         EINVAL,
2753                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2754                         NULL,
2755                         rte_strerror(EINVAL));
2756
2757         switch (n->level) {
2758         case TM_NODE_LEVEL_PORT:
2759                 return -rte_tm_error_set(error,
2760                         EINVAL,
2761                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2762                         NULL,
2763                         rte_strerror(EINVAL));
2764                 /* fall-through */
2765         case TM_NODE_LEVEL_SUBPORT:
2766                 return -rte_tm_error_set(error,
2767                         EINVAL,
2768                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2769                         NULL,
2770                         rte_strerror(EINVAL));
2771                 /* fall-through */
2772         case TM_NODE_LEVEL_PIPE:
2773 #ifdef RTE_SCHED_SUBPORT_TC_OV
2774                 if (update_pipe_weight(dev, n, weight))
2775                         return -rte_tm_error_set(error,
2776                                 EINVAL,
2777                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2778                                 NULL,
2779                                 rte_strerror(EINVAL));
2780                 return 0;
2781 #else
2782                 return -rte_tm_error_set(error,
2783                         EINVAL,
2784                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2785                         NULL,
2786                         rte_strerror(EINVAL));
2787 #endif
2788                 /* fall-through */
2789         case TM_NODE_LEVEL_TC:
2790                 return -rte_tm_error_set(error,
2791                         EINVAL,
2792                         RTE_TM_ERROR_TYPE_NODE_WEIGHT,
2793                         NULL,
2794                         rte_strerror(EINVAL));
2795                 /* fall-through */
2796         case TM_NODE_LEVEL_QUEUE:
2797                 /* fall-through */
2798         default:
2799                 if (update_queue_weight(dev, n, weight))
2800                         return -rte_tm_error_set(error,
2801                                 EINVAL,
2802                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2803                                 NULL,
2804                                 rte_strerror(EINVAL));
2805                 return 0;
2806         }
2807 }
2808
2809 static int
2810 update_subport_rate(struct rte_eth_dev *dev,
2811         struct tm_node *ns,
2812         struct tm_shaper_profile *sp)
2813 {
2814         struct pmd_internals *p = dev->data->dev_private;
2815         uint32_t subport_id = tm_node_subport_id(dev, ns);
2816
2817         struct rte_sched_subport_params subport_params;
2818
2819         /* Derive new subport configuration. */
2820         memcpy(&subport_params,
2821                 &p->soft.tm.params.subport_params[subport_id],
2822                 sizeof(subport_params));
2823         subport_params.tb_rate = sp->params.peak.rate;
2824         subport_params.tb_size = sp->params.peak.size;
2825
2826         /* Update the subport configuration. */
2827         if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
2828                 &subport_params))
2829                 return -1;
2830
2831         /* Commit changes. */
2832         ns->shaper_profile->n_users--;
2833
2834         ns->shaper_profile = sp;
2835         ns->params.shaper_profile_id = sp->shaper_profile_id;
2836         sp->n_users++;
2837
2838         memcpy(&p->soft.tm.params.subport_params[subport_id],
2839                 &subport_params,
2840                 sizeof(subport_params));
2841
2842         return 0;
2843 }
2844
2845 static int
2846 update_pipe_rate(struct rte_eth_dev *dev,
2847         struct tm_node *np,
2848         struct tm_shaper_profile *sp)
2849 {
2850         struct pmd_internals *p = dev->data->dev_private;
2851         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2852
2853         struct tm_node *ns = np->parent_node;
2854         uint32_t subport_id = tm_node_subport_id(dev, ns);
2855
2856         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2857         struct rte_sched_pipe_params profile1;
2858         uint32_t pipe_profile_id;
2859
2860         /* Derive new pipe profile. */
2861         memcpy(&profile1, profile0, sizeof(profile1));
2862         profile1.tb_rate = sp->params.peak.rate;
2863         profile1.tb_size = sp->params.peak.size;
2864
2865         /* Since the implementation does not allow adding pipe profiles after
2866          * port configuration, the pipe configuration can be successfully
2867          * updated only if the new profile is also part of the existing set of
2868          * pipe profiles.
2869          */
2870         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2871                 return -1;
2872
2873         /* Update the pipe profile used by the current pipe. */
2874         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2875                 (int32_t)pipe_profile_id))
2876                 return -1;
2877
2878         /* Commit changes. */
2879         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2880         np->shaper_profile->n_users--;
2881         np->shaper_profile = sp;
2882         np->params.shaper_profile_id = sp->shaper_profile_id;
2883         sp->n_users++;
2884
2885         return 0;
2886 }
2887
2888 static int
2889 update_tc_rate(struct rte_eth_dev *dev,
2890         struct tm_node *nt,
2891         struct tm_shaper_profile *sp)
2892 {
2893         struct pmd_internals *p = dev->data->dev_private;
2894         uint32_t tc_id = tm_node_tc_id(dev, nt);
2895
2896         struct tm_node *np = nt->parent_node;
2897         uint32_t pipe_id = tm_node_pipe_id(dev, np);
2898
2899         struct tm_node *ns = np->parent_node;
2900         uint32_t subport_id = tm_node_subport_id(dev, ns);
2901
2902         struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np);
2903         struct rte_sched_pipe_params profile1;
2904         uint32_t pipe_profile_id;
2905
2906         /* Derive new pipe profile. */
2907         memcpy(&profile1, profile0, sizeof(profile1));
2908         profile1.tc_rate[tc_id] = sp->params.peak.rate;
2909
2910         /* Since the implementation does not allow adding pipe profiles after
2911          * port configuration, the pipe configuration can be successfully
2912          * updated only if the new profile is also part of the existing set of
2913          * pipe profiles.
2914          */
2915         if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0)
2916                 return -1;
2917
2918         /* Update the pipe profile used by the current pipe. */
2919         if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
2920                 (int32_t)pipe_profile_id))
2921                 return -1;
2922
2923         /* Commit changes. */
2924         pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id);
2925         nt->shaper_profile->n_users--;
2926         nt->shaper_profile = sp;
2927         nt->params.shaper_profile_id = sp->shaper_profile_id;
2928         sp->n_users++;
2929
2930         return 0;
2931 }
2932
2933 /* Traffic manager node shaper update */
2934 static int
2935 pmd_tm_node_shaper_update(struct rte_eth_dev *dev,
2936         uint32_t node_id,
2937         uint32_t shaper_profile_id,
2938         struct rte_tm_error *error)
2939 {
2940         struct tm_node *n;
2941         struct tm_shaper_profile *sp;
2942
2943         /* Port must be started and TM used. */
2944         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
2945                 return -rte_tm_error_set(error,
2946                         EBUSY,
2947                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2948                         NULL,
2949                         rte_strerror(EBUSY));
2950
2951         /* Node must be valid */
2952         n = tm_node_search(dev, node_id);
2953         if (n == NULL)
2954                 return -rte_tm_error_set(error,
2955                         EINVAL,
2956                         RTE_TM_ERROR_TYPE_NODE_ID,
2957                         NULL,
2958                         rte_strerror(EINVAL));
2959
2960         /* Shaper profile must be valid. */
2961         sp = tm_shaper_profile_search(dev, shaper_profile_id);
2962         if (sp == NULL)
2963                 return -rte_tm_error_set(error,
2964                         EINVAL,
2965                         RTE_TM_ERROR_TYPE_SHAPER_PROFILE,
2966                         NULL,
2967                         rte_strerror(EINVAL));
2968
2969         switch (n->level) {
2970         case TM_NODE_LEVEL_PORT:
2971                 return -rte_tm_error_set(error,
2972                         EINVAL,
2973                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
2974                         NULL,
2975                         rte_strerror(EINVAL));
2976
2977         case TM_NODE_LEVEL_SUBPORT:
2978                 if (update_subport_rate(dev, n, sp))
2979                         return -rte_tm_error_set(error,
2980                                 EINVAL,
2981                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2982                                 NULL,
2983                                 rte_strerror(EINVAL));
2984                 return 0;
2985
2986         case TM_NODE_LEVEL_PIPE:
2987                 if (update_pipe_rate(dev, n, sp))
2988                         return -rte_tm_error_set(error,
2989                                 EINVAL,
2990                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
2991                                 NULL,
2992                                 rte_strerror(EINVAL));
2993                 return 0;
2994
2995         case TM_NODE_LEVEL_TC:
2996                 if (update_tc_rate(dev, n, sp))
2997                         return -rte_tm_error_set(error,
2998                                 EINVAL,
2999                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3000                                 NULL,
3001                                 rte_strerror(EINVAL));
3002                 return 0;
3003
3004         case TM_NODE_LEVEL_QUEUE:
3005                 /* fall-through */
3006         default:
3007                 return -rte_tm_error_set(error,
3008                         EINVAL,
3009                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3010                         NULL,
3011                         rte_strerror(EINVAL));
3012         }
3013 }
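
/*
 * Illustrative application-side sketch (not part of this driver): once the
 * hierarchy is committed and the port started, a node rate change is driven
 * through the generic rte_tm API by adding a new shaper profile and then
 * attaching it to the node, which lands in pmd_tm_node_shaper_update()
 * above. The identifiers below (port_id, node_id, profile id 10) and the
 * token bucket values are placeholders.
 *
 *	struct rte_tm_shaper_profile_params sp = {
 *		.peak = { .rate = 5000000, .size = 1000000 },
 *	};
 *	struct rte_tm_error err;
 *	int status;
 *
 *	status = rte_tm_shaper_profile_add(port_id, 10, &sp, &err);
 *	if (status == 0)
 *		status = rte_tm_node_shaper_update(port_id, node_id, 10, &err);
 */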
3014
3015 static inline uint32_t
3016 tm_port_queue_id(struct rte_eth_dev *dev,
3017         uint32_t port_subport_id,
3018         uint32_t subport_pipe_id,
3019         uint32_t pipe_tc_id,
3020         uint32_t tc_queue_id)
3021 {
3022         struct pmd_internals *p = dev->data->dev_private;
3023         struct tm_hierarchy *h = &p->soft.tm.h;
3024         uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
3025                         h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3026
3027         uint32_t port_pipe_id =
3028                 port_subport_id * n_pipes_per_subport + subport_pipe_id;
3029         uint32_t port_tc_id =
3030                 port_pipe_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE + pipe_tc_id;
3031         uint32_t port_queue_id =
3032                 port_tc_id * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS + tc_queue_id;
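        /*
         * Worked example (illustrative only): with 64 pipes per subport and
         * the default DPDK 18.02 constants of 4 traffic classes per pipe and
         * 4 queues per traffic class, subport 2 / pipe 5 / TC 3 / queue 1
         * maps to:
         *   port_pipe_id  = 2 * 64 + 5  = 133
         *   port_tc_id    = 133 * 4 + 3 = 535
         *   port_queue_id = 535 * 4 + 1 = 2141
         */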
3033
3034         return port_queue_id;
3035 }
3036
3037 static int
3038 read_port_stats(struct rte_eth_dev *dev,
3039         struct tm_node *nr,
3040         struct rte_tm_node_stats *stats,
3041         uint64_t *stats_mask,
3042         int clear)
3043 {
3044         struct pmd_internals *p = dev->data->dev_private;
3045         struct tm_hierarchy *h = &p->soft.tm.h;
3046         uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT];
3047         uint32_t subport_id;
3048
3049         for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) {
3050                 struct rte_sched_subport_stats s;
3051                 uint32_t tc_ov, id;
3052
3053                 /* Stats read */
3054                 int status = rte_sched_subport_read_stats(
3055                         p->soft.tm.sched,
3056                         subport_id,
3057                         &s,
3058                         &tc_ov);
3059                 if (status)
3060                         return status;
3061
3062                 /* Stats accumulate */
3063                 for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) {
3064                         nr->stats.n_pkts +=
3065                                 s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id];
3066                         nr->stats.n_bytes +=
3067                                 s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id];
3068                         nr->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3069                                 s.n_pkts_tc_dropped[id];
3070                         nr->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3071                                 s.n_bytes_tc_dropped[id];
3072                 }
3073         }
3074
3075         /* Stats copy */
3076         if (stats)
3077                 memcpy(stats, &nr->stats, sizeof(*stats));
3078
3079         if (stats_mask)
3080                 *stats_mask = STATS_MASK_DEFAULT;
3081
3082         /* Stats clear */
3083         if (clear)
3084                 memset(&nr->stats, 0, sizeof(nr->stats));
3085
3086         return 0;
3087 }
3088
3089 static int
3090 read_subport_stats(struct rte_eth_dev *dev,
3091         struct tm_node *ns,
3092         struct rte_tm_node_stats *stats,
3093         uint64_t *stats_mask,
3094         int clear)
3095 {
3096         struct pmd_internals *p = dev->data->dev_private;
3097         uint32_t subport_id = tm_node_subport_id(dev, ns);
3098         struct rte_sched_subport_stats s;
3099         uint32_t tc_ov, tc_id;
3100
3101         /* Stats read */
3102         int status = rte_sched_subport_read_stats(
3103                 p->soft.tm.sched,
3104                 subport_id,
3105                 &s,
3106                 &tc_ov);
3107         if (status)
3108                 return status;
3109
3110         /* Stats accumulate */
3111         for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) {
3112                 ns->stats.n_pkts +=
3113                         s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id];
3114                 ns->stats.n_bytes +=
3115                         s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id];
3116                 ns->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] +=
3117                         s.n_pkts_tc_dropped[tc_id];
3118                 ns->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3119                         s.n_bytes_tc_dropped[tc_id];
3120         }
3121
3122         /* Stats copy */
3123         if (stats)
3124                 memcpy(stats, &ns->stats, sizeof(*stats));
3125
3126         if (stats_mask)
3127                 *stats_mask = STATS_MASK_DEFAULT;
3128
3129         /* Stats clear */
3130         if (clear)
3131                 memset(&ns->stats, 0, sizeof(ns->stats));
3132
3133         return 0;
3134 }
3135
3136 static int
3137 read_pipe_stats(struct rte_eth_dev *dev,
3138         struct tm_node *np,
3139         struct rte_tm_node_stats *stats,
3140         uint64_t *stats_mask,
3141         int clear)
3142 {
3143         struct pmd_internals *p = dev->data->dev_private;
3144
3145         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3146
3147         struct tm_node *ns = np->parent_node;
3148         uint32_t subport_id = tm_node_subport_id(dev, ns);
3149
3150         uint32_t i;
3151
3152         /* Stats read */
3153         for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
3154                 struct rte_sched_queue_stats s;
3155                 uint16_t qlen;
3156
3157                 uint32_t qid = tm_port_queue_id(dev,
3158                         subport_id,
3159                         pipe_id,
3160                         i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
3161                         i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
3162
3163                 int status = rte_sched_queue_read_stats(
3164                         p->soft.tm.sched,
3165                         qid,
3166                         &s,
3167                         &qlen);
3168                 if (status)
3169                         return status;
3170
3171                 /* Stats accumulate */
3172                 np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3173                 np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3174                 np->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3175                 np->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3176                         s.n_bytes_dropped;
3177                 np->stats.leaf.n_pkts_queued = qlen;
3178         }
3179
3180         /* Stats copy */
3181         if (stats)
3182                 memcpy(stats, &np->stats, sizeof(*stats));
3183
3184         if (stats_mask)
3185                 *stats_mask = STATS_MASK_DEFAULT;
3186
3187         /* Stats clear */
3188         if (clear)
3189                 memset(&np->stats, 0, sizeof(np->stats));
3190
3191         return 0;
3192 }
3193
3194 static int
3195 read_tc_stats(struct rte_eth_dev *dev,
3196         struct tm_node *nt,
3197         struct rte_tm_node_stats *stats,
3198         uint64_t *stats_mask,
3199         int clear)
3200 {
3201         struct pmd_internals *p = dev->data->dev_private;
3202
3203         uint32_t tc_id = tm_node_tc_id(dev, nt);
3204
3205         struct tm_node *np = nt->parent_node;
3206         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3207
3208         struct tm_node *ns = np->parent_node;
3209         uint32_t subport_id = tm_node_subport_id(dev, ns);
3210
3211         uint32_t i;
3212
3213         /* Stats read */
3214         for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
3215                 struct rte_sched_queue_stats s;
3216                 uint16_t qlen;
3217
3218                 uint32_t qid = tm_port_queue_id(dev,
3219                         subport_id,
3220                         pipe_id,
3221                         tc_id,
3222                         i);
3223
3224                 int status = rte_sched_queue_read_stats(
3225                         p->soft.tm.sched,
3226                         qid,
3227                         &s,
3228                         &qlen);
3229                 if (status)
3230                         return status;
3231
3232                 /* Stats accumulate */
3233                 nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3234                 nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3235                 nt->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3236                 nt->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3237                         s.n_bytes_dropped;
3238                 nt->stats.leaf.n_pkts_queued = qlen;
3239         }
3240
3241         /* Stats copy */
3242         if (stats)
3243                 memcpy(stats, &nt->stats, sizeof(*stats));
3244
3245         if (stats_mask)
3246                 *stats_mask = STATS_MASK_DEFAULT;
3247
3248         /* Stats clear */
3249         if (clear)
3250                 memset(&nt->stats, 0, sizeof(nt->stats));
3251
3252         return 0;
3253 }
3254
3255 static int
3256 read_queue_stats(struct rte_eth_dev *dev,
3257         struct tm_node *nq,
3258         struct rte_tm_node_stats *stats,
3259         uint64_t *stats_mask,
3260         int clear)
3261 {
3262         struct pmd_internals *p = dev->data->dev_private;
3263         struct rte_sched_queue_stats s;
3264         uint16_t qlen;
3265
3266         uint32_t queue_id = tm_node_queue_id(dev, nq);
3267
3268         struct tm_node *nt = nq->parent_node;
3269         uint32_t tc_id = tm_node_tc_id(dev, nt);
3270
3271         struct tm_node *np = nt->parent_node;
3272         uint32_t pipe_id = tm_node_pipe_id(dev, np);
3273
3274         struct tm_node *ns = np->parent_node;
3275         uint32_t subport_id = tm_node_subport_id(dev, ns);
3276
3277         /* Stats read */
3278         uint32_t qid = tm_port_queue_id(dev,
3279                 subport_id,
3280                 pipe_id,
3281                 tc_id,
3282                 queue_id);
3283
3284         int status = rte_sched_queue_read_stats(
3285                 p->soft.tm.sched,
3286                 qid,
3287                 &s,
3288                 &qlen);
3289         if (status)
3290                 return status;
3291
3292         /* Stats accumulate */
3293         nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped;
3294         nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped;
3295         nq->stats.leaf.n_pkts_dropped[RTE_TM_GREEN] += s.n_pkts_dropped;
3296         nq->stats.leaf.n_bytes_dropped[RTE_TM_GREEN] +=
3297                 s.n_bytes_dropped;
3298         nq->stats.leaf.n_pkts_queued = qlen;
3299
3300         /* Stats copy */
3301         if (stats)
3302                 memcpy(stats, &nq->stats, sizeof(*stats));
3303
3304         if (stats_mask)
3305                 *stats_mask = STATS_MASK_QUEUE;
3306
3307         /* Stats clear */
3308         if (clear)
3309                 memset(&nq->stats, 0, sizeof(nq->stats));
3310
3311         return 0;
3312 }
3313
3314 /* Traffic manager read stats counters for a specific node */
3315 static int
3316 pmd_tm_node_stats_read(struct rte_eth_dev *dev,
3317         uint32_t node_id,
3318         struct rte_tm_node_stats *stats,
3319         uint64_t *stats_mask,
3320         int clear,
3321         struct rte_tm_error *error)
3322 {
3323         struct tm_node *n;
3324
3325         /* Port must be started and TM used. */
3326         if (dev->data->dev_started == 0 || (tm_used(dev) == 0))
3327                 return -rte_tm_error_set(error,
3328                         EBUSY,
3329                         RTE_TM_ERROR_TYPE_UNSPECIFIED,
3330                         NULL,
3331                         rte_strerror(EBUSY));
3332
3333         /* Node must be valid */
3334         n = tm_node_search(dev, node_id);
3335         if (n == NULL)
3336                 return -rte_tm_error_set(error,
3337                         EINVAL,
3338                         RTE_TM_ERROR_TYPE_NODE_ID,
3339                         NULL,
3340                         rte_strerror(EINVAL));
3341
3342         switch (n->level) {
3343         case TM_NODE_LEVEL_PORT:
3344                 if (read_port_stats(dev, n, stats, stats_mask, clear))
3345                         return -rte_tm_error_set(error,
3346                                 EINVAL,
3347                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3348                                 NULL,
3349                                 rte_strerror(EINVAL));
3350                 return 0;
3351
3352         case TM_NODE_LEVEL_SUBPORT:
3353                 if (read_subport_stats(dev, n, stats, stats_mask, clear))
3354                         return -rte_tm_error_set(error,
3355                                 EINVAL,
3356                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3357                                 NULL,
3358                                 rte_strerror(EINVAL));
3359                 return 0;
3360
3361         case TM_NODE_LEVEL_PIPE:
3362                 if (read_pipe_stats(dev, n, stats, stats_mask, clear))
3363                         return -rte_tm_error_set(error,
3364                                 EINVAL,
3365                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3366                                 NULL,
3367                                 rte_strerror(EINVAL));
3368                 return 0;
3369
3370         case TM_NODE_LEVEL_TC:
3371                 if (read_tc_stats(dev, n, stats, stats_mask, clear))
3372                         return -rte_tm_error_set(error,
3373                                 EINVAL,
3374                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3375                                 NULL,
3376                                 rte_strerror(EINVAL));
3377                 return 0;
3378
3379         case TM_NODE_LEVEL_QUEUE:
3380         default:
3381                 if (read_queue_stats(dev, n, stats, stats_mask, clear))
3382                         return -rte_tm_error_set(error,
3383                                 EINVAL,
3384                                 RTE_TM_ERROR_TYPE_UNSPECIFIED,
3385                                 NULL,
3386                                 rte_strerror(EINVAL));
3387                 return 0;
3388         }
3389 }
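
/*
 * Illustrative application-side sketch (not part of this driver): node
 * counters are read through the generic rte_tm API, which dispatches to
 * pmd_tm_node_stats_read() above; a non-zero clear flag resets the counters
 * after they are copied out. port_id and node_id are placeholders.
 *
 *	struct rte_tm_node_stats stats;
 *	uint64_t stats_mask;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_node_stats_read(port_id, node_id, &stats, &stats_mask,
 *		1, &err) == 0 && (stats_mask & RTE_TM_STATS_N_PKTS))
 *		printf("node %u: %" PRIu64 " pkts\n", node_id, stats.n_pkts);
 */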
3390
3391 const struct rte_tm_ops pmd_tm_ops = {
3392         .node_type_get = pmd_tm_node_type_get,
3393         .capabilities_get = pmd_tm_capabilities_get,
3394         .level_capabilities_get = pmd_tm_level_capabilities_get,
3395         .node_capabilities_get = pmd_tm_node_capabilities_get,
3396
3397         .wred_profile_add = pmd_tm_wred_profile_add,
3398         .wred_profile_delete = pmd_tm_wred_profile_delete,
3399         .shared_wred_context_add_update = NULL,
3400         .shared_wred_context_delete = NULL,
3401
3402         .shaper_profile_add = pmd_tm_shaper_profile_add,
3403         .shaper_profile_delete = pmd_tm_shaper_profile_delete,
3404         .shared_shaper_add_update = pmd_tm_shared_shaper_add_update,
3405         .shared_shaper_delete = pmd_tm_shared_shaper_delete,
3406
3407         .node_add = pmd_tm_node_add,
3408         .node_delete = pmd_tm_node_delete,
3409         .node_suspend = NULL,
3410         .node_resume = NULL,
3411         .hierarchy_commit = pmd_tm_hierarchy_commit,
3412
3413         .node_parent_update = pmd_tm_node_parent_update,
3414         .node_shaper_update = pmd_tm_node_shaper_update,
3415         .node_shared_shaper_update = NULL,
3416         .node_stats_update = NULL,
3417         .node_wfq_weight_mode_update = NULL,
3418         .node_cman_update = NULL,
3419         .node_wred_context_update = NULL,
3420         .node_shared_wred_context_update = NULL,
3421
3422         .node_stats_read = pmd_tm_node_stats_read,
3423 };
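
/*
 * Illustrative note: applications never reference this table directly; it is
 * exposed through the softnic PMD's ethdev tm_ops_get callback, so the
 * handlers above are reached through the generic rte_tm_* wrappers, e.g.
 * (port_id is a placeholder):
 *
 *	struct rte_tm_capabilities cap;
 *	struct rte_tm_error err;
 *
 *	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0)
 *		printf("max TM nodes: %u\n", cap.n_nodes_max);
 *
 * Entries left NULL above are rejected by the rte_tm layer as unsupported.
 */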