New upstream version 18.02
[deb_dpdk.git] / drivers / net / i40e / i40e_tm.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <rte_malloc.h>
6
7 #include "base/i40e_prototype.h"
8 #include "i40e_ethdev.h"
9
/*
 * Forward declarations of the rte_tm callback implementations that are
 * wired into the i40e_tm_ops table below.
 */
static int i40e_tm_capabilities_get(struct rte_eth_dev *dev,
				    struct rte_tm_capabilities *cap,
				    struct rte_tm_error *error);
static int i40e_shaper_profile_add(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_shaper_params *profile,
				   struct rte_tm_error *error);
static int i40e_shaper_profile_del(struct rte_eth_dev *dev,
				   uint32_t shaper_profile_id,
				   struct rte_tm_error *error);
static int i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
			 uint32_t parent_node_id, uint32_t priority,
			 uint32_t weight, uint32_t level_id,
			 struct rte_tm_node_params *params,
			 struct rte_tm_error *error);
static int i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
			    struct rte_tm_error *error);
static int i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
			      int *is_leaf, struct rte_tm_error *error);
static int i40e_level_capabilities_get(struct rte_eth_dev *dev,
				       uint32_t level_id,
				       struct rte_tm_level_capabilities *cap,
				       struct rte_tm_error *error);
static int i40e_node_capabilities_get(struct rte_eth_dev *dev,
				      uint32_t node_id,
				      struct rte_tm_node_capabilities *cap,
				      struct rte_tm_error *error);
static int i40e_hierarchy_commit(struct rte_eth_dev *dev,
				 int clear_on_fail,
				 struct rte_tm_error *error);
40
/*
 * rte_tm callback table handed out by i40e_tm_ops_get().  Members not
 * listed here are zero-initialized (designated initializer), i.e. those
 * rte_tm operations are not implemented by this driver.
 */
const struct rte_tm_ops i40e_tm_ops = {
	.capabilities_get = i40e_tm_capabilities_get,
	.shaper_profile_add = i40e_shaper_profile_add,
	.shaper_profile_delete = i40e_shaper_profile_del,
	.node_add = i40e_node_add,
	.node_delete = i40e_node_delete,
	.node_type_get = i40e_node_type_get,
	.level_capabilities_get = i40e_level_capabilities_get,
	.node_capabilities_get = i40e_node_capabilities_get,
	.hierarchy_commit = i40e_hierarchy_commit,
};
52
53 int
54 i40e_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
55                 void *arg)
56 {
57         if (!arg)
58                 return -EINVAL;
59
60         *(const void **)arg = &i40e_tm_ops;
61
62         return 0;
63 }
64
/*
 * Reset the PF's traffic-management state to empty: no shaper profiles,
 * no root/TC/queue nodes, and the hierarchy not yet committed.
 */
void
i40e_tm_conf_init(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);

	/* initialize shaper profile list */
	TAILQ_INIT(&pf->tm_conf.shaper_profile_list);

	/* initialize node configuration: empty hierarchy, zero counters */
	pf->tm_conf.root = NULL;
	TAILQ_INIT(&pf->tm_conf.tc_list);
	TAILQ_INIT(&pf->tm_conf.queue_list);
	pf->tm_conf.nb_tc_node = 0;
	pf->tm_conf.nb_queue_node = 0;
	pf->tm_conf.committed = false;
}
81
82 void
83 i40e_tm_conf_uninit(struct rte_eth_dev *dev)
84 {
85         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
86         struct i40e_tm_shaper_profile *shaper_profile;
87         struct i40e_tm_node *tm_node;
88
89         /* clear node configuration */
90         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) {
91                 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
92                 rte_free(tm_node);
93         }
94         pf->tm_conf.nb_queue_node = 0;
95         while ((tm_node = TAILQ_FIRST(&pf->tm_conf.tc_list))) {
96                 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
97                 rte_free(tm_node);
98         }
99         pf->tm_conf.nb_tc_node = 0;
100         if (pf->tm_conf.root) {
101                 rte_free(pf->tm_conf.root);
102                 pf->tm_conf.root = NULL;
103         }
104
105         /* Remove all shaper profiles */
106         while ((shaper_profile =
107                TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) {
108                 TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list,
109                              shaper_profile, node);
110                 rte_free(shaper_profile);
111         }
112 }
113
114 static inline uint16_t
115 i40e_tc_nb_get(struct rte_eth_dev *dev)
116 {
117         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
118         struct i40e_vsi *main_vsi = pf->main_vsi;
119         uint16_t sum = 0;
120         int i;
121
122         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
123                 if (main_vsi->enabled_tc & BIT_ULL(i))
124                         sum++;
125         }
126
127         return sum;
128 }
129
130 static int
131 i40e_tm_capabilities_get(struct rte_eth_dev *dev,
132                          struct rte_tm_capabilities *cap,
133                          struct rte_tm_error *error)
134 {
135         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
136         uint16_t tc_nb = i40e_tc_nb_get(dev);
137
138         if (!cap || !error)
139                 return -EINVAL;
140
141         if (tc_nb > hw->func_caps.num_tx_qp)
142                 return -EINVAL;
143
144         error->type = RTE_TM_ERROR_TYPE_NONE;
145
146         /* set all the parameters to 0 first. */
147         memset(cap, 0, sizeof(struct rte_tm_capabilities));
148
149         /**
150          * support port + TCs + queues
151          * here shows the max capability not the current configuration.
152          */
153         cap->n_nodes_max = 1 + I40E_MAX_TRAFFIC_CLASS + hw->func_caps.num_tx_qp;
154         cap->n_levels_max = 3; /* port, TC, queue */
155         cap->non_leaf_nodes_identical = 1;
156         cap->leaf_nodes_identical = 1;
157         cap->shaper_n_max = cap->n_nodes_max;
158         cap->shaper_private_n_max = cap->n_nodes_max;
159         cap->shaper_private_dual_rate_n_max = 0;
160         cap->shaper_private_rate_min = 0;
161         /* 40Gbps -> 5GBps */
162         cap->shaper_private_rate_max = 5000000000ull;
163         cap->shaper_shared_n_max = 0;
164         cap->shaper_shared_n_nodes_per_shaper_max = 0;
165         cap->shaper_shared_n_shapers_per_node_max = 0;
166         cap->shaper_shared_dual_rate_n_max = 0;
167         cap->shaper_shared_rate_min = 0;
168         cap->shaper_shared_rate_max = 0;
169         cap->sched_n_children_max = hw->func_caps.num_tx_qp;
170         /**
171          * HW supports SP. But no plan to support it now.
172          * So, all the nodes should have the same priority.
173          */
174         cap->sched_sp_n_priorities_max = 1;
175         cap->sched_wfq_n_children_per_group_max = 0;
176         cap->sched_wfq_n_groups_max = 0;
177         /**
178          * SW only supports fair round robin now.
179          * So, all the nodes should have the same weight.
180          */
181         cap->sched_wfq_weight_max = 1;
182         cap->cman_head_drop_supported = 0;
183         cap->dynamic_update_mask = 0;
184         cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
185         cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
186         cap->cman_wred_context_n_max = 0;
187         cap->cman_wred_context_private_n_max = 0;
188         cap->cman_wred_context_shared_n_max = 0;
189         cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
190         cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
191         cap->stats_mask = 0;
192
193         return 0;
194 }
195
196 static inline struct i40e_tm_shaper_profile *
197 i40e_shaper_profile_search(struct rte_eth_dev *dev,
198                            uint32_t shaper_profile_id)
199 {
200         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
201         struct i40e_shaper_profile_list *shaper_profile_list =
202                 &pf->tm_conf.shaper_profile_list;
203         struct i40e_tm_shaper_profile *shaper_profile;
204
205         TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
206                 if (shaper_profile_id == shaper_profile->shaper_profile_id)
207                         return shaper_profile;
208         }
209
210         return NULL;
211 }
212
213 static int
214 i40e_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
215                                 struct rte_tm_error *error)
216 {
217         /* min rate not supported */
218         if (profile->committed.rate) {
219                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
220                 error->message = "committed rate not supported";
221                 return -EINVAL;
222         }
223         /* min bucket size not supported */
224         if (profile->committed.size) {
225                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
226                 error->message = "committed bucket size not supported";
227                 return -EINVAL;
228         }
229         /* max bucket size not supported */
230         if (profile->peak.size) {
231                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
232                 error->message = "peak bucket size not supported";
233                 return -EINVAL;
234         }
235         /* length adjustment not supported */
236         if (profile->pkt_length_adjust) {
237                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
238                 error->message = "packet length adjustment not supported";
239                 return -EINVAL;
240         }
241
242         return 0;
243 }
244
245 static int
246 i40e_shaper_profile_add(struct rte_eth_dev *dev,
247                         uint32_t shaper_profile_id,
248                         struct rte_tm_shaper_params *profile,
249                         struct rte_tm_error *error)
250 {
251         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
252         struct i40e_tm_shaper_profile *shaper_profile;
253         int ret;
254
255         if (!profile || !error)
256                 return -EINVAL;
257
258         ret = i40e_shaper_profile_param_check(profile, error);
259         if (ret)
260                 return ret;
261
262         shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
263
264         if (shaper_profile) {
265                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
266                 error->message = "profile ID exist";
267                 return -EINVAL;
268         }
269
270         shaper_profile = rte_zmalloc("i40e_tm_shaper_profile",
271                                      sizeof(struct i40e_tm_shaper_profile),
272                                      0);
273         if (!shaper_profile)
274                 return -ENOMEM;
275         shaper_profile->shaper_profile_id = shaper_profile_id;
276         rte_memcpy(&shaper_profile->profile, profile,
277                          sizeof(struct rte_tm_shaper_params));
278         TAILQ_INSERT_TAIL(&pf->tm_conf.shaper_profile_list,
279                           shaper_profile, node);
280
281         return 0;
282 }
283
284 static int
285 i40e_shaper_profile_del(struct rte_eth_dev *dev,
286                         uint32_t shaper_profile_id,
287                         struct rte_tm_error *error)
288 {
289         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
290         struct i40e_tm_shaper_profile *shaper_profile;
291
292         if (!error)
293                 return -EINVAL;
294
295         shaper_profile = i40e_shaper_profile_search(dev, shaper_profile_id);
296
297         if (!shaper_profile) {
298                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
299                 error->message = "profile ID not exist";
300                 return -EINVAL;
301         }
302
303         /* don't delete a profile if it's used by one or several nodes */
304         if (shaper_profile->reference_count) {
305                 error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
306                 error->message = "profile in use";
307                 return -EINVAL;
308         }
309
310         TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node);
311         rte_free(shaper_profile);
312
313         return 0;
314 }
315
316 static inline struct i40e_tm_node *
317 i40e_tm_node_search(struct rte_eth_dev *dev,
318                     uint32_t node_id, enum i40e_tm_node_type *node_type)
319 {
320         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
321         struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
322         struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
323         struct i40e_tm_node *tm_node;
324
325         if (pf->tm_conf.root && pf->tm_conf.root->id == node_id) {
326                 *node_type = I40E_TM_NODE_TYPE_PORT;
327                 return pf->tm_conf.root;
328         }
329
330         TAILQ_FOREACH(tm_node, tc_list, node) {
331                 if (tm_node->id == node_id) {
332                         *node_type = I40E_TM_NODE_TYPE_TC;
333                         return tm_node;
334                 }
335         }
336
337         TAILQ_FOREACH(tm_node, queue_list, node) {
338                 if (tm_node->id == node_id) {
339                         *node_type = I40E_TM_NODE_TYPE_QUEUE;
340                         return tm_node;
341                 }
342         }
343
344         return NULL;
345 }
346
347 static int
348 i40e_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
349                       uint32_t priority, uint32_t weight,
350                       struct rte_tm_node_params *params,
351                       struct rte_tm_error *error)
352 {
353         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
354
355         if (node_id == RTE_TM_NODE_ID_NULL) {
356                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
357                 error->message = "invalid node id";
358                 return -EINVAL;
359         }
360
361         if (priority) {
362                 error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
363                 error->message = "priority should be 0";
364                 return -EINVAL;
365         }
366
367         if (weight != 1) {
368                 error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
369                 error->message = "weight must be 1";
370                 return -EINVAL;
371         }
372
373         /* not support shared shaper */
374         if (params->shared_shaper_id) {
375                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
376                 error->message = "shared shaper not supported";
377                 return -EINVAL;
378         }
379         if (params->n_shared_shapers) {
380                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
381                 error->message = "shared shaper not supported";
382                 return -EINVAL;
383         }
384
385         /* for non-leaf node */
386         if (node_id >= hw->func_caps.num_tx_qp) {
387                 if (params->nonleaf.wfq_weight_mode) {
388                         error->type =
389                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
390                         error->message = "WFQ not supported";
391                         return -EINVAL;
392                 }
393                 if (params->nonleaf.n_sp_priorities != 1) {
394                         error->type =
395                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
396                         error->message = "SP priority not supported";
397                         return -EINVAL;
398                 } else if (params->nonleaf.wfq_weight_mode &&
399                            !(*params->nonleaf.wfq_weight_mode)) {
400                         error->type =
401                                 RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
402                         error->message = "WFP should be byte mode";
403                         return -EINVAL;
404                 }
405
406                 return 0;
407         }
408
409         /* for leaf node */
410         if (params->leaf.cman) {
411                 error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
412                 error->message = "Congestion management not supported";
413                 return -EINVAL;
414         }
415         if (params->leaf.wred.wred_profile_id !=
416             RTE_TM_WRED_PROFILE_ID_NONE) {
417                 error->type =
418                         RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
419                 error->message = "WRED not supported";
420                 return -EINVAL;
421         }
422         if (params->leaf.wred.shared_wred_context_id) {
423                 error->type =
424                         RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
425                 error->message = "WRED not supported";
426                 return -EINVAL;
427         }
428         if (params->leaf.wred.n_shared_wred_contexts) {
429                 error->type =
430                         RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
431                 error->message = "WRED not supported";
432                 return -EINVAL;
433         }
434
435         return 0;
436 }
437
/**
 * rte_tm callback: add a node to the TM hierarchy.
 *
 * Now the TC and queue configuration is controlled by DCB.
 * We need check if the node configuration follows the DCB configuration.
 * In the future, we may use TM to cover DCB.
 *
 * A node with no parent (RTE_TM_NODE_ID_NULL) becomes the single root
 * (port) node; otherwise the node becomes a TC node (parent == port) or
 * a queue node (parent == TC).  For queue nodes the node ID is the
 * queue ID and must be < hw->func_caps.num_tx_qp.  Nodes can only be
 * added before the hierarchy is committed.
 */
static int
i40e_node_add(struct rte_eth_dev *dev, uint32_t node_id,
	      uint32_t parent_node_id, uint32_t priority,
	      uint32_t weight, uint32_t level_id,
	      struct rte_tm_node_params *params,
	      struct rte_tm_error *error)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
	enum i40e_tm_node_type parent_node_type = I40E_TM_NODE_TYPE_MAX;
	struct i40e_tm_shaper_profile *shaper_profile = NULL;
	struct i40e_tm_node *tm_node;
	struct i40e_tm_node *parent_node;
	uint16_t tc_nb = 0;
	int ret;

	if (!params || !error)
		return -EINVAL;

	/* the hierarchy is frozen once committed */
	if (pf->tm_conf.committed) {
		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
		error->message = "already committed";
		return -EINVAL;
	}

	/* validate priority/weight/shaper/WRED parameters first */
	ret = i40e_node_param_check(dev, node_id, priority, weight,
				    params, error);
	if (ret)
		return ret;

	/* check if the node ID is already used */
	if (i40e_tm_node_search(dev, node_id, &node_type)) {
		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
		error->message = "node id already used";
		return -EINVAL;
	}

	/* resolve the (optional) private shaper profile */
	if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
		shaper_profile = i40e_shaper_profile_search(
					dev, params->shaper_profile_id);
		if (!shaper_profile) {
			error->type =
				RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
			error->message = "shaper profile not exist";
			return -EINVAL;
		}
	}

	/* root node if not have a parent */
	if (parent_node_id == RTE_TM_NODE_ID_NULL) {
		/* a root must be at the port level (or level "any") */
		if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
		    level_id > I40E_TM_NODE_TYPE_PORT) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "Wrong level";
			return -EINVAL;
		}

		/* obviously no more than one root */
		if (pf->tm_conf.root) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
			error->message = "already have a root";
			return -EINVAL;
		}

		/* add the root node */
		tm_node = rte_zmalloc("i40e_tm_node",
				      sizeof(struct i40e_tm_node),
				      0);
		if (!tm_node)
			return -ENOMEM;
		tm_node->id = node_id;
		tm_node->priority = priority;
		tm_node->weight = weight;
		tm_node->reference_count = 0;
		tm_node->parent = NULL;
		tm_node->shaper_profile = shaper_profile;
		rte_memcpy(&tm_node->params, params,
				 sizeof(struct rte_tm_node_params));
		pf->tm_conf.root = tm_node;

		/* increase the reference counter of the shaper profile */
		if (shaper_profile)
			shaper_profile->reference_count++;

		return 0;
	}

	/* TC or queue node */
	/* check the parent node */
	parent_node = i40e_tm_node_search(dev, parent_node_id,
					  &parent_node_type);
	if (!parent_node) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent not exist";
		return -EINVAL;
	}
	if (parent_node_type != I40E_TM_NODE_TYPE_PORT &&
	    parent_node_type != I40E_TM_NODE_TYPE_TC) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
		error->message = "parent is not port or TC";
		return -EINVAL;
	}
	/* the node's level must be exactly one below its parent's */
	if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
	    level_id != parent_node_type + 1) {
		error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
		error->message = "Wrong level";
		return -EINVAL;
	}

	/* check the node number */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		/* no more TC nodes than TCs enabled on the main VSI */
		tc_nb = i40e_tc_nb_get(dev);
		if (pf->tm_conf.nb_tc_node >= tc_nb) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many TCs";
			return -EINVAL;
		}
	} else {
		/* no more queue nodes than Tx queue pairs */
		if (pf->tm_conf.nb_queue_node >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too many queues";
			return -EINVAL;
		}

		/**
		 * check the node id.
		 * For queue, the node id means queue id.
		 */
		if (node_id >= hw->func_caps.num_tx_qp) {
			error->type = RTE_TM_ERROR_TYPE_NODE_ID;
			error->message = "too large queue id";
			return -EINVAL;
		}
	}

	/* add the TC or queue node */
	tm_node = rte_zmalloc("i40e_tm_node",
			      sizeof(struct i40e_tm_node),
			      0);
	if (!tm_node)
		return -ENOMEM;
	tm_node->id = node_id;
	tm_node->priority = priority;
	tm_node->weight = weight;
	tm_node->reference_count = 0;
	tm_node->parent = parent_node;
	tm_node->shaper_profile = shaper_profile;
	rte_memcpy(&tm_node->params, params,
			 sizeof(struct rte_tm_node_params));
	/* link the node into the list matching its level */
	if (parent_node_type == I40E_TM_NODE_TYPE_PORT) {
		TAILQ_INSERT_TAIL(&pf->tm_conf.tc_list,
				  tm_node, node);
		pf->tm_conf.nb_tc_node++;
	} else {
		TAILQ_INSERT_TAIL(&pf->tm_conf.queue_list,
				  tm_node, node);
		pf->tm_conf.nb_queue_node++;
	}
	tm_node->parent->reference_count++;

	/* increase the reference counter of the shaper profile */
	if (shaper_profile)
		shaper_profile->reference_count++;

	return 0;
}
616
617 static int
618 i40e_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
619                  struct rte_tm_error *error)
620 {
621         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
622         enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
623         struct i40e_tm_node *tm_node;
624
625         if (!error)
626                 return -EINVAL;
627
628         /* if already committed */
629         if (pf->tm_conf.committed) {
630                 error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
631                 error->message = "already committed";
632                 return -EINVAL;
633         }
634
635         if (node_id == RTE_TM_NODE_ID_NULL) {
636                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
637                 error->message = "invalid node id";
638                 return -EINVAL;
639         }
640
641         /* check if the node id exists */
642         tm_node = i40e_tm_node_search(dev, node_id, &node_type);
643         if (!tm_node) {
644                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
645                 error->message = "no such node";
646                 return -EINVAL;
647         }
648
649         /* the node should have no child */
650         if (tm_node->reference_count) {
651                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
652                 error->message =
653                         "cannot delete a node which has children";
654                 return -EINVAL;
655         }
656
657         /* root node */
658         if (node_type == I40E_TM_NODE_TYPE_PORT) {
659                 if (tm_node->shaper_profile)
660                         tm_node->shaper_profile->reference_count--;
661                 rte_free(tm_node);
662                 pf->tm_conf.root = NULL;
663                 return 0;
664         }
665
666         /* TC or queue node */
667         if (tm_node->shaper_profile)
668                 tm_node->shaper_profile->reference_count--;
669         tm_node->parent->reference_count--;
670         if (node_type == I40E_TM_NODE_TYPE_TC) {
671                 TAILQ_REMOVE(&pf->tm_conf.tc_list, tm_node, node);
672                 pf->tm_conf.nb_tc_node--;
673         } else {
674                 TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node);
675                 pf->tm_conf.nb_queue_node--;
676         }
677         rte_free(tm_node);
678
679         return 0;
680 }
681
682 static int
683 i40e_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
684                    int *is_leaf, struct rte_tm_error *error)
685 {
686         enum i40e_tm_node_type node_type = I40E_TM_NODE_TYPE_MAX;
687         struct i40e_tm_node *tm_node;
688
689         if (!is_leaf || !error)
690                 return -EINVAL;
691
692         if (node_id == RTE_TM_NODE_ID_NULL) {
693                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
694                 error->message = "invalid node id";
695                 return -EINVAL;
696         }
697
698         /* check if the node id exists */
699         tm_node = i40e_tm_node_search(dev, node_id, &node_type);
700         if (!tm_node) {
701                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
702                 error->message = "no such node";
703                 return -EINVAL;
704         }
705
706         if (node_type == I40E_TM_NODE_TYPE_QUEUE)
707                 *is_leaf = true;
708         else
709                 *is_leaf = false;
710
711         return 0;
712 }
713
714 static int
715 i40e_level_capabilities_get(struct rte_eth_dev *dev,
716                             uint32_t level_id,
717                             struct rte_tm_level_capabilities *cap,
718                             struct rte_tm_error *error)
719 {
720         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
721
722         if (!cap || !error)
723                 return -EINVAL;
724
725         if (level_id >= I40E_TM_NODE_TYPE_MAX) {
726                 error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
727                 error->message = "too deep level";
728                 return -EINVAL;
729         }
730
731         /* root node */
732         if (level_id == I40E_TM_NODE_TYPE_PORT) {
733                 cap->n_nodes_max = 1;
734                 cap->n_nodes_nonleaf_max = 1;
735                 cap->n_nodes_leaf_max = 0;
736         } else if (level_id == I40E_TM_NODE_TYPE_TC) {
737                 /* TC */
738                 cap->n_nodes_max = I40E_MAX_TRAFFIC_CLASS;
739                 cap->n_nodes_nonleaf_max = I40E_MAX_TRAFFIC_CLASS;
740                 cap->n_nodes_leaf_max = 0;
741         } else {
742                 /* queue */
743                 cap->n_nodes_max = hw->func_caps.num_tx_qp;
744                 cap->n_nodes_nonleaf_max = 0;
745                 cap->n_nodes_leaf_max = hw->func_caps.num_tx_qp;
746         }
747
748         cap->non_leaf_nodes_identical = true;
749         cap->leaf_nodes_identical = true;
750
751         if (level_id != I40E_TM_NODE_TYPE_QUEUE) {
752                 cap->nonleaf.shaper_private_supported = true;
753                 cap->nonleaf.shaper_private_dual_rate_supported = false;
754                 cap->nonleaf.shaper_private_rate_min = 0;
755                 /* 40Gbps -> 5GBps */
756                 cap->nonleaf.shaper_private_rate_max = 5000000000ull;
757                 cap->nonleaf.shaper_shared_n_max = 0;
758                 if (level_id == I40E_TM_NODE_TYPE_PORT)
759                         cap->nonleaf.sched_n_children_max =
760                                 I40E_MAX_TRAFFIC_CLASS;
761                 else
762                         cap->nonleaf.sched_n_children_max =
763                                 hw->func_caps.num_tx_qp;
764                 cap->nonleaf.sched_sp_n_priorities_max = 1;
765                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
766                 cap->nonleaf.sched_wfq_n_groups_max = 0;
767                 cap->nonleaf.sched_wfq_weight_max = 1;
768                 cap->nonleaf.stats_mask = 0;
769
770                 return 0;
771         }
772
773         /* queue node */
774         cap->leaf.shaper_private_supported = true;
775         cap->leaf.shaper_private_dual_rate_supported = false;
776         cap->leaf.shaper_private_rate_min = 0;
777         /* 40Gbps -> 5GBps */
778         cap->leaf.shaper_private_rate_max = 5000000000ull;
779         cap->leaf.shaper_shared_n_max = 0;
780         cap->leaf.cman_head_drop_supported = false;
781         cap->leaf.cman_wred_context_private_supported = true;
782         cap->leaf.cman_wred_context_shared_n_max = 0;
783         cap->leaf.stats_mask = 0;
784
785         return 0;
786 }
787
788 static int
789 i40e_node_capabilities_get(struct rte_eth_dev *dev,
790                            uint32_t node_id,
791                            struct rte_tm_node_capabilities *cap,
792                            struct rte_tm_error *error)
793 {
794         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
795         enum i40e_tm_node_type node_type;
796         struct i40e_tm_node *tm_node;
797
798         if (!cap || !error)
799                 return -EINVAL;
800
801         if (node_id == RTE_TM_NODE_ID_NULL) {
802                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
803                 error->message = "invalid node id";
804                 return -EINVAL;
805         }
806
807         /* check if the node id exists */
808         tm_node = i40e_tm_node_search(dev, node_id, &node_type);
809         if (!tm_node) {
810                 error->type = RTE_TM_ERROR_TYPE_NODE_ID;
811                 error->message = "no such node";
812                 return -EINVAL;
813         }
814
815         cap->shaper_private_supported = true;
816         cap->shaper_private_dual_rate_supported = false;
817         cap->shaper_private_rate_min = 0;
818         /* 40Gbps -> 5GBps */
819         cap->shaper_private_rate_max = 5000000000ull;
820         cap->shaper_shared_n_max = 0;
821
822         if (node_type == I40E_TM_NODE_TYPE_QUEUE) {
823                 cap->leaf.cman_head_drop_supported = false;
824                 cap->leaf.cman_wred_context_private_supported = true;
825                 cap->leaf.cman_wred_context_shared_n_max = 0;
826         } else {
827                 if (node_type == I40E_TM_NODE_TYPE_PORT)
828                         cap->nonleaf.sched_n_children_max =
829                                 I40E_MAX_TRAFFIC_CLASS;
830                 else
831                         cap->nonleaf.sched_n_children_max =
832                                 hw->func_caps.num_tx_qp;
833                 cap->nonleaf.sched_sp_n_priorities_max = 1;
834                 cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
835                 cap->nonleaf.sched_wfq_n_groups_max = 0;
836                 cap->nonleaf.sched_wfq_weight_max = 1;
837         }
838
839         cap->stats_mask = 0;
840
841         return 0;
842 }
843
/**
 * Commit the cached TM hierarchy to the hardware.
 *
 * Translates the software node tree (root/TC/queue nodes in pf->tm_conf)
 * into admin-queue bandwidth-limit commands on the main VSI. Port-level and
 * TC-level max bandwidth are mutually exclusive: when the port (root) node
 * carries a shaper, no TC may carry one, and vice versa. Queue-level shaping
 * is not supported and is rejected. Shaper rates (bytes per second, per
 * rte_tm) are converted into the hardware's 50Mbps credit granularity.
 *
 * @param dev           port being configured
 * @param clear_on_fail when nonzero, wipe and re-init the whole cached TM
 *                      configuration if the commit fails
 * @param error         filled with a type/message on failure; must be non-NULL
 * @return 0 on success, -EINVAL on any failure
 */
static int
i40e_hierarchy_commit(struct rte_eth_dev *dev,
		      int clear_on_fail,
		      struct rte_tm_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_tm_node_list *tc_list = &pf->tm_conf.tc_list;
	struct i40e_tm_node_list *queue_list = &pf->tm_conf.queue_list;
	struct i40e_tm_node *tm_node;
	struct i40e_vsi *vsi;
	struct i40e_hw *hw;
	struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
	uint64_t bw;
	uint8_t tc_map;
	int ret;
	int i;

	if (!error)
		return -EINVAL;

	/* check the setting */
	/* no root node means no hierarchy was built: nothing to program */
	if (!pf->tm_conf.root)
		goto done;

	vsi = pf->main_vsi;
	hw = I40E_VSI_TO_HW(vsi);

	/**
	 * Don't support bandwidth control for port and TCs in parallel.
	 * If the port has a max bandwidth, the TCs should have none.
	 */
	/* port */
	if (pf->tm_conf.root->shaper_profile)
		bw = pf->tm_conf.root->shaper_profile->profile.peak.rate;
	else
		bw = 0;
	if (bw) {
		/* check if any TC has a max bandwidth */
		TAILQ_FOREACH(tm_node, tc_list, node) {
			if (tm_node->shaper_profile &&
			    tm_node->shaper_profile->profile.peak.rate) {
				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
				error->message = "no port and TC max bandwidth"
						 " in parallel";
				goto fail_clear;
			}
		}

		/* interpret Bps to 50Mbps */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		/* set the max bandwidth */
		ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid,
						  (uint16_t)bw, 0, NULL);
		if (ret) {
			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
			error->message = "fail to set port max bandwidth";
			goto fail_clear;
		}

		/* port-level shaping applied; skip the TC/queue paths */
		goto done;
	}

	/* TC */
	memset(&tc_bw, 0, sizeof(tc_bw));
	tc_bw.tc_valid_bits = vsi->enabled_tc;
	tc_map = vsi->enabled_tc;
	TAILQ_FOREACH(tm_node, tc_list, node) {
		if (!tm_node->reference_count) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "TC without queue assigned";
			goto fail_clear;
		}

		/*
		 * Find the lowest still-unclaimed enabled TC bit for this
		 * node, then consume it. NOTE(review): this assumes tc_list
		 * order matches the order of enabled TC bits — TODO confirm
		 * against how i40e_node_add builds the list.
		 */
		i = 0;
		while (i < I40E_MAX_TRAFFIC_CLASS && !(tc_map & BIT_ULL(i)))
			i++;
		if (i >= I40E_MAX_TRAFFIC_CLASS) {
			/* more TC nodes than TCs enabled on the VSI */
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "cannot find the TC";
			goto fail_clear;
		}
		tc_map &= ~BIT_ULL(i);

		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		/* a TC without a shaper keeps a zero credit (unlimited) */
		if (!bw)
			continue;

		/* interpret Bps to 50Mbps */
		bw = bw * 8 / 1000 / 1000 / I40E_QOS_BW_GRANULARITY;

		tc_bw.tc_bw_credits[i] = rte_cpu_to_le_16((uint16_t)bw);
	}

	/* queue-level shaping is not supported by this driver */
	TAILQ_FOREACH(tm_node, queue_list, node) {
		if (tm_node->shaper_profile)
			bw = tm_node->shaper_profile->profile.peak.rate;
		else
			bw = 0;
		if (bw) {
			error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
			error->message = "not support queue QoS";
			goto fail_clear;
		}
	}

	/* program all per-TC credits in a single admin-queue command */
	ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
	if (ret) {
		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
		error->message = "fail to set TC max bandwidth";
		goto fail_clear;
	}

done:
	pf->tm_conf.committed = true;
	return 0;

fail_clear:
	/* clear all the traffic manager configuration */
	if (clear_on_fail) {
		i40e_tm_conf_uninit(dev);
		i40e_tm_conf_init(dev);
	}
	return -EINVAL;
}