New upstream version 18.02
[deb_dpdk.git] / drivers / net / softnic / rte_eth_softnic_internals.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4
5 #ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
6 #define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
7
8 #include <stdint.h>
9
10 #include <rte_mbuf.h>
11 #include <rte_sched.h>
12 #include <rte_ethdev_driver.h>
13 #include <rte_tm_driver.h>
14
15 #include "rte_eth_softnic.h"
16
17 /**
18  * PMD Parameters
19  */
20
/** Optional features a soft device can enable via its "soft.flags" field. */
enum pmd_feature {
	PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
};
24
/* Default for pmd_params.soft.intrusive: 0 = access the hard device
 * through its public API only (see the field's comment below).
 * Guarded so a build system can override it.
 */
#ifndef INTRUSIVE
#define INTRUSIVE                                       0
#endif
28
/** Configuration captured from the device arguments at probe time. */
struct pmd_params {
	/** Parameters for the soft device (to be created) */
	struct {
		const char *name; /**< Name */
		uint32_t flags; /**< Feature flags (enum pmd_feature bitmask) */

		/** 0 = Access hard device though API only (potentially slower,
		 *      but safer);
		 *  1 = Access hard device private data structures is allowed
		 *      (potentially faster).
		 */
		int intrusive;

		/** Traffic Management (TM) */
		struct {
			uint32_t rate; /**< Rate (bytes/second) */
			uint32_t nb_queues; /**< Number of queues */
			uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
			/**< Queue size per traffic class */
			uint32_t enq_bsz; /**< Enqueue burst size */
			uint32_t deq_bsz; /**< Dequeue burst size */
		} tm;
	} soft;

	/** Parameters for the hard device (existing) */
	struct {
		char *name; /**< Name */
		uint16_t tx_queue_id; /**< TX queue ID */
	} hard;
};
59
60 /**
61  * Default Internals
62  */
63
/* Defaults for the non-TM (pass-through) datapath; both are overridable. */
#ifndef DEFAULT_BURST_SIZE
#define DEFAULT_BURST_SIZE                              32
#endif

/* Number of packets after which buffered TX is force-flushed. */
#ifndef FLUSH_COUNT_THRESHOLD
#define FLUSH_COUNT_THRESHOLD                   (1 << 17)
#endif
71
/** Run-time state for the default (non-TM) datapath. */
struct default_internals {
	struct rte_mbuf **pkts; /**< Packet buffer array (ownership/sizing set up elsewhere — not visible here) */
	uint32_t pkts_len; /**< Number of packets currently held in pkts */
	uint32_t txq_pos; /**< Position within the TX queue */
	uint32_t flush_count; /**< Counter driving FLUSH_COUNT_THRESHOLD-based flushing */
};
78
79 /**
80  * Traffic Management (TM) Internals
81  */
82
/* Compile-time upper bounds on the TM hierarchy size; both overridable.
 * They size the static tables in struct tm_params below.
 */
#ifndef TM_MAX_SUBPORTS
#define TM_MAX_SUBPORTS                                 8
#endif

#ifndef TM_MAX_PIPES_PER_SUBPORT
#define TM_MAX_PIPES_PER_SUBPORT                        4096
#endif
90
/** Blueprint for configuring the librte_sched port that implements TM:
 * per-port, per-subport and per-pipe parameters in the form expected by
 * the rte_sched API.
 */
struct tm_params {
	struct rte_sched_port_params port_params;

	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];

	struct rte_sched_pipe_params
		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
	uint32_t n_pipe_profiles; /**< Valid entries in pipe_profiles[] */
	/** Per-pipe profile index, flattened as subport * pipes-per-subport + pipe */
	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
};
101
102 /* TM Levels */
/* TM Levels: hierarchy depth, root (port) to leaf (queue).
 * TM_NODE_LEVEL_MAX is a count sentinel used to size per-level arrays.
 */
enum tm_node_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};
111
112 /* TM Shaper Profile */
/* TM Shaper Profile: one entry of the shaper profile list kept in
 * struct tm_hierarchy. Reference-counted via n_users.
 */
struct tm_shaper_profile {
	TAILQ_ENTRY(tm_shaper_profile) node; /**< Linkage for tm_shaper_profile_list */
	uint32_t shaper_profile_id; /**< Profile ID (rte_tm API) */
	uint32_t n_users; /**< Number of nodes referencing this profile */
	struct rte_tm_shaper_params params;
};

TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);
121
122 /* TM Shared Shaper */
/* TM Shared Shaper: a shaper instance shared by multiple nodes,
 * backed by a shaper profile (referenced by ID, not by pointer).
 */
struct tm_shared_shaper {
	TAILQ_ENTRY(tm_shared_shaper) node; /**< Linkage for tm_shared_shaper_list */
	uint32_t shared_shaper_id; /**< Shared shaper ID (rte_tm API) */
	uint32_t n_users; /**< Number of nodes referencing this shared shaper */
	uint32_t shaper_profile_id; /**< ID of the backing shaper profile */
};

TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);
131
132 /* TM WRED Profile */
/* TM WRED Profile: congestion management (WRED) parameters,
 * reference-counted like the shaper profiles.
 */
struct tm_wred_profile {
	TAILQ_ENTRY(tm_wred_profile) node; /**< Linkage for tm_wred_profile_list */
	uint32_t wred_profile_id; /**< Profile ID (rte_tm API) */
	uint32_t n_users; /**< Number of nodes referencing this profile */
	struct rte_tm_wred_params params;
};

TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);
141
142 /* TM Node */
/* TM Node: one node of the scheduler hierarchy. Parent is tracked both by
 * ID and by pointer; shaper/WRED profiles are tracked by pointer (their
 * n_users counters are presumably updated when a node attaches/detaches —
 * not visible in this header).
 */
struct tm_node {
	TAILQ_ENTRY(tm_node) node; /**< Linkage for tm_node_list */
	uint32_t node_id; /**< Node ID (rte_tm API) */
	uint32_t parent_node_id; /**< Parent node ID (rte_tm API) */
	uint32_t priority; /**< Scheduling priority vs. siblings */
	uint32_t weight; /**< Scheduling weight within same priority */
	uint32_t level; /**< enum tm_node_level of this node */
	struct tm_node *parent_node; /**< Resolved parent (NULL for root, presumably) */
	struct tm_shaper_profile *shaper_profile; /**< Attached private shaper profile */
	struct tm_wred_profile *wred_profile; /**< Attached WRED profile */
	struct rte_tm_node_params params;
	struct rte_tm_node_stats stats;
	uint32_t n_children; /**< Number of direct child nodes */
};

TAILQ_HEAD(tm_node_list, tm_node);
159
160 /* TM Hierarchy Specification */
/* TM Hierarchy Specification: all objects created through the rte_tm API,
 * plus element counts (overall and per hierarchy level).
 */
struct tm_hierarchy {
	struct tm_shaper_profile_list shaper_profiles;
	struct tm_shared_shaper_list shared_shapers;
	struct tm_wred_profile_list wred_profiles;
	struct tm_node_list nodes;

	uint32_t n_shaper_profiles; /**< Length of shaper_profiles list */
	uint32_t n_shared_shapers; /**< Length of shared_shapers list */
	uint32_t n_wred_profiles; /**< Length of wred_profiles list */
	uint32_t n_nodes; /**< Length of nodes list */

	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX]; /**< Node count per level */
};
174
/** Complete TM state for one soft device: the user-built hierarchy,
 * the rte_sched blueprint derived from it, and the run-time scheduler.
 */
struct tm_internals {
	/** Hierarchy specification
	 *
	 *     -Hierarchy is unfrozen at init and when port is stopped.
	 *     -Hierarchy is frozen on successful hierarchy commit.
	 *     -Run-time hierarchy changes are not allowed, therefore it makes
	 *      sense to keep the hierarchy frozen after the port is started.
	 */
	struct tm_hierarchy h;
	int hierarchy_frozen; /**< Non-zero once the hierarchy is committed */

	/** Blueprints */
	struct tm_params params;

	/** Run-time */
	struct rte_sched_port *sched; /**< librte_sched port instance */
	struct rte_mbuf **pkts_enq; /**< Enqueue-side packet staging array */
	struct rte_mbuf **pkts_deq; /**< Dequeue-side packet staging array */
	uint32_t pkts_enq_len; /**< Number of packets staged in pkts_enq */
	uint32_t txq_pos; /**< Position within the TX queue */
	uint32_t flush_count; /**< Counter driving FLUSH_COUNT_THRESHOLD-based flushing */
};
197
198 /**
199  * PMD Internals
200  */
/** Per-device private data (stored in rte_eth_dev.data->dev_private). */
struct pmd_internals {
	/** Params */
	struct pmd_params params;

	/** Soft device */
	struct {
		struct default_internals def; /**< Default */
		struct tm_internals tm; /**< Traffic Management */
	} soft;

	/** Hard device */
	struct {
		uint16_t port_id; /**< Port ID of the underlying hard device */
	} hard;
};
216
/** Per-RX-queue private data: identifies the hard device queue this
 * soft RX queue maps to.
 */
struct pmd_rx_queue {
	/** Hard device */
	struct {
		uint16_t port_id; /**< Hard device port ID */
		uint16_t rx_queue_id; /**< Hard device RX queue ID */
	} hard;
};
224
225 /**
226  * Traffic Management (TM) Operation
227  */
extern const struct rte_tm_ops pmd_tm_ops;

/** Validate the TM-related PMD parameters against the hard device rate.
 * Return semantics (presumably 0 on success, negative on error — confirm
 * against the implementation in the corresponding .c file).
 */
int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);

/** Initialize TM state for device @p p on NUMA node @p numa_node. */
int
tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

/** Release all TM resources of device @p p. */
void
tm_free(struct pmd_internals *p);

/** Start the TM datapath (port start time). */
int
tm_start(struct pmd_internals *p);

/** Stop the TM datapath (port stop time). */
void
tm_stop(struct pmd_internals *p);
244
245 static inline int
246 tm_enabled(struct rte_eth_dev *dev)
247 {
248         struct pmd_internals *p = dev->data->dev_private;
249
250         return (p->params.soft.flags & PMD_FEATURE_TM);
251 }
252
253 static inline int
254 tm_used(struct rte_eth_dev *dev)
255 {
256         struct pmd_internals *p = dev->data->dev_private;
257
258         return (p->params.soft.flags & PMD_FEATURE_TM) &&
259                 p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
260 }
261
262 #endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */