misc: deprecate dpdk hqos
vpp.git: extras/deprecated/dpdk-hqos/hqos.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <string.h>
#include <fcntl.h>

#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>

#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>

#include <vlib/pci/pci.h>
#include <vlibmemory/api.h>
#include <vlibmemory/vl_memory_msg_enum.h>      /* enumerate all vlib messages */

#define vl_typedefs             /* define message structures */
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_typedefs

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_printfun

#include <dpdk/device/dpdk_priv.h>

/***
 *
 * HQoS default configuration values
 *
 ***/

static dpdk_device_config_hqos_t hqos_params_default = {
  .hqos_thread_valid = 0,

  .swq_size = 4096,
  .burst_enq = 256,
  .burst_deq = 220,

  /*
   * Packet field to identify the subport.
   *
   * Default value: Since only one subport is defined by default (see below:
   *     n_subports_per_port = 1), the subport ID is hardcoded to 0.
   */
  .pktfield0_slabpos = 0,
  .pktfield0_slabmask = 0,

  /*
   * Packet field to identify the pipe.
   *
   * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23
   */
  .pktfield1_slabpos = 40,
  .pktfield1_slabmask = 0x0000000FFF000000LLU,

  /*
   * Packet field used as index into the TC translation table to identify
   * the traffic class and queue.
   *
   * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
   */
  .pktfield2_slabpos = 8,
  .pktfield2_slabmask = 0x00000000000000FCLLU,
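
  /*
   * Worked example (for illustration): the slab for pktfield2 is the
   * big-endian u64 at byte offset 8, i.e. packet bytes 8..15, whose least
   * significant byte is the IPv4 ToS byte (byte 15 of an Ethernet/IPv4
   * frame). Masking with 0xFC and shifting right by ctz (0xFC) = 2 yields
   * the 6-bit DSCP value used below to index tc_table.
   */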
  .tc_table = {
	       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	       },

  /* port */
  .port = {
	   .name = NULL,	/* Set at init */
	   .socket = 0,		/* Set at init */
	   .rate = 1250000000,	/* Assuming 10GbE port */
	   .mtu = 14 + 1500,	/* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
	   .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	   .n_subports_per_port = 1,
	   .n_pipes_per_subport = 4096,
	   .qsize = {64, 64, 64, 64},
	   .pipe_profiles = NULL,	/* Set at config */
	   .n_pipe_profiles = 1,

#ifdef RTE_SCHED_RED
	   .red_params = {
	     /* Traffic Class 0 - Colors Green / Yellow / Red */
	     [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

	     /* Traffic Class 1 - Colors Green / Yellow / Red */
	     [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

	     /* Traffic Class 2 - Colors Green / Yellow / Red */
	     [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

	     /* Traffic Class 3 - Colors Green / Yellow / Red */
	     [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
	     [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
	     },
#endif /* RTE_SCHED_RED */
	   },
};

static struct rte_sched_subport_params hqos_subport_params_default = {
  .tb_rate = 1250000000,	/* 10GbE line rate (measured in bytes/second) */
  .tb_size = 1000000,
  .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
  .tc_period = 10,
};

static struct rte_sched_pipe_params hqos_pipe_params_default = {
  .tb_rate = 305175,		/* 10GbE line rate divided by 4K pipes */
  .tb_size = 1000000,
  .tc_rate = {305175, 305175, 305175, 305175},
  .tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
  .tc_ov_weight = 1,
#endif
  .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
};
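
/*
 * Rate arithmetic behind the defaults above: a 10GbE port carries
 * 10^10 bits/s = 1,250,000,000 bytes/s, which is the port and subport
 * token-bucket rate. Dividing that evenly across the default 4096 pipes
 * per subport gives 1250000000 / 4096 = ~305175 bytes/s as the per-pipe
 * tb_rate.
 */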

/***
 *
 * HQoS configuration
 *
 ***/

int
dpdk_hqos_validate_mask (u64 mask, u32 n)
{
  /* Handle the exceptions */
  if (n == 0)
    return -1;			/* Error */

  if ((mask == 0) && (n == 1))
    return 0;			/* OK */

  if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
    return -2;			/* Error */

  /* mask is known to be non-zero past this point, so the leading/trailing
     zero counts below are well defined */
  int count = __builtin_popcountll (mask);
  int pos_lead = sizeof (u64) * 8 - count_leading_zeros (mask);
  int pos_trail = count_trailing_zeros (mask);
  int count_expected = __builtin_popcount (n - 1);

  /* Check that mask is contiguous */
  if ((pos_lead - pos_trail) != count)
    return -3;			/* Error */

  /* Check that mask contains the expected number of bits set */
  if (count != count_expected)
    return -4;			/* Error */

  return 0;			/* OK */
}
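
/*
 * Example (for illustration): with n = 4096 pipes, a valid pipe mask must
 * be a contiguous run of popcount (4096 - 1) = 12 bits, such as the default
 * pktfield1_slabmask 0x0000000FFF000000; mask = 0 is accepted only for
 * n = 1, in which case the field value is hardcoded to 0.
 */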

void
dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
					      hqos, u32 pipe_profile_id)
{
  memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
	  sizeof (hqos_pipe_params_default));
}

void
dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
{
  struct rte_sched_subport_params *subport_params;
  struct rte_sched_pipe_params *pipe_params;
  u32 *pipe_map;
  u32 i;

  memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));

  /* pipe */
  vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);

  for (i = 0; i < vec_len (hqos->pipe); i++)
    memcpy (&pipe_params[i],
	    &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));

  hqos->port.pipe_profiles = hqos->pipe;

  /* subport */
  vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);

  for (i = 0; i < vec_len (hqos->subport); i++)
    memcpy (&subport_params[i],
	    &hqos_subport_params_default,
	    sizeof (hqos_subport_params_default));

  /* pipe -> pipe profile map: every pipe uses profile 0 by default */
  vec_add2 (hqos->pipe_map,
	    pipe_map,
	    hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);

  for (i = 0; i < vec_len (hqos->pipe_map); i++)
    pipe_map[i] = 0;
}
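
/*
 * These defaults were normally overridden from the dpdk stanza of
 * startup.conf. A minimal sketch, with syntax as documented for the VPP
 * releases that shipped this feature (the PCI address is illustrative):
 *
 *   dpdk {
 *     dev 0000:02:00.0 { hqos }
 *   }
 */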

/***
 *
 * HQoS init
 *
 ***/

clib_error_t *
dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  char name[32];
  u32 subport_id, i;
  int rv;

  /* Detect the set of worker threads */
  int worker_thread_first = 0;
  int worker_thread_count = 0;

  uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  vlib_thread_registration_t *tr =
    p ? (vlib_thread_registration_t *) p[0] : 0;

  if (tr && tr->count > 0)
    {
      worker_thread_first = tr->first_index;
      worker_thread_count = tr->count;
    }

  /* Allocate the per-thread device data array */
  vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  clib_memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));

  vec_validate_aligned (xd->hqos_ht, 0, CLIB_CACHE_LINE_BYTES);
  clib_memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));

  /* Allocate one SWQ per worker thread, plus one for the main thread
     (index 0), in the I/O TX thread data structure */
  vec_validate (xd->hqos_ht->swq, worker_thread_count);

  /* SWQ */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

      snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
		xd->port_id);
      xd->hqos_ht->swq[i] =
	rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
      if (xd->hqos_ht->swq[i] == NULL)
	return clib_error_return (0,
				  "SWQ-worker%u-to-device%u: rte_ring_create err",
				  i, xd->port_id);
    }
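
  /*
   * Note on the ring flags above: each SWQ has exactly one producer (a
   * single worker thread, or the main thread for index 0) and one consumer
   * (this device's HQoS thread), so the lockless single-producer /
   * single-consumer ring variant (RING_F_SP_ENQ | RING_F_SC_DEQ) is safe.
   */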

  /*
   * HQoS
   */

  /* HQoS port */
  snprintf (name, sizeof (name), "HQoS%u", xd->port_id);
  hqos->port.name = strdup (name);
  if (hqos->port.name == NULL)
    return clib_error_return (0, "HQoS%u: strdup err", xd->port_id);

  hqos->port.socket = rte_eth_dev_socket_id (xd->port_id);
  if (hqos->port.socket == SOCKET_ID_ANY)
    hqos->port.socket = 0;

  xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
  if (xd->hqos_ht->hqos == NULL)
    return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
			      xd->port_id);

  /* HQoS subport */
  for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
       subport_id++)
    {
      u32 pipe_id;

      rv =
	rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
				  &hqos->subport[subport_id]);
      if (rv)
	return clib_error_return (0,
				  "HQoS%u subport %u: rte_sched_subport_config err (%d)",
				  xd->port_id, subport_id, rv);

      /* HQoS pipe */
      for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
	{
	  u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
	  u32 profile_id = hqos->pipe_map[pos];

	  rv =
	    rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
				   profile_id);
	  if (rv)
	    return clib_error_return (0,
				      "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
				      xd->port_id, subport_id, pipe_id, rv);
	}
    }

  /* Set up per-thread device data for the I/O TX thread */
  xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
  xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
  vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
  vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
  xd->hqos_ht->pkts_enq_len = 0;
  xd->hqos_ht->swq_pos = 0;
  xd->hqos_ht->flush_count = 0;

  /* Set up per-thread device data for each worker thread */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 tid;
      if (i)
	tid = worker_thread_first + (i - 1);
      else
	tid = i;

      xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
      xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
      xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
      xd->hqos_wt[tid].hqos_field0_slabshr =
	count_trailing_zeros (hqos->pktfield0_slabmask);
      xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
      xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
      xd->hqos_wt[tid].hqos_field1_slabshr =
	count_trailing_zeros (hqos->pktfield1_slabmask);
      xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
      xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
      xd->hqos_wt[tid].hqos_field2_slabshr =
	count_trailing_zeros (hqos->pktfield2_slabmask);
      memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
	      sizeof (hqos->tc_table));
    }

  return 0;
}
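
/*
 * Resulting data path (summary of the code below): worker threads stamp
 * scheduler metadata on each mbuf (dpdk_hqos_metadata_set) and enqueue it
 * onto their per-device SWQ; the device's HQoS thread drains the SWQs into
 * the rte_sched port and transmits whatever the scheduler releases on the
 * device TX queue.
 */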

/***
 *
 * HQoS run-time
 *
 ***/
/*
 * dpdk_hqos_thread_internal_hqos_dbg_bypass - Debug variant of the HQoS
 * thread main loop that bypasses the rte_sched port: packets dequeued from
 * the worker SWQs are transmitted directly on the device TX queue.
 *
 * vm
 *     vlib main of the current thread
 */
static_always_inline void
dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (dev_pos >= n_devs)
	dev_pos = 0;

      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->port_id;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;

	  /* HWQ TX enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      u32 n_pkts = rte_eth_tx_burst (device_index,
					     (uint16_t) queue_id,
					     pkts_enq,
					     (uint16_t) pkts_enq_len);

	      for (; n_pkts < pkts_enq_len; n_pkts++)
		rte_pktmbuf_free (pkts_enq[n_pkts]);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      if (pkts_enq_len)
	{
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      /* Flush the stale partial burst straight to the TX queue;
	         the rte_sched port is bypassed in this debug mode, so
	         enqueuing into it would strand the packets */
	      u32 n_pkts = rte_eth_tx_burst (device_index,
					     (uint16_t) queue_id,
					     pkts_enq,
					     (uint16_t) pkts_enq_len);

	      for (; n_pkts < pkts_enq_len; n_pkts++)
		rte_pktmbuf_free (pkts_enq[n_pkts]);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /* Advance to next device */
      dev_pos++;
    }
}

static_always_inline void
dpdk_hqos_thread_internal (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (PREDICT_FALSE (n_devs == 0))
	{
	  dev_pos = 0;
	  continue;
	}
      if (dev_pos >= n_devs)
	dev_pos = 0;

      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->port_id;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      struct rte_mbuf **pkts_deq = hqos->pkts_deq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;

      /*
       * SWQ dequeue and HQoS enqueue for current device
       */
      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;

	  /* HQoS enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      if (pkts_enq_len)
	{
	  /* Partial burst: rather than waiting indefinitely for a full
	     burst, push the stale packets into the scheduler once this
	     device has been polled HQOS_FLUSH_COUNT_THRESHOLD times, so
	     latency stays bounded at low packet rates */
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /*
       * HQoS dequeue and HWQ TX enqueue for current device; retry the TX
       * burst until everything the scheduler released has been sent
       */
      {
	u32 pkts_deq_len, n_pkts;

	pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
					       pkts_deq,
					       hqos->hqos_burst_deq);

	for (n_pkts = 0; n_pkts < pkts_deq_len;)
	  n_pkts += rte_eth_tx_burst (device_index,
				      (uint16_t) queue_id,
				      &pkts_deq[n_pkts],
				      (uint16_t) (pkts_deq_len - n_pkts));
      }

      /* Advance to next device */
      dev_pos++;
    }
}

/*
 * dpdk_hqos_thread - Contains the main loop of an HQoS thread.
 *
 * w
 *     Information for the current thread
 */
void
dpdk_hqos_thread (vlib_worker_thread_t * w)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  dpdk_main_t *dm = &dpdk_main;

  vm = vlib_get_main ();

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  /* Wait until the dpdk init sequence is complete */
  while (tm->worker_thread_release == 0)
    vlib_worker_thread_barrier_check ();

  if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0)
    {
      clib_error
	("current I/O TX thread does not have any devices assigned to it");
      return;
    }

  if (DPDK_HQOS_DBG_BYPASS)
    dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
  else
    dpdk_hqos_thread_internal (vm);
}

void
dpdk_hqos_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_worker_thread_init (w);
  dpdk_hqos_thread (w);
}

/* *INDENT-OFF* */
VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
{
  .name = "hqos-threads",
  .short_name = "hqos-threads",
  .function = dpdk_hqos_thread_fn,
};
/* *INDENT-ON* */
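
/*
 * HQoS thread placement came from the cpu stanza of startup.conf. A
 * minimal sketch (core numbers are illustrative; the corelist keyword is
 * derived from the "hqos-threads" registration name above):
 *
 *   cpu {
 *     main-core 0
 *     corelist-workers 1
 *     corelist-hqos-threads 2
 *   }
 */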

/*
 * HQoS run-time code to be called by the worker threads
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)     \
({                                                              \
  u64 slab = *((u64 *) &byte_array[slab_pos]);                  \
  u64 val = (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr;   \
  val;                                                          \
})
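
/*
 * BITFIELD reads the 8 packet bytes at slab_pos as one big-endian u64,
 * masks, and shifts. Worked example (for illustration) using the default
 * pipe field: slab_pos = 40 loads packet bytes 40..47; the mask
 * 0x0000000FFF000000 with slab_shr = ctz (mask) = 24 extracts 12 bits of
 * UDP payload, i.e. a pipe ID in the range 0..4095.
 */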

#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
  ((((u64) (queue)) & 0x3) |                               \
  ((((u64) (traffic_class)) & 0x3) << 2) |                 \
  ((((u64) (color)) & 0x3) << 4) |                         \
  ((((u64) (subport)) & 0xFFFF) << 16) |                   \
  ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
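
/*
 * Layout of the resulting 64-bit sched word: bits 1:0 queue, bits 3:2
 * traffic class, bits 5:4 color, bits 31:16 subport, bits 63:32 pipe
 * (bits 15:6 unused).
 */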

void
dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
			struct rte_mbuf **pkts, u32 n_pkts)
{
  u32 i;

  /* Process four packets per iteration to expose independent work */
  for (i = 0; i < (n_pkts & (~0x3)); i += 4)
    {
      struct rte_mbuf *pkt0 = pkts[i];
      struct rte_mbuf *pkt1 = pkts[i + 1];
      struct rte_mbuf *pkt2 = pkts[i + 2];
      struct rte_mbuf *pkt3 = pkts[i + 3];

      u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
      u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
      u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
      u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);

      u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
      u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;

      u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
      u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;

      u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
      u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;

      u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
      u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;

      u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport, pkt0_pipe,
						 pkt0_tc, pkt0_tc_q, 0);
      u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport, pkt1_pipe,
						 pkt1_tc, pkt1_tc_q, 0);
      u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport, pkt2_pipe,
						 pkt2_tc, pkt2_tc_q, 0);
      u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport, pkt3_pipe,
						 pkt3_tc, pkt3_tc_q, 0);

      pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
      pkt0->hash.sched.hi = pkt0_sched >> 32;
      pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
      pkt1->hash.sched.hi = pkt1_sched >> 32;
      pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
      pkt2->hash.sched.hi = pkt2_sched >> 32;
      pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
      pkt3->hash.sched.hi = pkt3_sched >> 32;
    }

  /* Handle the 0..3 remaining packets */
  for (; i < n_pkts; i++)
    {
      struct rte_mbuf *pkt = pkts[i];

      u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);

      u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
				  hqos->hqos_field0_slabmask,
				  hqos->hqos_field0_slabshr);
      u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
			       hqos->hqos_field1_slabmask,
			       hqos->hqos_field1_slabshr);
      u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
			       hqos->hqos_field2_slabmask,
			       hqos->hqos_field2_slabshr);
      u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
      u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;

      u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport, pkt_pipe,
						pkt_tc, pkt_tc_q, 0);

      pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
      pkt->hash.sched.hi = pkt_sched >> 32;
    }
}
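
/*
 * Note: rte_sched_port_enqueue () in the HQoS thread reads this mbuf sched
 * metadata to place each packet into its subport/pipe/traffic-class/queue,
 * so workers must call dpdk_hqos_metadata_set () before enqueuing mbufs
 * onto their SWQ.
 */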

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */