/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <string.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>

#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>

#include <vlib/pci/pci.h>
#include <vlibmemory/api.h>
#include <vlibmemory/vl_memory_msg_enum.h>	/* enumerate all vlib messages */
#define vl_typedefs		/* define message structures */
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_typedefs

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vlibmemory/vl_memory_api_h.h>
#undef vl_printfun

#include <dpdk/device/dpdk_priv.h>
/***
 *
 * HQoS default configuration values
 *
 ***/
static dpdk_device_config_hqos_t hqos_params_default = {
  .hqos_thread_valid = 0,

  .swq_size = 4096,
  .burst_enq = 256,
  .burst_deq = 220,
  /*
   * Packet field to identify the subport.
   *
   * Default value: Since only one subport is defined by default (see below:
   * n_subports_per_port = 1), the subport ID is hardcoded to 0.
   */
  .pktfield0_slabpos = 0,
  .pktfield0_slabmask = 0,
  /*
   * Packet field to identify the pipe.
   *
   * Default value: Assuming Ethernet/IPv4/UDP packets, UDP payload bits 12 .. 23
   */
  .pktfield1_slabpos = 40,
  .pktfield1_slabmask = 0x0000000FFF000000LLU,
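  /*
   * Note (worked example): a "slab" is the 64-bit word loaded at byte offset
   * slabpos into the packet and byte-swapped to host order (see BITFIELD
   * below). With 14B Ethernet + 20B IPv4 + 8B UDP headers, offset 40 puts
   * UDP payload byte 2 at slab bits 24..31; the 12-bit mask (bits 24..35)
   * therefore selects UDP payload bits 12..23, i.e. the low nibble of
   * payload byte 1 plus payload byte 2. With n_pipes_per_subport = 4096,
   * these 12 bits map packets evenly onto pipes 0..4095.
   */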
  /*
   * Packet field used as index into TC translation table to identify the
   * traffic class and queue.
   *
   * Default value: Assuming Ethernet/IPv4 packets, IPv4 DSCP field
   */
  .pktfield2_slabpos = 8,
  .pktfield2_slabmask = 0x00000000000000FCLLU,
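  /*
   * Note: with a 14B Ethernet header, byte offset 15 is the IPv4 ToS byte,
   * which lands in the least significant byte of the slab read at offset 8.
   * Masking with 0xFC keeps the top six ToS bits (the DSCP), and the implied
   * shift of 2 (count_trailing_zeros of the mask) yields a 0..63 index into
   * tc_table below.
   */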
  .tc_table =
  {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
  },
  /* port */
  .port = {
    .name = NULL,	/* Set at init */
    .socket = 0,	/* Set at init */
    .rate = 1250000000,	/* Assuming 10GbE port */
    .mtu = 14 + 1500,	/* Assuming Ethernet/IPv4 pkt (Ethernet FCS not included) */
    .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
    .n_subports_per_port = 1,
    .n_pipes_per_subport = 4096,
    .qsize = {64, 64, 64, 64},
    .pipe_profiles = NULL,	/* Set at config */
    .n_pipe_profiles = 1,
#ifdef RTE_SCHED_RED
    .red_params = {
      /* Traffic Class 0 - Colors Green / Yellow / Red */
      [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 1 - Colors Green / Yellow / Red */
      [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 2 - Colors Green / Yellow / Red */
      [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

      /* Traffic Class 3 - Colors Green / Yellow / Red */
      [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
      [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
    },
#endif /* RTE_SCHED_RED */
  },
};
static struct rte_sched_subport_params hqos_subport_params_default = {
  .tb_rate = 1250000000,	/* 10GbE line rate (measured in bytes/second) */
  .tb_size = 1000000,
  .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
  .tc_period = 10,
};
static struct rte_sched_pipe_params hqos_pipe_params_default = {
  .tb_rate = 305175,	/* 10GbE line rate divided by 4K pipes */
  .tb_size = 1000000,
  .tc_rate = {305175, 305175, 305175, 305175},
  .tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
  .tc_ov_weight = 1,
#endif
  .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
};
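/*
 * Note: 305175 is the per-pipe share of the port rate, i.e.
 * 1250000000 bytes/s / 4096 pipes = 305175.78, truncated to an integer.
 */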
static int
dpdk_hqos_validate_mask (u64 mask, u32 n)
{
  int count = __builtin_popcountll (mask);
  int pos_lead = sizeof (u64) * 8 - count_leading_zeros (mask);
  int pos_trail = count_trailing_zeros (mask);
  int count_expected = __builtin_popcount (n - 1);

  /* Handle the exceptions */
  if (n == 0)
    return -1;			/* Error */

  if ((mask == 0) && (n == 1))
    return 0;			/* OK */

  if (((mask == 0) && (n != 1)) || ((mask != 0) && (n == 1)))
    return -2;			/* Error */

  /* Check that mask is contiguous */
  if ((pos_lead - pos_trail) != count)
    return -3;			/* Error */

  /* Check that mask contains the expected number of bits set */
  if (count != count_expected)
    return -4;			/* Error */

  return 0;			/* OK */
}
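/*
 * Example (editor's illustration): for the default pipe field,
 * dpdk_hqos_validate_mask (0x0000000FFF000000LLU, 4096) returns 0:
 * the mask has 12 contiguous bits set and __builtin_popcount (4095) == 12,
 * so the extracted field can address all 4096 pipes.
 */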
void
dpdk_device_config_hqos_pipe_profile_default (dpdk_device_config_hqos_t *
					      hqos, u32 pipe_profile_id)
{
  memcpy (&hqos->pipe[pipe_profile_id], &hqos_pipe_params_default,
	  sizeof (hqos_pipe_params_default));
}
void
dpdk_device_config_hqos_default (dpdk_device_config_hqos_t * hqos)
{
  struct rte_sched_subport_params *subport_params;
  struct rte_sched_pipe_params *pipe_params;
  u32 *pipe_map;
  u32 i;

  memcpy (hqos, &hqos_params_default, sizeof (hqos_params_default));

  /* pipe */
  vec_add2 (hqos->pipe, pipe_params, hqos->port.n_pipe_profiles);

  for (i = 0; i < vec_len (hqos->pipe); i++)
    memcpy (&pipe_params[i],
	    &hqos_pipe_params_default, sizeof (hqos_pipe_params_default));

  hqos->port.pipe_profiles = hqos->pipe;

  /* subport */
  vec_add2 (hqos->subport, subport_params, hqos->port.n_subports_per_port);

  for (i = 0; i < vec_len (hqos->subport); i++)
    memcpy (&subport_params[i],
	    &hqos_subport_params_default,
	    sizeof (hqos_subport_params_default));

  /* pipe -> profile map: every pipe uses profile 0 by default */
  vec_add2 (hqos->pipe_map,
	    pipe_map,
	    hqos->port.n_subports_per_port * hqos->port.n_pipes_per_subport);

  for (i = 0; i < vec_len (hqos->pipe_map); i++)
    pipe_map[i] = 0;
}
clib_error_t *
dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  char name[32];
  u32 subport_id, i;
  int rv;

  /* Detect the set of worker threads */
  int worker_thread_first = 0;
  int worker_thread_count = 0;

  uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  vlib_thread_registration_t *tr =
    p ? (vlib_thread_registration_t *) p[0] : 0;

  if (tr && tr->count > 0)
    {
      worker_thread_first = tr->first_index;
      worker_thread_count = tr->count;
    }
  /* Allocate the per-thread device data array */
  vec_validate_aligned (xd->hqos_wt, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  clib_memset (xd->hqos_wt, 0, tm->n_vlib_mains * sizeof (xd->hqos_wt[0]));

  vec_validate_aligned (xd->hqos_ht, 0, CLIB_CACHE_LINE_BYTES);
  clib_memset (xd->hqos_ht, 0, sizeof (xd->hqos_ht[0]));

  /* Allocate space for one SWQ per worker thread in the I/O TX thread data structure */
  vec_validate (xd->hqos_ht->swq, worker_thread_count);
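  /*
   * Note: the SWQ vector has worker_thread_count + 1 entries; slot 0 is
   * used by the main thread, slots 1..worker_thread_count map to the worker
   * threads (see the tid computation further down).
   */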
  /* SWQ */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 swq_flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

      snprintf (name, sizeof (name), "SWQ-worker%u-to-device%u", i,
		xd->port_id);
      xd->hqos_ht->swq[i] =
	rte_ring_create (name, hqos->swq_size, xd->cpu_socket, swq_flags);
      if (xd->hqos_ht->swq[i] == NULL)
	return clib_error_return (0,
				  "SWQ-worker%u-to-device%u: rte_ring_create err",
				  i, xd->port_id);
    }
  /*
   * HQoS
   */

  /* HQoS port */
  snprintf (name, sizeof (name), "HQoS%u", xd->port_id);
  hqos->port.name = strdup (name);
  if (hqos->port.name == NULL)
    return clib_error_return (0, "HQoS%u: strdup err", xd->port_id);

  hqos->port.socket = rte_eth_dev_socket_id (xd->port_id);
  if (hqos->port.socket == SOCKET_ID_ANY)
    hqos->port.socket = 0;
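  /*
   * Note: rte_eth_dev_socket_id () returns SOCKET_ID_ANY (-1) when the
   * device's NUMA node cannot be determined; fall back to socket 0 so
   * rte_sched_port_config () gets a valid socket for its allocations.
   */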
  xd->hqos_ht->hqos = rte_sched_port_config (&hqos->port);
  if (xd->hqos_ht->hqos == NULL)
    return clib_error_return (0, "HQoS%u: rte_sched_port_config err",
			      xd->port_id);
  /* HQoS subport */
  for (subport_id = 0; subport_id < hqos->port.n_subports_per_port;
       subport_id++)
    {
      u32 pipe_id;

      rv =
	rte_sched_subport_config (xd->hqos_ht->hqos, subport_id,
				  &hqos->subport[subport_id]);
      if (rv)
	return clib_error_return (0,
				  "HQoS%u subport %u: rte_sched_subport_config err (%d)",
				  xd->port_id, subport_id, rv);

      /* HQoS pipe */
      for (pipe_id = 0; pipe_id < hqos->port.n_pipes_per_subport; pipe_id++)
	{
	  u32 pos = subport_id * hqos->port.n_pipes_per_subport + pipe_id;
	  u32 profile_id = hqos->pipe_map[pos];

	  rv =
	    rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id,
				   profile_id);
	  if (rv)
	    return clib_error_return (0,
				      "HQoS%u subport %u pipe %u: rte_sched_pipe_config err (%d)",
				      xd->port_id, subport_id, pipe_id, rv);
	}
    }
  /* Set up per-thread device data for the I/O TX thread */
  xd->hqos_ht->hqos_burst_enq = hqos->burst_enq;
  xd->hqos_ht->hqos_burst_deq = hqos->burst_deq;
  vec_validate (xd->hqos_ht->pkts_enq, 2 * hqos->burst_enq - 1);
  vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
  xd->hqos_ht->pkts_enq_len = 0;
  xd->hqos_ht->swq_pos = 0;
  xd->hqos_ht->flush_count = 0;
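  /*
   * Note: pkts_enq is sized to 2 * burst_enq because the SWQ scan in the
   * thread loop breaks out as soon as pkts_enq_len reaches burst_enq; the
   * worst case is a staging buffer holding burst_enq - 1 packets topped up
   * by one more full ring dequeue of burst_enq packets, i.e.
   * 2 * burst_enq - 1 in total.
   */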
  /* Set up per-thread device data for each worker thread */
  for (i = 0; i < worker_thread_count + 1; i++)
    {
      u32 tid;
      if (i)
	tid = worker_thread_first + (i - 1);
      else
	tid = i;

      xd->hqos_wt[tid].swq = xd->hqos_ht->swq[i];
      xd->hqos_wt[tid].hqos_field0_slabpos = hqos->pktfield0_slabpos;
      xd->hqos_wt[tid].hqos_field0_slabmask = hqos->pktfield0_slabmask;
      xd->hqos_wt[tid].hqos_field0_slabshr =
	count_trailing_zeros (hqos->pktfield0_slabmask);
      xd->hqos_wt[tid].hqos_field1_slabpos = hqos->pktfield1_slabpos;
      xd->hqos_wt[tid].hqos_field1_slabmask = hqos->pktfield1_slabmask;
      xd->hqos_wt[tid].hqos_field1_slabshr =
	count_trailing_zeros (hqos->pktfield1_slabmask);
      xd->hqos_wt[tid].hqos_field2_slabpos = hqos->pktfield2_slabpos;
      xd->hqos_wt[tid].hqos_field2_slabmask = hqos->pktfield2_slabmask;
      xd->hqos_wt[tid].hqos_field2_slabshr =
	count_trailing_zeros (hqos->pktfield2_slabmask);
      memcpy (xd->hqos_wt[tid].hqos_tc_table, hqos->tc_table,
	      sizeof (hqos->tc_table));
    }

  return 0;
}
/*
 * dpdk_hqos_thread - Contains the main loop of an HQoS thread.
 *
 * w
 *     Information for the current thread
 */
static_always_inline void
dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (dev_pos >= n_devs)
	dev_pos = 0;
      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->port_id;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;
      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;

	  /* HWQ TX enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      u32 n_pkts = rte_eth_tx_burst (device_index,
					     (uint16_t) queue_id,
					     pkts_enq,
					     (uint16_t) pkts_enq_len);

	      /* Free the packets not accepted by the NIC TX queue */
	      for (; n_pkts < pkts_enq_len; n_pkts++)
		rte_pktmbuf_free (pkts_enq[n_pkts]);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      if (pkts_enq_len)
	{
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;

      /* Advance to next device */
      dev_pos++;
    }
}
static_always_inline void
dpdk_hqos_thread_internal (vlib_main_t * vm)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 thread_index = vm->thread_index;
  u32 dev_pos;

  dev_pos = 0;
  while (1)
    {
      vlib_worker_thread_barrier_check ();

      u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]);
      if (PREDICT_FALSE (n_devs == 0))
	{
	  dev_pos = 0;
	  continue;
	}
      if (dev_pos >= n_devs)
	dev_pos = 0;
      dpdk_device_and_queue_t *dq =
	vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos);
      dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device);

      dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht;
      u32 device_index = xd->port_id;
      u16 queue_id = dq->queue_id;

      struct rte_mbuf **pkts_enq = hqos->pkts_enq;
      struct rte_mbuf **pkts_deq = hqos->pkts_deq;
      u32 pkts_enq_len = hqos->pkts_enq_len;
      u32 swq_pos = hqos->swq_pos;
      u32 n_swq = vec_len (hqos->swq), i;
      u32 flush_count = hqos->flush_count;
      /*
       * SWQ dequeue and HQoS enqueue for current device
       */
      for (i = 0; i < n_swq; i++)
	{
	  /* Get current SWQ for this device */
	  struct rte_ring *swq = hqos->swq[swq_pos];

	  /* Read SWQ burst to packet buffer of this device */
	  pkts_enq_len += rte_ring_sc_dequeue_burst (swq,
						     (void **)
						     &pkts_enq[pkts_enq_len],
						     hqos->hqos_burst_enq, 0);

	  /* Get next SWQ for this device */
	  swq_pos++;
	  if (swq_pos >= n_swq)
	    swq_pos = 0;
	  hqos->swq_pos = swq_pos;
	  /* HQoS enqueue when burst available */
	  if (pkts_enq_len >= hqos->hqos_burst_enq)
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	      break;
	    }
	}
      if (pkts_enq_len)
	{
	  flush_count++;
	  if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
	    {
	      rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);

	      pkts_enq_len = 0;
	      flush_count = 0;
	    }
	}
      hqos->pkts_enq_len = pkts_enq_len;
      hqos->flush_count = flush_count;
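      /*
       * Note: flush_count counts scheduling rounds in which a partial burst
       * sat in pkts_enq; once it reaches HQOS_FLUSH_COUNT_THRESHOLD the
       * partial burst is pushed into the scheduler anyway, bounding the
       * queueing delay of packets that arrive during quiet periods.
       */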
      /*
       * HQoS dequeue and HWQ TX enqueue for current device
       */
      {
	u32 pkts_deq_len, n_pkts;

	pkts_deq_len = rte_sched_port_dequeue (hqos->hqos,
					       pkts_deq,
					       hqos->hqos_burst_deq);

	for (n_pkts = 0; n_pkts < pkts_deq_len;)
	  n_pkts += rte_eth_tx_burst (device_index,
				      (uint16_t) queue_id,
				      &pkts_deq[n_pkts],
				      (uint16_t) (pkts_deq_len - n_pkts));
      }
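      /*
       * Note: unlike the bypass path above, packets already accepted by the
       * scheduler are never dropped here; the loop retries
       * rte_eth_tx_burst () until the whole dequeued burst has been handed
       * to the NIC.
       */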
      /* Advance to next device */
      dev_pos++;
    }
}
void
dpdk_hqos_thread (vlib_worker_thread_t * w)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  dpdk_main_t *dm = &dpdk_main;

  vm = vlib_get_main ();

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  /* Wait until the dpdk init sequence is complete */
  while (tm->worker_thread_release == 0)
    vlib_worker_thread_barrier_check ();

  if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0)
    return
      clib_error
      ("current I/O TX thread does not have any devices assigned to it");

  if (DPDK_HQOS_DBG_BYPASS)
    dpdk_hqos_thread_internal_hqos_dbg_bypass (vm);
  else
    dpdk_hqos_thread_internal (vm);
}
void
dpdk_hqos_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_worker_thread_init (w);
  dpdk_hqos_thread (w);
}
/* *INDENT-OFF* */
VLIB_REGISTER_THREAD (hqos_thread_reg, static) =
{
  .name = "hqos-threads",
  .short_name = "hqos-threads",
  .function = dpdk_hqos_thread_fn,
};
/* *INDENT-ON* */
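/*
 * Usage note (editor's sketch, assuming the standard VPP startup.conf
 * syntax of this era): HQoS threads are pinned via the cpu stanza and a
 * port gets an HQoS scheduler via its dpdk device stanza, e.g.:
 *
 *   cpu { main-core 0 corelist-workers 1 corelist-hqos-threads 2 }
 *   dpdk { dev 0000:02:00.0 { hqos } }
 */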
/*
 * HQoS run-time code to be called by the worker threads
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)     \
({                                                              \
  u64 slab = *((u64 *) &byte_array[slab_pos]);                  \
  u64 val = (rte_be_to_cpu_64 (slab) & slab_mask) >> slab_shr;  \
  val;                                                          \
})
#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, traffic_class, queue, color) \
  ((((u64) (queue)) & 0x3) |                                    \
   ((((u64) (traffic_class)) & 0x3) << 2) |                     \
   ((((u64) (color)) & 0x3) << 4) |                             \
   ((((u64) (subport)) & 0xFFFF) << 16) |                       \
   ((((u64) (pipe)) & 0xFFFFFFFF) << 32))
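/*
 * Worked example (editor's illustration): subport 0, pipe 5, traffic
 * class 2, queue 1, color 0 packs to (1) | (2 << 2) | (5 << 32) =
 * 0x0000000500000009, which dpdk_hqos_metadata_set () below splits across
 * hash.sched.lo/hi.
 */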
void
dpdk_hqos_metadata_set (dpdk_device_hqos_per_worker_thread_t * hqos,
			struct rte_mbuf **pkts, u32 n_pkts)
{
  u32 i;

  for (i = 0; i < (n_pkts & (~0x3)); i += 4)
    {
      struct rte_mbuf *pkt0 = pkts[i];
      struct rte_mbuf *pkt1 = pkts[i + 1];
      struct rte_mbuf *pkt2 = pkts[i + 2];
      struct rte_mbuf *pkt3 = pkts[i + 3];

      u8 *pkt0_data = rte_pktmbuf_mtod (pkt0, u8 *);
      u8 *pkt1_data = rte_pktmbuf_mtod (pkt1, u8 *);
      u8 *pkt2_data = rte_pktmbuf_mtod (pkt2, u8 *);
      u8 *pkt3_data = rte_pktmbuf_mtod (pkt3, u8 *);

      u64 pkt0_subport = BITFIELD (pkt0_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt0_pipe = BITFIELD (pkt0_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt0_dscp = BITFIELD (pkt0_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt0_tc = hqos->hqos_tc_table[pkt0_dscp & 0x3F] >> 2;
      u32 pkt0_tc_q = hqos->hqos_tc_table[pkt0_dscp & 0x3F] & 0x3;
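      /*
       * Note: each tc_table entry packs (traffic_class << 2) | queue, so
       * ">> 2" recovers the traffic class and "& 0x3" the queue within it;
       * the default table (entries 0..15 repeated) spreads the 64 DSCP
       * values over 4 TCs x 4 queues.
       */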
      u64 pkt1_subport = BITFIELD (pkt1_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt1_pipe = BITFIELD (pkt1_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt1_dscp = BITFIELD (pkt1_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt1_tc = hqos->hqos_tc_table[pkt1_dscp & 0x3F] >> 2;
      u32 pkt1_tc_q = hqos->hqos_tc_table[pkt1_dscp & 0x3F] & 0x3;

      u64 pkt2_subport = BITFIELD (pkt2_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt2_pipe = BITFIELD (pkt2_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt2_dscp = BITFIELD (pkt2_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt2_tc = hqos->hqos_tc_table[pkt2_dscp & 0x3F] >> 2;
      u32 pkt2_tc_q = hqos->hqos_tc_table[pkt2_dscp & 0x3F] & 0x3;

      u64 pkt3_subport = BITFIELD (pkt3_data, hqos->hqos_field0_slabpos,
				   hqos->hqos_field0_slabmask,
				   hqos->hqos_field0_slabshr);
      u64 pkt3_pipe = BITFIELD (pkt3_data, hqos->hqos_field1_slabpos,
				hqos->hqos_field1_slabmask,
				hqos->hqos_field1_slabshr);
      u64 pkt3_dscp = BITFIELD (pkt3_data, hqos->hqos_field2_slabpos,
				hqos->hqos_field2_slabmask,
				hqos->hqos_field2_slabshr);
      u32 pkt3_tc = hqos->hqos_tc_table[pkt3_dscp & 0x3F] >> 2;
      u32 pkt3_tc_q = hqos->hqos_tc_table[pkt3_dscp & 0x3F] & 0x3;
      u64 pkt0_sched = RTE_SCHED_PORT_HIERARCHY (pkt0_subport,
						 pkt0_pipe,
						 pkt0_tc,
						 pkt0_tc_q,
						 0 /* color */ );
      u64 pkt1_sched = RTE_SCHED_PORT_HIERARCHY (pkt1_subport,
						 pkt1_pipe,
						 pkt1_tc,
						 pkt1_tc_q,
						 0 /* color */ );
      u64 pkt2_sched = RTE_SCHED_PORT_HIERARCHY (pkt2_subport,
						 pkt2_pipe,
						 pkt2_tc,
						 pkt2_tc_q,
						 0 /* color */ );
      u64 pkt3_sched = RTE_SCHED_PORT_HIERARCHY (pkt3_subport,
						 pkt3_pipe,
						 pkt3_tc,
						 pkt3_tc_q,
						 0 /* color */ );
      pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
      pkt0->hash.sched.hi = pkt0_sched >> 32;
      pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
      pkt1->hash.sched.hi = pkt1_sched >> 32;
      pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
      pkt2->hash.sched.hi = pkt2_sched >> 32;
      pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
      pkt3->hash.sched.hi = pkt3_sched >> 32;
    }
  /* Handle the 0..3 packets left over after the 4-wide loop */
  for (; i < n_pkts; i++)
    {
      struct rte_mbuf *pkt = pkts[i];

      u8 *pkt_data = rte_pktmbuf_mtod (pkt, u8 *);
      u64 pkt_subport = BITFIELD (pkt_data, hqos->hqos_field0_slabpos,
				  hqos->hqos_field0_slabmask,
				  hqos->hqos_field0_slabshr);
      u64 pkt_pipe = BITFIELD (pkt_data, hqos->hqos_field1_slabpos,
			       hqos->hqos_field1_slabmask,
			       hqos->hqos_field1_slabshr);
      u64 pkt_dscp = BITFIELD (pkt_data, hqos->hqos_field2_slabpos,
			       hqos->hqos_field2_slabmask,
			       hqos->hqos_field2_slabshr);
      u32 pkt_tc = hqos->hqos_tc_table[pkt_dscp & 0x3F] >> 2;
      u32 pkt_tc_q = hqos->hqos_tc_table[pkt_dscp & 0x3F] & 0x3;
      u64 pkt_sched = RTE_SCHED_PORT_HIERARCHY (pkt_subport,
						pkt_pipe,
						pkt_tc,
						pkt_tc_q,
						0 /* color */ );
      pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
      pkt->hash.sched.hi = pkt_sched >> 32;
    }
}
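/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * worker thread classifies its TX packets and hands them to the HQoS thread
 * through its per-device SWQ, roughly:
 *
 *   dpdk_device_hqos_per_worker_thread_t *wt = &xd->hqos_wt[tid];
 *   dpdk_hqos_metadata_set (wt, pkts, n_pkts);
 *   rte_ring_sp_enqueue_burst (wt->swq, (void **) pkts, n_pkts, 0);
 *
 * The HQoS thread later drains the SWQ and feeds rte_sched_port_enqueue ().
 */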
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */