X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_sched%2Frte_sched.c;h=614705d81ed8cfbac7cf25484da86bd483d61601;hb=cf4840535d850698d76047702a6951da6cfd73e8;hp=1609ea878ee2470024a37091e6ce72417ec1d1ca;hpb=b5cdd645c9fc62341d55aebbfc93a1b648415512;p=deb_dpdk.git

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 1609ea87..614705d8 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -734,19 +734,25 @@ rte_sched_port_config(struct rte_sched_port_params *params)
 void
 rte_sched_port_free(struct rte_sched_port *port)
 {
-	unsigned int queue;
+	uint32_t qindex;
+	uint32_t n_queues_per_port;
 
 	/* Check user parameters */
 	if (port == NULL)
 		return;
 
-	/* Free enqueued mbufs */
-	for (queue = 0; queue < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; queue++) {
-		struct rte_mbuf **mbufs = rte_sched_port_qbase(port, queue);
-		unsigned int i;
+	n_queues_per_port = rte_sched_port_queues_per_port(port);
 
-		for (i = 0; i < rte_sched_port_qsize(port, queue); i++)
-			rte_pktmbuf_free(mbufs[i]);
+	/* Free enqueued mbufs */
+	for (qindex = 0; qindex < n_queues_per_port; qindex++) {
+		struct rte_mbuf **mbufs = rte_sched_port_qbase(port, qindex);
+		uint16_t qsize = rte_sched_port_qsize(port, qindex);
+		struct rte_sched_queue *queue = port->queue + qindex;
+		uint16_t qr = queue->qr & (qsize - 1);
+		uint16_t qw = queue->qw & (qsize - 1);
+
+		for (; qr != qw; qr = (qr + 1) & (qsize - 1))
+			rte_pktmbuf_free(mbufs[qr]);
 	}
 
 	rte_bitmap_free(port->bmp);
@@ -1084,10 +1090,17 @@ rte_sched_port_update_subport_stats(struct rte_sched_port *port, uint32_t qindex
 	s->stats.n_bytes_tc[tc_index] += pkt_len;
 }
 
+#ifdef RTE_SCHED_RED
+static inline void
+rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
+						uint32_t qindex,
+						struct rte_mbuf *pkt, uint32_t red)
+#else
 static inline void
 rte_sched_port_update_subport_stats_on_drop(struct rte_sched_port *port,
-						uint32_t qindex,
-						struct rte_mbuf *pkt, uint32_t red)
+						uint32_t qindex,
+						struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
 {
 	struct rte_sched_subport *s = port->subport + (qindex / rte_sched_port_queues_per_subport(port));
 	uint32_t tc_index = (qindex >> 2) & 0x3;
@@ -1110,10 +1123,17 @@ rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t qindex,
 	qe->stats.n_bytes += pkt_len;
 }
 
+#ifdef RTE_SCHED_RED
 static inline void
 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
-						uint32_t qindex,
-						struct rte_mbuf *pkt, uint32_t red)
+						uint32_t qindex,
+						struct rte_mbuf *pkt, uint32_t red)
+#else
+static inline void
+rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
+						uint32_t qindex,
+						struct rte_mbuf *pkt, __rte_unused uint32_t red)
+#endif
 {
 	struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
 	uint32_t pkt_len = pkt->pkt_len;
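
Note on the rte_sched_port_free() hunk: the old loop freed every slot of only the first RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE queues, so it both missed mbufs still enqueued on the rest of the port's queues and could walk slots that no longer hold a valid mbuf. The reworked loop visits every queue of the port and frees only the occupied slots between the queue's read pointer (qr) and write pointer (qw); the wrap-around works because the scheduler requires each per-queue size to be a power of two. Below is a minimal standalone sketch of that drain logic, not the DPDK code itself; struct pkt, drain_ring() and plain free() are hypothetical stand-ins for struct rte_mbuf, the in-place loop and rte_pktmbuf_free().

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical element type standing in for struct rte_mbuf. */
struct pkt {
	void *data;
};

/*
 * Free the occupied slots of a ring whose size is a power of two,
 * walking from the read pointer (qr) to the write pointer (qw),
 * mirroring the reworked loop in rte_sched_port_free().
 */
static void
drain_ring(struct pkt **slots, uint16_t qsize, uint16_t qr, uint16_t qw)
{
	/* qr and qw grow monotonically in the scheduler; mask them into range */
	qr &= qsize - 1;
	qw &= qsize - 1;

	for (; qr != qw; qr = (qr + 1) & (qsize - 1))
		free(slots[qr]);	/* rte_pktmbuf_free() in the real code */
}

For example, with qsize = 8, qr = 6 and qw = 2 the loop frees slots 6, 7, 0 and 1 and then stops, so only the range that can actually hold live packets is touched.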
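
Note on the two stats-on-drop hunks: the red argument is only read when the library is built with RTE_SCHED_RED, so a non-RED build can warn about an unused parameter. The change keeps a single call signature but provides two conditionally compiled definitions, tagging red with __rte_unused (DPDK's marker for the compiler's unused attribute) in the non-RED variant. The following is a minimal sketch of the same pattern under assumed names; EXAMPLE_RED, example_unused and stats_on_drop() stand in for RTE_SCHED_RED, __rte_unused and the rte_sched helpers.

#include <stdint.h>

/* Stand-in for __rte_unused, which maps to the unused attribute. */
#define example_unused __attribute__((unused))

struct drop_stats {
	uint64_t n_pkts_dropped;
#ifdef EXAMPLE_RED
	uint64_t n_pkts_red_dropped;
#endif
};

/*
 * Two definitions with one signature: call sites always pass 'red',
 * but only the RED-enabled build reads it.  Build with -DEXAMPLE_RED
 * to select the first variant.
 */
#ifdef EXAMPLE_RED
static inline void
stats_on_drop(struct drop_stats *s, uint32_t red)
{
	s->n_pkts_dropped += 1;
	s->n_pkts_red_dropped += red;	/* count drops caused by RED */
}
#else
static inline void
stats_on_drop(struct drop_stats *s, example_unused uint32_t red)
{
	s->n_pkts_dropped += 1;		/* 'red' is intentionally ignored here */
}
#endif

Keeping the signature identical in both branches means the enqueue path can call the helper unconditionally; the preprocessor, not the call sites, absorbs the configuration difference.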