New upstream version 16.11.9
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 2f4a08c..0cfde46 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -149,30 +149,18 @@ enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
        uint8_t cqrd_flags = cqrd->flags;
        static const uint32_t cq_type_table[128] __rte_cache_aligned = {
                [0x00] = RTE_PTYPE_UNKNOWN,
-               [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_NONFRAG,
-               [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_UDP,
-               [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_TCP,
-               [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_FRAG,
-               [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_UDP,
-               [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_TCP,
-               [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_NONFRAG,
-               [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_UDP,
-               [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_TCP,
-               [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_FRAG,
-               [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_UDP,
-               [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
-                         | RTE_PTYPE_L4_TCP,
+               [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+               [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+               [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+               [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+               [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+               [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+               [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+               [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+               [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+               [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+               [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+               [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                /* All others reserved */
        };
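+       /*
+        * The table carries only the L3/L4 types; enic_cq_rx_to_pkt_flags()
+        * ORs in the L2 type (ETHER or ETHER_VLAN) based on the VLAN bits.
+        */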
        cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
@@ -185,9 +173,10 @@ static inline void
 enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
 {
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-       uint16_t ciflags, bwflags, pkt_flags = 0;
+       uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
        ciflags = enic_cq_rx_desc_ciflags(cqrd);
        bwflags = enic_cq_rx_desc_bwflags(cqrd);
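+       /* The CQ reports the VLAN TCI whether or not the tag was stripped */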
+       vlan_tci = enic_cq_rx_desc_vlan(cqrd);
 
        mbuf->ol_flags = 0;
 
@@ -195,13 +184,19 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
        if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
                goto mbuf_flags_done;
 
-       /* VLAN stripping */
+       /* VLAN STRIPPED flag. The L2 packet type is also set here. */
        if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
                pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
-               mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+               mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
        } else {
-               mbuf->vlan_tci = 0;
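+               /* Not stripped: a non-zero TCI means the tag is still in the frame */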
+               if (vlan_tci != 0) {
+                       pkt_flags |= PKT_RX_VLAN_PKT;
+                       mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+               } else {
+                       mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+               }
        }
+       mbuf->vlan_tci = vlan_tci;
 
        /* RSS flag */
        if (enic_cq_rx_desc_rss_type(cqrd)) {
@@ -212,9 +207,12 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
        /* checksum flags */
        if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
                (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
+               uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
                if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-               if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
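+               /* L4 packet types are values under RTE_PTYPE_L4_MASK, not
+                * independent bits, so compare for equality: a bitwise test
+                * against TCP|UDP would also match e.g. RTE_PTYPE_L4_FRAG.
+                */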
+               if (l4_flags == RTE_PTYPE_L4_UDP ||
+                   l4_flags == RTE_PTYPE_L4_TCP) {
                        if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
                                pkt_flags |= PKT_RX_L4_CKSUM_BAD;
                }
@@ -224,6 +222,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
        mbuf->ol_flags = pkt_flags;
 }
 
+/* Dummy receive function that temporarily replaces the real receive function
+ * so that reconfiguration operations can run safely.
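+ * (Presumably installed as the device's RX burst handler by the start/stop
+ * path outside this file.)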
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+                    __rte_unused struct rte_mbuf **rx_pkts,
+                    __rte_unused uint16_t nb_pkts)
+{
+       return 0;
+}
+
 uint16_t
 enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
@@ -300,6 +309,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        (struct cq_enet_rq_desc *)&cqd);
 
                /* Push descriptor for newly allocated mbuf */
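+               /* data_off may have been moved by a previous user of the mbuf;
+                * reset it to match the headroom assumed for dma_addr below.
+                */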
+               nmb->data_off = RTE_PKTMBUF_HEADROOM;
                dma_addr = (dma_addr_t)(nmb->buf_physaddr +
                                        RTE_PKTMBUF_HEADROOM);
                rq_enet_desc_enc(rqd_ptr, dma_addr,
@@ -398,7 +408,14 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
        pool = ((struct rte_mbuf *)buf->mb)->pool;
        for (i = 0; i < nb_to_free; i++) {
                buf = &wq->bufs[tail_idx];
-               m = (struct rte_mbuf *)(buf->mb);
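+               /* __rte_pktmbuf_prefree_seg() returns NULL when the segment
+                * cannot be freed yet (e.g. still referenced); skip it but
+                * keep advancing the ring.
+                */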
+               m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
+               buf->mb = NULL;
+
+               if (unlikely(m == NULL)) {
+                       tail_idx = enic_ring_incr(desc_count, tail_idx);
+                       continue;
+               }
+
                if (likely(m->pool == pool)) {
                        RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
                        free[nb_free++] = m;
@@ -409,10 +426,10 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
                        pool = m->pool;
                }
                tail_idx = enic_ring_incr(desc_count, tail_idx);
-               buf->mb = NULL;
        }
 
-       rte_mempool_put_bulk(pool, (void **)free, nb_free);
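+       /* nb_free can be zero if every completed segment was still referenced */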
+       if (nb_free > 0)
+               rte_mempool_put_bulk(pool, (void **)free, nb_free);
 
        wq->tail_idx = tail_idx;
        wq->ring.desc_avail += nb_to_free;
@@ -463,16 +480,23 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
+               pkt_len = tx_pkt->pkt_len;
+               data_len = tx_pkt->data_len;
+               ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
+
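+               /* Drop packets larger than the adapter's maximum and count
+                * them as oversized.
+                */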
+               if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+                       rte_pktmbuf_free(tx_pkt);
+                       rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+                       continue;
+               }
+
                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }
 
-               pkt_len = tx_pkt->pkt_len;
-               data_len = tx_pkt->data_len;
-               ol_flags = tx_pkt->ol_flags;
                mss = 0;
                vlan_id = 0;
                vlan_tag_insert = 0;