/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
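
/*
 * The small helpers below pull individual fields out of the enet RQ
 * completion descriptor (struct cq_enet_rq_desc): completed-index flags,
 * bytes-written flags, checksum status, RSS type/hash, the stripped VLAN
 * tag, and the byte count of the completed buffer.
 */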
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}
static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}
static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;

	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
/* Find the offset to L5. This is needed by enic TSO implementation.
 * Return 0 if not a TCP packet or can't figure out the length.
 */
static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eh;
	struct vlan_hdr *vh;
	struct ipv4_hdr *ip4;
	struct ipv6_hdr *ip6;
	struct tcp_hdr *th;
	uint8_t hdr_len;
	uint16_t ether_type;

	/* offset past Ethernet header */
	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	ether_type = eh->ether_type;
	hdr_len = sizeof(struct ether_hdr);
	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
		ether_type = vh->eth_proto;
		hdr_len += sizeof(struct vlan_hdr);
	}

	/* offset past IP header */
	switch (rte_be_to_cpu_16(ether_type)) {
	case ETHER_TYPE_IPv4:
		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
		if (ip4->next_proto_id != IPPROTO_TCP)
			return 0;
		hdr_len += (ip4->version_ihl & 0xf) * 4;
		break;
	case ETHER_TYPE_IPv6:
		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
		if (ip6->proto != IPPROTO_TCP)
			return 0;
		hdr_len += sizeof(struct ipv6_hdr);
		break;
	default:
		return 0;
	}

	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
		return 0;

	/* offset past TCP header */
	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
	hdr_len += (th->data_off >> 4) * 4;

	if (hdr_len > mbuf->pkt_len)
		return 0;

	return hdr_len;
}
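
/*
 * The combined L2+L3+L4 header length computed above is programmed into the
 * TSO descriptor in enic_xmit_pkts (WQ_ENET_OFFLOAD_MODE_TSO path); a return
 * value of 0 makes the transmit path drop the packet instead of handing the
 * NIC an unusable header length.
 */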
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}
/* Lookup table to translate RX CQ flags to mbuf packet type. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};

	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}
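
/*
 * The table above is indexed directly by the descriptor flags byte after it
 * is masked to the fragment/IPv4/IPv6/TCP/UDP bits, so each populated slot
 * (e.g. 0x24, which yields the IPv4 + TCP packet type) corresponds to one
 * legal combination of those bits; every other index effectively maps to
 * RTE_PTYPE_UNKNOWN.
 */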
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;

	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN STRIPPED flag. The L2 packet type is updated here also */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0) {
			pkt_flags |= PKT_RX_VLAN;
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		} else {
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
		}
	}
	mbuf->vlan_tci = vlan_tci;

	/* Flow director (classifier) match or RSS hash result */
	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;

		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		if (enic_cq_rx_desc_csum_not_calc(cqrd)) {
			pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
				      PKT_RX_L4_CKSUM_UNKNOWN);
		} else {
			uint32_t l4_flags;

			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}
/* dummy receive function to replace actual function in
 * order to do safe reconfiguration operations.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}
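
/*
 * Receive burst. The queue handed in is the start-of-packet (sop) RQ; when
 * the adapter splits a received packet across multiple buffers, the
 * remaining segments come from the associated data RQ
 * (sop_rq->data_queue_idx), and completions for both RQs arrive on the same
 * CQ.
 */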
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
	data_rq = &enic->rq[sop_rq->data_queue_idx];
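
	/*
	 * Each completion carries a color bit that the NIC flips on every
	 * pass over the ring; a descriptor whose color still matches
	 * cq->last_color has not been written for this pass, so the loop
	 * below stops there instead of relying on an explicit count.
	 */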
	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);
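
		/*
		 * The ring slot is refilled with the fresh mbuf below even
		 * when packet_error is set; an errored packet is freed once
		 * EOP is seen instead of being returned to the caller.
		 */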
		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_iova +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		/* Drop the whole (possibly chained) packet on error */
		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}
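
	/*
	 * A packet that has not reached EOP by the end of the burst is
	 * remembered in the sop RQ so the next call can keep appending
	 * segments to it.
	 */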
	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		/* Make the descriptor writes visible to the NIC before
		 * ringing the doorbells.
		 */
		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}
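
/*
 * Transmit cleanup. enic_cleanup_wq reads the completed-descriptor index
 * that the adapter writes back into host memory (wq->cqmsg_rz), so
 * reclaiming transmitted mbufs is just a walk of the ring from tail_idx up
 * to that index; no per-packet completion descriptors are processed.
 */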
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		/* Batch mbufs from the same mempool into one bulk free */
		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}
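
/*
 * Transmit burst. Each mbuf segment consumes one WQ descriptor; the first
 * descriptor of a packet carries the checksum/TSO offload fields and the
 * VLAN tag, and the EOP bit is set only on the final segment.
 */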
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		/* Stop if the packet's segments do not all fit in the ring */
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tso_header_len(tx_pkt);

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
		}
		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* Nic uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		if (ol_flags & PKT_TX_VLAN_PKT) {
			vlan_tag_insert = 1;
			vlan_id = tx_pkt->vlan_tci;
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, eop, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;
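
		/*
		 * For multi-segment mbufs, post one continuation descriptor
		 * per remaining segment; only the last one sets EOP, and it
		 * reuses the offload mode and VLAN settings chosen above.
		 */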
		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}